| code (string, length 5–1M) | repo_name (string, length 5–109) | path (string, length 6–208) | language (1 class) | license (15 classes) | size (int64, 5–1M) |
---|---|---|---|---|---|
/*
* Copyright 2012-2014 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.xfinity.sirius.api.impl.membership
import scalax.file.Path
import scalax.io.Line.Terminators.NewLine
object FileBasedClusterConfig {
def apply(config: String): FileBasedClusterConfig = {
val configFile = Path.fromString(config)
if (!configFile.exists) {
throw new IllegalStateException("ClusterConfig file not found at location %s, cannot boot.".format(config))
}
new FileBasedClusterConfig(configFile)
}
}
/**
* ClusterConfig based on a static file. File will be re-read each time members is accessed.
*
* @param config Path of config file
*/
private[membership] class FileBasedClusterConfig(config: Path) extends ClusterConfig {
/**
* List of akka paths for the members of the cluster.
*
* @return list of members
*/
def members = {
config.lines(NewLine, includeTerminator = false)
.toList
.filterNot(_.startsWith("#"))
}
}
| weggert/sirius | src/main/scala/com/comcast/xfinity/sirius/api/impl/membership/FileBasedClusterConfig.scala | Scala | apache-2.0 | 1,559 |
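A minimal usage sketch for the FileBasedClusterConfig above, written as if from inside the `com.comcast.xfinity.sirius.api.impl.membership` package (the class itself is package-private); the file path is hypothetical and the file is assumed to hold one Akka member path per line, with `#` marking comment lines.

```scala
// Hypothetical path; not part of the original source.
val clusterConfig = FileBasedClusterConfig("/etc/sirius/cluster.conf")

// `members` re-reads the file on every access, so membership edits are picked up
// without a restart; lines starting with '#' are filtered out.
val memberPaths: List[String] = clusterConfig.members
```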
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package expr
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScConstrExpr
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
class ScConstrExprImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScConstrExpr {
override def toString: String = "ConstructorExpression"
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/expr/ScConstrExprImpl.scala | Scala | apache-2.0 | 398 |
package com.autodesk.tct
import akka.actor.{ActorSystem, Props}
import com.autodesk.tct.challenger.cassandra.CassandraClient
import com.autodesk.tct.challenger.data.repositories.RepositoryFactory
import com.autodesk.tct.services.UserService
import com.autodesk.tct.share.{ApplyPolicy, ApplyRegistrationPolicy}
import play._
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
/**
* Global boot object
*/
class Boot extends GlobalSettings {
/**
* Defines the startup actions
* @param app the play app
*/
override def onStart(app: Application) {
super.onStart(app)
/**
* Get configuration
*/
val conf = Play.application.configuration
/**
* Initialize cassandra metadata
*/
CassandraClient.init(
conf.getStringList("application.db.cassandra.addresses").asScala.toList,
conf.getString("application.db.cassandra.namespace")
)
/**
* Load cassandra driver
*/
RepositoryFactory.driver(conf.getString("application.db.repository_factory"))
/**
* Schedule a job that applies the registration policy to all events past their due date
*/
val system = ActorSystem("TctChallenger")
val applyRegistrationPolicy = system.actorOf(Props(new ApplyRegistrationPolicy), name = "actor")
system.scheduler.schedule(0.second, 6.hours, applyRegistrationPolicy, ApplyPolicy())
/**
* Create default admin account
*/
UserService.createAdmin
}
}
| adsk-cp-tct/challenger-backend | app/com/autodesk/tct/Boot.scala | Scala | gpl-3.0 | 1,537 |
package cromwell.backend.impl.spark
import akka.actor.{ActorRef, Props}
import cromwell.backend._
import cromwell.backend.io.JobPathsWithDocker
import cromwell.backend.sfs.SharedFileSystemExpressionFunctions
import cromwell.core.CallContext
import wom.expression.IoFunctionSet
import wom.graph.TaskCallNode
case class SparkBackendFactory(name: String, configurationDescriptor: BackendConfigurationDescriptor) extends BackendLifecycleActorFactory {
override def workflowInitializationActorProps(workflowDescriptor: BackendWorkflowDescriptor, ioActor: ActorRef,
calls: Set[TaskCallNode], serviceRegistryActor: ActorRef, restarting: Boolean): Option[Props] = {
Option(SparkInitializationActor.props(workflowDescriptor, calls, configurationDescriptor, serviceRegistryActor))
}
override def jobExecutionActorProps(jobDescriptor: BackendJobDescriptor,
initializationData: Option[BackendInitializationData],
serviceRegistryActor: ActorRef,
ioActor: ActorRef,
backendSingletonActor: Option[ActorRef]): Props = {
SparkJobExecutionActor.props(jobDescriptor, configurationDescriptor)
}
override def expressionLanguageFunctions(workflowDescriptor: BackendWorkflowDescriptor, jobKey: BackendJobDescriptorKey,
initializationData: Option[BackendInitializationData]): IoFunctionSet = {
val jobPaths = JobPathsWithDocker(jobKey, workflowDescriptor, configurationDescriptor.backendConfig)
val callContext = CallContext(
jobPaths.callExecutionRoot,
jobPaths.stdout.toAbsolutePath.toString,
jobPaths.stderr.toAbsolutePath.toString
)
new SharedFileSystemExpressionFunctions(SparkJobExecutionActor.DefaultPathBuilders, callContext)
}
}
| ohsu-comp-bio/cromwell | supportedBackends/spark/src/main/scala/cromwell/backend/impl/spark/SparkBackendFactory.scala | Scala | bsd-3-clause | 1,916 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.feature
import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.internal.Logging
import org.apache.spark.mllib.linalg.{DenseVector, SparseVector, Vector, Vectors}
import org.apache.spark.mllib.stat.MultivariateOnlineSummarizer
import org.apache.spark.rdd.RDD
/**
* Standardizes features by removing the mean and scaling to unit std using column summary
* statistics on the samples in the training set.
*
* The "unit std" is computed using the corrected sample standard deviation
* (https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation),
* which is computed as the square root of the unbiased sample variance.
*
* @param withMean False by default. Centers the data with mean before scaling. It will build a
* dense output, so take care when applying to sparse input.
* @param withStd True by default. Scales the data to unit standard deviation.
*/
@Since("1.1.0")
class StandardScaler @Since("1.1.0") (withMean: Boolean, withStd: Boolean) extends Logging {
@Since("1.1.0")
def this() = this(false, true)
if (!(withMean || withStd)) {
logWarning("Both withMean and withStd are false. The model does nothing.")
}
/**
* Computes the mean and variance and stores as a model to be used for later scaling.
*
* @param data The data used to compute the mean and variance to build the transformation model.
* @return a StandardScalarModel
*/
@Since("1.1.0")
def fit(data: RDD[Vector]): StandardScalerModel = {
// TODO: skip computation if both withMean and withStd are false
val summary = data.treeAggregate(new MultivariateOnlineSummarizer)(
(aggregator, data) => aggregator.add(data),
(aggregator1, aggregator2) => aggregator1.merge(aggregator2))
new StandardScalerModel(
Vectors.dense(summary.variance.toArray.map(v => math.sqrt(v))),
summary.mean,
withStd,
withMean)
}
}
/**
* Represents a StandardScaler model that can transform vectors.
*
* @param std column standard deviation values
* @param mean column mean values
* @param withStd whether to scale the data to have unit standard deviation
* @param withMean whether to center the data before scaling
*/
@Since("1.1.0")
class StandardScalerModel @Since("1.3.0") (
@Since("1.3.0") val std: Vector,
@Since("1.1.0") val mean: Vector,
@Since("1.3.0") var withStd: Boolean,
@Since("1.3.0") var withMean: Boolean) extends VectorTransformer {
/**
*/
@Since("1.3.0")
def this(std: Vector, mean: Vector) {
this(std, mean, withStd = std != null, withMean = mean != null)
require(this.withStd || this.withMean,
"at least one of std or mean vectors must be provided")
if (this.withStd && this.withMean) {
require(mean.size == std.size,
"mean and std vectors must have equal size if both are provided")
}
}
@Since("1.3.0")
def this(std: Vector) = this(std, null)
/**
* :: DeveloperApi ::
*/
@Since("1.3.0")
@DeveloperApi
def setWithMean(withMean: Boolean): this.type = {
require(!(withMean && this.mean == null), "cannot set withMean to true while mean is null")
this.withMean = withMean
this
}
/**
* :: DeveloperApi ::
*/
@Since("1.3.0")
@DeveloperApi
def setWithStd(withStd: Boolean): this.type = {
require(!(withStd && this.std == null),
"cannot set withStd to true while std is null")
this.withStd = withStd
this
}
// Since `shift` will be only used in `withMean` branch, we have it as
// `lazy val` so it will be evaluated in that branch. Note that we don't
// want to create this array multiple times in `transform` function.
private lazy val shift: Array[Double] = mean.toArray
/**
* Applies standardization transformation on a vector.
*
* @param vector Vector to be standardized.
* @return Standardized vector. If the std of a column is zero, it will return default `0.0`
* for the column with zero std.
*/
@Since("1.1.0")
override def transform(vector: Vector): Vector = {
require(mean.size == vector.size)
if (withMean) {
// By default, Scala generates Java accessor methods for member variables, so each access
// to a member variable results in an `invokespecial` call, which is expensive.
// This can be avoided by keeping a local reference to `shift`.
val localShift = shift
// Must have a copy of the values since it will be modified in place
val values = vector match {
// handle DenseVector specially because its toArray does not clone the underlying array
case d: DenseVector => d.values.clone()
case v: Vector => v.toArray
}
val size = values.length
if (withStd) {
var i = 0
while (i < size) {
values(i) = if (std(i) != 0.0) (values(i) - localShift(i)) * (1.0 / std(i)) else 0.0
i += 1
}
} else {
var i = 0
while (i < size) {
values(i) -= localShift(i)
i += 1
}
}
Vectors.dense(values)
} else if (withStd) {
vector match {
case DenseVector(vs) =>
val values = vs.clone()
val size = values.length
var i = 0
while(i < size) {
values(i) *= (if (std(i) != 0.0) 1.0 / std(i) else 0.0)
i += 1
}
Vectors.dense(values)
case SparseVector(size, indices, vs) =>
// For sparse vector, the `index` array inside sparse vector object will not be changed,
// so we can re-use it to save memory.
val values = vs.clone()
val nnz = values.length
var i = 0
while (i < nnz) {
values(i) *= (if (std(indices(i)) != 0.0) 1.0 / std(indices(i)) else 0.0)
i += 1
}
Vectors.sparse(size, indices, values)
case v => throw new IllegalArgumentException("Do not support vector type " + v.getClass)
}
} else {
// Note that it's safe since we always assume that the data in RDD should be immutable.
vector
}
}
}
| mike0sv/spark | mllib/src/main/scala/org/apache/spark/mllib/feature/StandardScaler.scala | Scala | apache-2.0 | 6,949 |
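A brief usage sketch for the StandardScaler/StandardScalerModel pair above, assuming an existing SparkContext named `sc`; the sample vectors are invented.

```scala
import org.apache.spark.mllib.feature.StandardScaler
import org.apache.spark.mllib.linalg.Vectors

// Three 2-dimensional points; the column means are (2.0, 20.0) and the corrected
// sample standard deviations are (1.0, 10.0).
val data = sc.parallelize(Seq(
  Vectors.dense(1.0, 10.0),
  Vectors.dense(2.0, 20.0),
  Vectors.dense(3.0, 30.0)))

// withMean = true centers (and densifies) the output; withStd = true divides each column by its std.
val model = new StandardScaler(withMean = true, withStd = true).fit(data)

model.transform(Vectors.dense(3.0, 30.0)) // == Vectors.dense(1.0, 1.0)
```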
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.jobs.accumulo
import org.geotools.data.Query
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithFeatureType
import org.locationtech.geomesa.accumulo.data.AccumuloQueryPlan.JoinPlan
import org.locationtech.geomesa.accumulo.index._
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.index.index.attribute.AttributeIndex
import org.locationtech.geomesa.index.index.z2.Z2Index
import org.locationtech.geomesa.index.index.z3.Z3Index
import org.locationtech.geomesa.index.planning.QueryPlanner.CostEvaluation
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class AccumuloJobUtilsTest extends Specification with TestWithFeatureType {
override val spec =
"name:String:index=join:cardinality=high,age:Int:index=full:cardinality=high,dtg:Date,*geom:Point:srid=4326"
def getQuery(ecql: String, attributes: Array[String] = null): Query = {
val q = new Query(sftName, ECQL.toFilter(ecql), attributes)
// use heuristic cost evaluation to ensure consistent expectations
q.getHints.put(QueryHints.COST_EVALUATION, CostEvaluation.Index)
q
}
val queries = Seq(
// check for non-join attributes queries
("name = 'foo' AND bbox(geom,0,0,10,10)", Array("geom"), JoinIndex),
// check for join queries fall back to secondary option
("name = 'foo' AND bbox(geom,0,0,10,10)", null, Z2Index),
// check for fall-back full table scan
("name = 'foo'", null, Z3Index),
// check for full indices
("age = 20", null, AttributeIndex),
// check for full indices in complex queries
("age = 20 and bbox(geom,0,0,10,10)", null, AttributeIndex),
// check for other indices
("bbox(geom,0,0,10,10)", null, Z2Index)
)
"AccumuloJobUtils" should {
"load list of jars from class resource" in {
AccumuloJobUtils.defaultLibJars must not(beNull)
AccumuloJobUtils.defaultLibJars must not(beEmpty)
AccumuloJobUtils.defaultLibJars must contain("accumulo-core")
AccumuloJobUtils.defaultLibJars must contain("libthrift")
}
"not return join plans for getSingleQueryPlan" in {
foreach(queries) { case (ecql, attributes, index) =>
// check that non-join attributes queries are supported
val qp = AccumuloJobUtils.getSingleQueryPlan(ds, getQuery(ecql, attributes))
qp must not(beAnInstanceOf[JoinPlan])
qp.filter.index.name mustEqual index.name
}
}
"not return join plans for getMultiQueryPlan" in {
foreach(queries) { case (ecql, attributes, index) =>
// check that non-join attributes queries are supported
val qp = AccumuloJobUtils.getMultipleQueryPlan(ds, getQuery(ecql, attributes))
foreach(qp)(_ must not(beAnInstanceOf[JoinPlan]))
foreach(qp)(_.filter.index.name mustEqual index.name)
}
}
}
}
| aheyne/geomesa | geomesa-accumulo/geomesa-accumulo-jobs/src/test/scala/org/locationtech/geomesa/jobs/accumulo/AccumuloJobUtilsTest.scala | Scala | apache-2.0 | 3,433 |
// Jubatus: Online machine learning framework for distributed environment
// Copyright (C) 2014-2015 Preferred Networks and Nippon Telegraph and Telephone Corporation.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License version 2.1 as published by the Free Software Foundation.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
package us.jubat.yarn.applicationmaster
import org.json4s._
import org.json4s.native.JsonMethods._
import us.jubat.yarn.common._
import scala.util.{Failure, Success, Try}
/**
* Handles requests to the Server.
*/
class ApplicationMasterServlet() extends RestServlet {
private var mController: Option[ApplicationMasterController] = None
def setController(aController: ApplicationMasterController) = {
mController = Some(aController)
}
delete("/") {
logger.info("""delete("/status") is called.""")
Try {
mController.get.stopSelf()
} match {
case Success(msg) => //TODO: for methods that return Unit, such as stopSelf(), msg.toString may not log anything meaningful
logger.info(msg.toString)
msg
case Failure(e) =>
logger.info("error happen.", e)
halt(400, e)
}
}
put("/:seq/status") {
logger.info(
s"""put("/:seq=${params("seq")}/status") is called.
|${request.body}
""".stripMargin)
Try {
val tSeq = params("seq").toInt
(parse(request.body) \\ "status").extract[String] match {
case ControllerStatus.Wait.name =>
logger.info("status is wait")
mController.get.changeWait(tSeq)
case ControllerStatus.Stop.name =>
logger.info("status is stop")
val tSeq = params("seq").toInt
mController.get.removeContainer(tSeq)
}
} match {
case Success(msg) =>
logger.info(msg.toString) //TODO: this may not log anything meaningful either
msg
case Failure(e) =>
logger.info("error happen.", e)
halt(400, e)
}
}
put("/:seq/location") {
logger.info(
s"""put("/:seq=${params("seq")}/location") is called.
|${request.body}
""".stripMargin)
Try {
val tSeq = params("seq")
val tParams = parse(request.body)
val tContainerLocation = new Location(
(tParams \\ "container" \\ "host").extract[String],
(tParams \\ "container" \\ "port").extract[Int])
val tJubatusLocation = new Location(
(tParams \\ "jubatusServer" \\ "host").extract[String],
(tParams \\ "jubatusServer" \\ "port").extract[Int])
mController.get.registerContainer(tSeq.toInt, tContainerLocation, tJubatusLocation)
} match {
case Success(msg) =>
logger.info(msg.toString)
msg
case Failure(e) =>
logger.info("error happen.", e)
halt(400, e)
}
}
post("/model/:id") {
logger.info(
s"""port("/model/:id=${params("id")}") is called.
|${request.body}
""".stripMargin
)
Try {
val tId = params("id")
val tParams = parse(request.body)
val tPrefixPath = (tParams \\ "pathPrefix").extract[String]
mController.get.save(tPrefixPath, tId)
} match {
case Success(_) =>
case Failure(e) =>
logger.info("error happen.", e)
halt(400, e)
}
}
put("/model") {
logger.info(
s"""put("/model") is called.
|${request.body}
""".stripMargin
)
Try {
val tParams = parse(request.body)
val tPrefixPath = (tParams \\ "pathPrefix").extract[String]
val tId = (tParams \\ "id").extract[String]
mController.get.load(tPrefixPath, tId)
} match {
case Success(_) =>
case Failure(e) =>
logger.info("error happen.", e)
halt(400, e)
}
}
}
| jubatus/jubatus-on-yarn | jubatusonyarn/jubatus-on-yarn-application-master/src/main/scala/us/jubat/yarn/applicationmaster/ApplicationMasterServlet.scala | Scala | lgpl-2.1 | 4,280 |
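A hedged sketch of the JSON body that the `put("/:seq/location")` route above expects, inferred from the json4s extraction in the handler; the host names and ports are invented for illustration.

```scala
// Invented values; the handler reads container.{host,port} and jubatusServer.{host,port}
// and forwards them to ApplicationMasterController.registerContainer(seq, ...).
val locationBody =
  """{
    |  "container":     { "host": "10.0.0.5", "port": 9190 },
    |  "jubatusServer": { "host": "10.0.0.5", "port": 9199 }
    |}""".stripMargin
```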
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import java.io.{File, NotSerializableException}
import java.lang.Thread.UncaughtExceptionHandler
import java.lang.management.ManagementFactory
import java.net.{URI, URL}
import java.nio.ByteBuffer
import java.util.Properties
import java.util.concurrent._
import javax.annotation.concurrent.GuardedBy
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, HashMap, Map}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.apache.spark._
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.memory.TaskMemoryManager
import org.apache.spark.rpc.RpcTimeout
import org.apache.spark.scheduler.{DirectTaskResult, IndirectTaskResult, Task, TaskDescription}
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.storage.{StorageLevel, TaskResultBlockId}
import org.apache.spark.util._
import org.apache.spark.util.io.ChunkedByteBuffer
/**
* Spark executor, backed by a threadpool to run tasks.
*
* This can be used with Mesos, YARN, and the standalone scheduler.
* An internal RPC interface is used for communication with the driver,
* except in the case of Mesos fine-grained mode.
*/
private[spark] class Executor(
executorId: String,
executorHostname: String,
env: SparkEnv,
userClassPath: Seq[URL] = Nil,
isLocal: Boolean = false,
uncaughtExceptionHandler: UncaughtExceptionHandler = new SparkUncaughtExceptionHandler)
extends Logging {
logInfo(s"Starting executor ID $executorId on host $executorHostname")
// Application dependencies (added through SparkContext) that we've fetched so far on this node.
// Each map holds the master's timestamp for the version of that file or JAR we got.
private val currentFiles: HashMap[String, Long] = new HashMap[String, Long]()
private val currentJars: HashMap[String, Long] = new HashMap[String, Long]()
private val EMPTY_BYTE_BUFFER = ByteBuffer.wrap(new Array[Byte](0))
private val conf = env.conf
// No ip or host:port - just hostname
Utils.checkHost(executorHostname)
// must not have port specified.
assert (0 == Utils.parseHostPort(executorHostname)._2)
// Make sure the local hostname we report matches the cluster scheduler's name for this host
Utils.setCustomHostname(executorHostname)
if (!isLocal) {
// Setup an uncaught exception handler for non-local mode.
// Make any thread terminations due to uncaught exceptions kill the entire
// executor process to avoid surprising stalls.
Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler)
}
// Start worker thread pool
private val threadPool = {
val threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("Executor task launch worker-%d")
.setThreadFactory(new ThreadFactory {
override def newThread(r: Runnable): Thread =
// Use UninterruptibleThread to run tasks so that user code can run without being
// interrupted by `Thread.interrupt()`. Some issues, such as KAFKA-1894, HADOOP-10622,
// will hang forever if some methods are interrupted.
new UninterruptibleThread(r, "unused") // thread name will be set by ThreadFactoryBuilder
})
.build()
Executors.newCachedThreadPool(threadFactory).asInstanceOf[ThreadPoolExecutor]
}
private val executorSource = new ExecutorSource(threadPool, executorId)
// Pool used for threads that supervise task killing / cancellation
private val taskReaperPool = ThreadUtils.newDaemonCachedThreadPool("Task reaper")
// For tasks which are in the process of being killed, this map holds the most recently created
// TaskReaper. All accesses to this map should be synchronized on the map itself (this isn't
// a ConcurrentHashMap because we use the synchronization for purposes other than simply guarding
// the integrity of the map's internal state). The purpose of this map is to prevent the creation
// of a separate TaskReaper for every killTask() of a given task. Instead, this map allows us to
// track whether an existing TaskReaper fulfills the role of a TaskReaper that we would otherwise
// create. The map key is a task id.
private val taskReaperForTask: HashMap[Long, TaskReaper] = HashMap[Long, TaskReaper]()
if (!isLocal) {
env.blockManager.initialize(conf.getAppId)
env.metricsSystem.registerSource(executorSource)
env.metricsSystem.registerSource(env.blockManager.shuffleMetricsSource)
}
// Whether to load classes in user jars before those in Spark jars
private val userClassPathFirst = conf.getBoolean("spark.executor.userClassPathFirst", false)
// Whether to monitor killed / interrupted tasks
private val taskReaperEnabled = conf.getBoolean("spark.task.reaper.enabled", false)
// Create our ClassLoader
// do this after SparkEnv creation so can access the SecurityManager
private val urlClassLoader = createClassLoader()
private val replClassLoader = addReplClassLoaderIfNeeded(urlClassLoader)
// Set the classloader for serializer
env.serializer.setDefaultClassLoader(replClassLoader)
// SPARK-21928. SerializerManager's internal instance of Kryo might get used in netty threads
// for fetching remote cached RDD blocks, so need to make sure it uses the right classloader too.
env.serializerManager.setDefaultClassLoader(replClassLoader)
// Max size of direct result. If task result is bigger than this, we use the block manager
// to send the result back.
private val maxDirectResultSize = Math.min(
conf.getSizeAsBytes("spark.task.maxDirectResultSize", 1L << 20),
RpcUtils.maxMessageSizeBytes(conf))
// Limit of bytes for total size of results (default is 1GB)
private val maxResultSize = Utils.getMaxResultSize(conf)
// Maintains the list of running tasks.
private val runningTasks = new ConcurrentHashMap[Long, TaskRunner]
// Executor for the heartbeat task.
private val heartbeater = ThreadUtils.newDaemonSingleThreadScheduledExecutor("driver-heartbeater")
// must be initialized before running startDriverHeartbeat()
private val heartbeatReceiverRef =
RpcUtils.makeDriverRef(HeartbeatReceiver.ENDPOINT_NAME, conf, env.rpcEnv)
/**
* When an executor fails to send heartbeats to the driver more than `HEARTBEAT_MAX_FAILURES`
* times in a row, it kills itself. The default value is 60, which amounts to retrying for
* about 10 minutes at the default 10s heartbeat interval.
*/
private val HEARTBEAT_MAX_FAILURES = conf.getInt("spark.executor.heartbeat.maxFailures", 60)
/**
* Count the failure times of heartbeat. It should only be accessed in the heartbeat thread. Each
* successful heartbeat will reset it to 0.
*/
private var heartbeatFailures = 0
startDriverHeartbeater()
private[executor] def numRunningTasks: Int = runningTasks.size()
def launchTask(context: ExecutorBackend, taskDescription: TaskDescription): Unit = {
val tr = new TaskRunner(context, taskDescription)
runningTasks.put(taskDescription.taskId, tr)
threadPool.execute(tr)
}
def killTask(taskId: Long, interruptThread: Boolean, reason: String): Unit = {
val taskRunner = runningTasks.get(taskId)
if (taskRunner != null) {
if (taskReaperEnabled) {
val maybeNewTaskReaper: Option[TaskReaper] = taskReaperForTask.synchronized {
val shouldCreateReaper = taskReaperForTask.get(taskId) match {
case None => true
case Some(existingReaper) => interruptThread && !existingReaper.interruptThread
}
if (shouldCreateReaper) {
val taskReaper = new TaskReaper(
taskRunner, interruptThread = interruptThread, reason = reason)
taskReaperForTask(taskId) = taskReaper
Some(taskReaper)
} else {
None
}
}
// Execute the TaskReaper from outside of the synchronized block.
maybeNewTaskReaper.foreach(taskReaperPool.execute)
} else {
taskRunner.kill(interruptThread = interruptThread, reason = reason)
}
}
}
/**
* Function to kill the running tasks in an executor.
* This can be called by executor back-ends to kill the
* tasks instead of taking the JVM down.
* @param interruptThread whether to interrupt the task thread
*/
def killAllTasks(interruptThread: Boolean, reason: String) : Unit = {
runningTasks.keys().asScala.foreach(t =>
killTask(t, interruptThread = interruptThread, reason = reason))
}
def stop(): Unit = {
env.metricsSystem.report()
heartbeater.shutdown()
heartbeater.awaitTermination(10, TimeUnit.SECONDS)
threadPool.shutdown()
if (!isLocal) {
env.stop()
}
}
/** Returns the total amount of time this JVM process has spent in garbage collection. */
private def computeTotalGcTime(): Long = {
ManagementFactory.getGarbageCollectorMXBeans.asScala.map(_.getCollectionTime).sum
}
class TaskRunner(
execBackend: ExecutorBackend,
private val taskDescription: TaskDescription)
extends Runnable {
val taskId = taskDescription.taskId
val threadName = s"Executor task launch worker for task $taskId"
private val taskName = taskDescription.name
/** If specified, this task has been killed and this option contains the reason. */
@volatile private var reasonIfKilled: Option[String] = None
@volatile private var threadId: Long = -1
def getThreadId: Long = threadId
/** Whether this task has been finished. */
@GuardedBy("TaskRunner.this")
private var finished = false
def isFinished: Boolean = synchronized { finished }
/** How much the JVM process has spent in GC when the task starts to run. */
@volatile var startGCTime: Long = _
/**
* The task to run. This will be set in run() by deserializing the task binary coming
* from the driver. Once it is set, it will never be changed.
*/
@volatile var task: Task[Any] = _
def kill(interruptThread: Boolean, reason: String): Unit = {
logInfo(s"Executor is trying to kill $taskName (TID $taskId), reason: $reason")
reasonIfKilled = Some(reason)
if (task != null) {
synchronized {
if (!finished) {
task.kill(interruptThread, reason)
}
}
}
}
/**
* Set the finished flag to true and clear the current thread's interrupt status
*/
private def setTaskFinishedAndClearInterruptStatus(): Unit = synchronized {
this.finished = true
// SPARK-14234 - Reset the interrupted status of the thread to avoid the
// ClosedByInterruptException during execBackend.statusUpdate which causes
// Executor to crash
Thread.interrupted()
// Notify any waiting TaskReapers. Generally there will only be one reaper per task but there
// is a rare corner-case where one task can have two reapers in case cancel(interrupt=False)
// is followed by cancel(interrupt=True). Thus we use notifyAll() to avoid a lost wakeup:
notifyAll()
}
override def run(): Unit = {
threadId = Thread.currentThread.getId
Thread.currentThread.setName(threadName)
val threadMXBean = ManagementFactory.getThreadMXBean
val taskMemoryManager = new TaskMemoryManager(env.memoryManager, taskId)
val deserializeStartTime = System.currentTimeMillis()
val deserializeStartCpuTime = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
threadMXBean.getCurrentThreadCpuTime
} else 0L
Thread.currentThread.setContextClassLoader(replClassLoader)
val ser = env.closureSerializer.newInstance()
logInfo(s"Running $taskName (TID $taskId)")
execBackend.statusUpdate(taskId, TaskState.RUNNING, EMPTY_BYTE_BUFFER)
var taskStart: Long = 0
var taskStartCpu: Long = 0
startGCTime = computeTotalGcTime()
try {
// Must be set before updateDependencies() is called, in case fetching dependencies
// requires access to properties contained within (e.g. for access control).
Executor.taskDeserializationProps.set(taskDescription.properties)
updateDependencies(taskDescription.addedFiles, taskDescription.addedJars)
task = ser.deserialize[Task[Any]](
taskDescription.serializedTask, Thread.currentThread.getContextClassLoader)
task.localProperties = taskDescription.properties
task.setTaskMemoryManager(taskMemoryManager)
// If this task has been killed before we deserialized it, let's quit now. Otherwise,
// continue executing the task.
val killReason = reasonIfKilled
if (killReason.isDefined) {
// Throw an exception rather than returning, because returning within a try{} block
// causes a NonLocalReturnControl exception to be thrown. The NonLocalReturnControl
// exception will be caught by the catch block, leading to an incorrect ExceptionFailure
// for the task.
throw new TaskKilledException(killReason.get)
}
// The purpose of updating the epoch here is to invalidate executor map output status cache
// in case FetchFailures have occurred. In local mode `env.mapOutputTracker` will be
// MapOutputTrackerMaster and its cache invalidation is not based on epoch numbers so
// we don't need to make any special calls here.
if (!isLocal) {
logDebug("Task " + taskId + "'s epoch is " + task.epoch)
env.mapOutputTracker.asInstanceOf[MapOutputTrackerWorker].updateEpoch(task.epoch)
}
// Run the actual task and measure its runtime.
taskStart = System.currentTimeMillis()
taskStartCpu = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
threadMXBean.getCurrentThreadCpuTime
} else 0L
var threwException = true
val value = try {
val res = task.run(
taskAttemptId = taskId,
attemptNumber = taskDescription.attemptNumber,
metricsSystem = env.metricsSystem)
threwException = false
res
} finally {
val releasedLocks = env.blockManager.releaseAllLocksForTask(taskId)
val freedMemory = taskMemoryManager.cleanUpAllAllocatedMemory()
if (freedMemory > 0 && !threwException) {
val errMsg = s"Managed memory leak detected; size = $freedMemory bytes, TID = $taskId"
if (conf.getBoolean("spark.unsafe.exceptionOnMemoryLeak", false)) {
throw new SparkException(errMsg)
} else {
logWarning(errMsg)
}
}
if (releasedLocks.nonEmpty && !threwException) {
val errMsg =
s"${releasedLocks.size} block locks were not released by TID = $taskId:\\n" +
releasedLocks.mkString("[", ", ", "]")
if (conf.getBoolean("spark.storage.exceptionOnPinLeak", false)) {
throw new SparkException(errMsg)
} else {
logInfo(errMsg)
}
}
}
task.context.fetchFailed.foreach { fetchFailure =>
// uh-oh. it appears the user code has caught the fetch-failure without throwing any
// other exceptions. It's *possible* this is what the user meant to do (though highly
// unlikely). So we will log an error and keep going.
logError(s"TID ${taskId} completed successfully though internally it encountered " +
s"unrecoverable fetch failures! Most likely this means user code is incorrectly " +
s"swallowing Spark's internal ${classOf[FetchFailedException]}", fetchFailure)
}
val taskFinish = System.currentTimeMillis()
val taskFinishCpu = if (threadMXBean.isCurrentThreadCpuTimeSupported) {
threadMXBean.getCurrentThreadCpuTime
} else 0L
// If the task has been killed, let's fail it.
task.context.killTaskIfInterrupted()
val resultSer = env.serializer.newInstance()
val beforeSerialization = System.currentTimeMillis()
val valueBytes = resultSer.serialize(value)
val afterSerialization = System.currentTimeMillis()
// Deserialization happens in two parts: first, we deserialize a Task object, which
// includes the Partition. Second, Task.run() deserializes the RDD and function to be run.
task.metrics.setExecutorDeserializeTime(
(taskStart - deserializeStartTime) + task.executorDeserializeTime)
task.metrics.setExecutorDeserializeCpuTime(
(taskStartCpu - deserializeStartCpuTime) + task.executorDeserializeCpuTime)
// We need to subtract Task.run()'s deserialization time to avoid double-counting
task.metrics.setExecutorRunTime((taskFinish - taskStart) - task.executorDeserializeTime)
task.metrics.setExecutorCpuTime(
(taskFinishCpu - taskStartCpu) - task.executorDeserializeCpuTime)
task.metrics.setJvmGCTime(computeTotalGcTime() - startGCTime)
task.metrics.setResultSerializationTime(afterSerialization - beforeSerialization)
// Expose task metrics using the Dropwizard metrics system.
// Update task metrics counters
executorSource.METRIC_CPU_TIME.inc(task.metrics.executorCpuTime)
executorSource.METRIC_RUN_TIME.inc(task.metrics.executorRunTime)
executorSource.METRIC_JVM_GC_TIME.inc(task.metrics.jvmGCTime)
executorSource.METRIC_DESERIALIZE_TIME.inc(task.metrics.executorDeserializeTime)
executorSource.METRIC_DESERIALIZE_CPU_TIME.inc(task.metrics.executorDeserializeCpuTime)
executorSource.METRIC_RESULT_SERIALIZE_TIME.inc(task.metrics.resultSerializationTime)
executorSource.METRIC_SHUFFLE_FETCH_WAIT_TIME
.inc(task.metrics.shuffleReadMetrics.fetchWaitTime)
executorSource.METRIC_SHUFFLE_WRITE_TIME.inc(task.metrics.shuffleWriteMetrics.writeTime)
executorSource.METRIC_SHUFFLE_TOTAL_BYTES_READ
.inc(task.metrics.shuffleReadMetrics.totalBytesRead)
executorSource.METRIC_SHUFFLE_REMOTE_BYTES_READ
.inc(task.metrics.shuffleReadMetrics.remoteBytesRead)
executorSource.METRIC_SHUFFLE_REMOTE_BYTES_READ_TO_DISK
.inc(task.metrics.shuffleReadMetrics.remoteBytesReadToDisk)
executorSource.METRIC_SHUFFLE_LOCAL_BYTES_READ
.inc(task.metrics.shuffleReadMetrics.localBytesRead)
executorSource.METRIC_SHUFFLE_RECORDS_READ
.inc(task.metrics.shuffleReadMetrics.recordsRead)
executorSource.METRIC_SHUFFLE_REMOTE_BLOCKS_FETCHED
.inc(task.metrics.shuffleReadMetrics.remoteBlocksFetched)
executorSource.METRIC_SHUFFLE_LOCAL_BLOCKS_FETCHED
.inc(task.metrics.shuffleReadMetrics.localBlocksFetched)
executorSource.METRIC_SHUFFLE_BYTES_WRITTEN
.inc(task.metrics.shuffleWriteMetrics.bytesWritten)
executorSource.METRIC_SHUFFLE_RECORDS_WRITTEN
.inc(task.metrics.shuffleWriteMetrics.recordsWritten)
executorSource.METRIC_INPUT_BYTES_READ
.inc(task.metrics.inputMetrics.bytesRead)
executorSource.METRIC_INPUT_RECORDS_READ
.inc(task.metrics.inputMetrics.recordsRead)
executorSource.METRIC_OUTPUT_BYTES_WRITTEN
.inc(task.metrics.outputMetrics.bytesWritten)
executorSource.METRIC_OUTPUT_RECORDS_WRITTEN
.inc(task.metrics.outputMetrics.recordsWritten)
executorSource.METRIC_RESULT_SIZE.inc(task.metrics.resultSize)
executorSource.METRIC_DISK_BYTES_SPILLED.inc(task.metrics.diskBytesSpilled)
executorSource.METRIC_MEMORY_BYTES_SPILLED.inc(task.metrics.memoryBytesSpilled)
// Note: accumulator updates must be collected after TaskMetrics is updated
val accumUpdates = task.collectAccumulatorUpdates()
// TODO: do not serialize value twice
val directResult = new DirectTaskResult(valueBytes, accumUpdates)
val serializedDirectResult = ser.serialize(directResult)
val resultSize = serializedDirectResult.limit()
// directSend = sending directly back to the driver
val serializedResult: ByteBuffer = {
if (maxResultSize > 0 && resultSize > maxResultSize) {
logWarning(s"Finished $taskName (TID $taskId). Result is larger than maxResultSize " +
s"(${Utils.bytesToString(resultSize)} > ${Utils.bytesToString(maxResultSize)}), " +
s"dropping it.")
ser.serialize(new IndirectTaskResult[Any](TaskResultBlockId(taskId), resultSize))
} else if (resultSize > maxDirectResultSize) {
val blockId = TaskResultBlockId(taskId)
env.blockManager.putBytes(
blockId,
new ChunkedByteBuffer(serializedDirectResult.duplicate()),
StorageLevel.MEMORY_AND_DISK_SER)
logInfo(
s"Finished $taskName (TID $taskId). $resultSize bytes result sent via BlockManager)")
ser.serialize(new IndirectTaskResult[Any](blockId, resultSize))
} else {
logInfo(s"Finished $taskName (TID $taskId). $resultSize bytes result sent to driver")
serializedDirectResult
}
}
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.FINISHED, serializedResult)
} catch {
case t: Throwable if hasFetchFailure && !Utils.isFatalError(t) =>
val reason = task.context.fetchFailed.get.toTaskFailedReason
if (!t.isInstanceOf[FetchFailedException]) {
// there was a fetch failure in the task, but some user code wrapped that exception
// and threw something else. Regardless, we treat it as a fetch failure.
val fetchFailedCls = classOf[FetchFailedException].getName
logWarning(s"TID ${taskId} encountered a ${fetchFailedCls} and " +
s"failed, but the ${fetchFailedCls} was hidden by another " +
s"exception. Spark is handling this like a fetch failure and ignoring the " +
s"other exception: $t")
}
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.FAILED, ser.serialize(reason))
case t: TaskKilledException =>
logInfo(s"Executor killed $taskName (TID $taskId), reason: ${t.reason}")
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.KILLED, ser.serialize(TaskKilled(t.reason)))
case _: InterruptedException | NonFatal(_) if
task != null && task.reasonIfKilled.isDefined =>
val killReason = task.reasonIfKilled.getOrElse("unknown reason")
logInfo(s"Executor interrupted and killed $taskName (TID $taskId), reason: $killReason")
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(
taskId, TaskState.KILLED, ser.serialize(TaskKilled(killReason)))
case CausedBy(cDE: CommitDeniedException) =>
val reason = cDE.toTaskCommitDeniedReason
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.KILLED, ser.serialize(reason))
case t: Throwable =>
// Attempt to exit cleanly by informing the driver of our failure.
// If anything goes wrong (or this was a fatal exception), we will delegate to
// the default uncaught exception handler, which will terminate the Executor.
logError(s"Exception in $taskName (TID $taskId)", t)
// SPARK-20904: Do not report failure to driver if it happened during shutdown. Because
// libraries may set up shutdown hooks that race with running tasks during shutdown,
// spurious failures may occur and can result in improper accounting in the driver (e.g.
// the task failure would not be ignored if the shutdown happened because of preemption,
// instead of an app issue).
if (!ShutdownHookManager.inShutdown()) {
// Collect latest accumulator values to report back to the driver
val accums: Seq[AccumulatorV2[_, _]] =
if (task != null) {
task.metrics.setExecutorRunTime(System.currentTimeMillis() - taskStart)
task.metrics.setJvmGCTime(computeTotalGcTime() - startGCTime)
task.collectAccumulatorUpdates(taskFailed = true)
} else {
Seq.empty
}
val accUpdates = accums.map(acc => acc.toInfo(Some(acc.value), None))
val serializedTaskEndReason = {
try {
ser.serialize(new ExceptionFailure(t, accUpdates).withAccums(accums))
} catch {
case _: NotSerializableException =>
// t is not serializable so just send the stacktrace
ser.serialize(new ExceptionFailure(t, accUpdates, false).withAccums(accums))
}
}
setTaskFinishedAndClearInterruptStatus()
execBackend.statusUpdate(taskId, TaskState.FAILED, serializedTaskEndReason)
} else {
logInfo("Not reporting error to driver during JVM shutdown.")
}
// Don't forcibly exit unless the exception was inherently fatal, to avoid
// stopping other tasks unnecessarily.
if (Utils.isFatalError(t)) {
uncaughtExceptionHandler.uncaughtException(Thread.currentThread(), t)
}
} finally {
runningTasks.remove(taskId)
}
}
private def hasFetchFailure: Boolean = {
task != null && task.context != null && task.context.fetchFailed.isDefined
}
}
/**
* Supervises the killing / cancellation of a task by sending the interrupted flag, optionally
* sending a Thread.interrupt(), and monitoring the task until it finishes.
*
* Spark's current task cancellation / task killing mechanism is "best effort" because some tasks
* may not be interruptable or may not respond to their "killed" flags being set. If a significant
* fraction of a cluster's task slots are occupied by tasks that have been marked as killed but
* remain running then this can lead to a situation where new jobs and tasks are starved of
* resources that are being used by these zombie tasks.
*
* The TaskReaper was introduced in SPARK-18761 as a mechanism to monitor and clean up zombie
* tasks. For backwards-compatibility / backportability this component is disabled by default
* and must be explicitly enabled by setting `spark.task.reaper.enabled=true`.
*
* A TaskReaper is created for a particular task when that task is killed / cancelled. Typically
* a task will have only one TaskReaper, but it's possible for a task to have up to two reapers
* in case kill is called twice with different values for the `interrupt` parameter.
*
* Once created, a TaskReaper will run until its supervised task has finished running. If the
* TaskReaper has not been configured to kill the JVM after a timeout (i.e. if
* `spark.task.reaper.killTimeout < 0`) then this implies that the TaskReaper may run indefinitely
* if the supervised task never exits.
*/
private class TaskReaper(
taskRunner: TaskRunner,
val interruptThread: Boolean,
val reason: String)
extends Runnable {
private[this] val taskId: Long = taskRunner.taskId
private[this] val killPollingIntervalMs: Long =
conf.getTimeAsMs("spark.task.reaper.pollingInterval", "10s")
private[this] val killTimeoutMs: Long = conf.getTimeAsMs("spark.task.reaper.killTimeout", "-1")
private[this] val takeThreadDump: Boolean =
conf.getBoolean("spark.task.reaper.threadDump", true)
override def run(): Unit = {
val startTimeMs = System.currentTimeMillis()
def elapsedTimeMs = System.currentTimeMillis() - startTimeMs
def timeoutExceeded(): Boolean = killTimeoutMs > 0 && elapsedTimeMs > killTimeoutMs
try {
// Only attempt to kill the task once. If interruptThread = false then a second kill
// attempt would be a no-op and if interruptThread = true then it may not be safe or
// effective to interrupt multiple times:
taskRunner.kill(interruptThread = interruptThread, reason = reason)
// Monitor the killed task until it exits. The synchronization logic here is complicated
// because we don't want to synchronize on the taskRunner while possibly taking a thread
// dump, but we also need to be careful to avoid races between checking whether the task
// has finished and wait()ing for it to finish.
var finished: Boolean = false
while (!finished && !timeoutExceeded()) {
taskRunner.synchronized {
// We need to synchronize on the TaskRunner while checking whether the task has
// finished in order to avoid a race where the task is marked as finished right after
// we check and before we call wait().
if (taskRunner.isFinished) {
finished = true
} else {
taskRunner.wait(killPollingIntervalMs)
}
}
if (taskRunner.isFinished) {
finished = true
} else {
logWarning(s"Killed task $taskId is still running after $elapsedTimeMs ms")
if (takeThreadDump) {
try {
Utils.getThreadDumpForThread(taskRunner.getThreadId).foreach { thread =>
if (thread.threadName == taskRunner.threadName) {
logWarning(s"Thread dump from task $taskId:\\n${thread.stackTrace}")
}
}
} catch {
case NonFatal(e) =>
logWarning("Exception thrown while obtaining thread dump: ", e)
}
}
}
}
if (!taskRunner.isFinished && timeoutExceeded()) {
if (isLocal) {
logError(s"Killed task $taskId could not be stopped within $killTimeoutMs ms; " +
"not killing JVM because we are running in local mode.")
} else {
// In non-local-mode, the exception thrown here will bubble up to the uncaught exception
// handler and cause the executor JVM to exit.
throw new SparkException(
s"Killing executor JVM because killed task $taskId could not be stopped within " +
s"$killTimeoutMs ms.")
}
}
} finally {
// Clean up entries in the taskReaperForTask map.
taskReaperForTask.synchronized {
taskReaperForTask.get(taskId).foreach { taskReaperInMap =>
if (taskReaperInMap eq this) {
taskReaperForTask.remove(taskId)
} else {
// This must have been a TaskReaper where interruptThread == false where a subsequent
// killTask() call for the same task had interruptThread == true and overwrote the
// map entry.
}
}
}
}
}
}
/**
* Create a ClassLoader for use in tasks, adding any JARs specified by the user or any classes
* created by the interpreter to the search path
*/
private def createClassLoader(): MutableURLClassLoader = {
// Bootstrap the list of jars with the user class path.
val now = System.currentTimeMillis()
userClassPath.foreach { url =>
currentJars(url.getPath().split("/").last) = now
}
val currentLoader = Utils.getContextOrSparkClassLoader
// For each of the jars in the jarSet, add them to the class loader.
// We assume each of the files has already been fetched.
val urls = userClassPath.toArray ++ currentJars.keySet.map { uri =>
new File(uri.split("/").last).toURI.toURL
}
if (userClassPathFirst) {
new ChildFirstURLClassLoader(urls, currentLoader)
} else {
new MutableURLClassLoader(urls, currentLoader)
}
}
/**
* If the REPL is in use, add another ClassLoader that will read
* new classes defined by the REPL as the user types code
*/
private def addReplClassLoaderIfNeeded(parent: ClassLoader): ClassLoader = {
val classUri = conf.get("spark.repl.class.uri", null)
if (classUri != null) {
logInfo("Using REPL class URI: " + classUri)
try {
val _userClassPathFirst: java.lang.Boolean = userClassPathFirst
val klass = Utils.classForName("org.apache.spark.repl.ExecutorClassLoader")
.asInstanceOf[Class[_ <: ClassLoader]]
val constructor = klass.getConstructor(classOf[SparkConf], classOf[SparkEnv],
classOf[String], classOf[ClassLoader], classOf[Boolean])
constructor.newInstance(conf, env, classUri, parent, _userClassPathFirst)
} catch {
case _: ClassNotFoundException =>
logError("Could not find org.apache.spark.repl.ExecutorClassLoader on classpath!")
System.exit(1)
null
}
} else {
parent
}
}
/**
* Download any missing dependencies if we receive a new set of files and JARs from the
* SparkContext. Also adds any new JARs we fetched to the class loader.
*/
private def updateDependencies(newFiles: Map[String, Long], newJars: Map[String, Long]) {
lazy val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
synchronized {
// Fetch missing dependencies
for ((name, timestamp) <- newFiles if currentFiles.getOrElse(name, -1L) < timestamp) {
logInfo("Fetching " + name + " with timestamp " + timestamp)
// Fetch file with useCache mode, close cache for local mode.
Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf,
env.securityManager, hadoopConf, timestamp, useCache = !isLocal)
currentFiles(name) = timestamp
}
for ((name, timestamp) <- newJars) {
val localName = new URI(name).getPath.split("/").last
val currentTimeStamp = currentJars.get(name)
.orElse(currentJars.get(localName))
.getOrElse(-1L)
if (currentTimeStamp < timestamp) {
logInfo("Fetching " + name + " with timestamp " + timestamp)
// Fetch file with useCache mode, close cache for local mode.
Utils.fetchFile(name, new File(SparkFiles.getRootDirectory()), conf,
env.securityManager, hadoopConf, timestamp, useCache = !isLocal)
currentJars(name) = timestamp
// Add it to our class loader
val url = new File(SparkFiles.getRootDirectory(), localName).toURI.toURL
if (!urlClassLoader.getURLs().contains(url)) {
logInfo("Adding " + url + " to class loader")
urlClassLoader.addURL(url)
}
}
}
}
}
/** Reports heartbeat and metrics for active tasks to the driver. */
private def reportHeartBeat(): Unit = {
// list of (task id, accumUpdates) to send back to the driver
val accumUpdates = new ArrayBuffer[(Long, Seq[AccumulatorV2[_, _]])]()
val curGCTime = computeTotalGcTime()
for (taskRunner <- runningTasks.values().asScala) {
if (taskRunner.task != null) {
taskRunner.task.metrics.mergeShuffleReadMetrics()
taskRunner.task.metrics.setJvmGCTime(curGCTime - taskRunner.startGCTime)
accumUpdates += ((taskRunner.taskId, taskRunner.task.metrics.accumulators()))
}
}
val message = Heartbeat(executorId, accumUpdates.toArray, env.blockManager.blockManagerId)
try {
val response = heartbeatReceiverRef.askSync[HeartbeatResponse](
message, RpcTimeout(conf, "spark.executor.heartbeatInterval", "10s"))
if (response.reregisterBlockManager) {
logInfo("Told to re-register on heartbeat")
env.blockManager.reregister()
}
heartbeatFailures = 0
} catch {
case NonFatal(e) =>
logWarning("Issue communicating with driver in heartbeater", e)
heartbeatFailures += 1
if (heartbeatFailures >= HEARTBEAT_MAX_FAILURES) {
logError(s"Exit as unable to send heartbeats to driver " +
s"more than $HEARTBEAT_MAX_FAILURES times")
System.exit(ExecutorExitCode.HEARTBEAT_FAILURE)
}
}
}
/**
* Schedules a task to report heartbeat and partial metrics for active tasks to driver.
*/
private def startDriverHeartbeater(): Unit = {
val intervalMs = conf.getTimeAsMs("spark.executor.heartbeatInterval", "10s")
// Wait a random interval so the heartbeats don't end up in sync
val initialDelay = intervalMs + (math.random * intervalMs).asInstanceOf[Int]
val heartbeatTask = new Runnable() {
override def run(): Unit = Utils.logUncaughtExceptions(reportHeartBeat())
}
heartbeater.scheduleAtFixedRate(heartbeatTask, initialDelay, intervalMs, TimeUnit.MILLISECONDS)
}
}
private[spark] object Executor {
// This is reserved for internal use by components that need to read task properties before a
// task is fully deserialized. When possible, the TaskContext.getLocalProperty call should be
// used instead.
val taskDeserializationProps: ThreadLocal[Properties] = new ThreadLocal[Properties]
}
| ron8hu/spark | core/src/main/scala/org/apache/spark/executor/Executor.scala | Scala | apache-2.0 | 38,250 |
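A minimal sketch of the configuration keys read by the Executor above, taken from the `conf.get*` calls and doc comments in the file; the concrete values are illustrative, not recommendations.

```scala
import org.apache.spark.SparkConf

val conf = new SparkConf()
  // The TaskReaper (SPARK-18761) is disabled by default; enable it to supervise killed tasks.
  .set("spark.task.reaper.enabled", "true")
  // How often a reaper re-checks a killed task, and how long before it gives up and kills the
  // executor JVM (a negative killTimeout means the JVM is never killed by the reaper).
  .set("spark.task.reaper.pollingInterval", "10s")
  .set("spark.task.reaper.killTimeout", "120s")
  // Heartbeats go to the driver at this interval; after 60 consecutive failures
  // (HEARTBEAT_MAX_FAILURES, about 10 minutes at 10s) the executor exits.
  .set("spark.executor.heartbeatInterval", "10s")
```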
package im.actor.server.migrations
import java.time.Instant
import akka.actor.{ Props, ActorSystem }
import akka.persistence.RecoveryCompleted
import im.actor.server.event.TSEvent
import im.actor.server.group.{ GroupEvents, GroupOffice }
import im.actor.server.persist.GroupRepo
import slick.driver.PostgresDriver.api._
import scala.concurrent.{ Promise, Future, ExecutionContext }
import scala.concurrent.duration._
object GroupCreatorMemberMigrator extends Migration {
private case object Migrate
protected override def migrationName = "2015-08-29-GroupCreatorMemberMigration"
protected override def migrationTimeout = 1.hour
protected override def startMigration()(implicit system: ActorSystem, db: Database, ec: ExecutionContext): Future[Unit] = {
db.run(GroupRepo.findAllIds) flatMap { groupIds ⇒
Future.sequence(groupIds map { groupId ⇒
val promise = Promise[Unit]()
system.actorOf(Props(classOf[GroupCreatorMemberMigrator], promise, groupId), s"migrate_group_creator_member_${groupId}")
promise.future onFailure {
case e ⇒ system.log.error(e, s"Failed to migrate ${groupId}")
}
promise.future
}) map (_ ⇒ ())
}
}
}
private final class GroupCreatorMemberMigrator(promise: Promise[Unit], groupId: Int) extends PersistentMigrator(promise) {
import GroupCreatorMemberMigrator._
import GroupEvents._
override def persistenceId = GroupOffice.persistenceIdFor(groupId)
def receiveCommand = {
case Migrate ⇒ migrate()
}
var originalCreatorUserId: Int = -1
var creatorUserIdOpt: Option[Int] = None
def receiveRecover = {
case TSEvent(_, e: Created) ⇒
creatorUserIdOpt = Some(e.creatorUserId)
originalCreatorUserId = e.creatorUserId
case TSEvent(_, e: UserLeft) ⇒
if (creatorUserIdOpt.contains(e.userId)) {
creatorUserIdOpt = None
}
case TSEvent(_, e: UserKicked) ⇒
if (creatorUserIdOpt.contains(e.userId)) {
creatorUserIdOpt = None
}
case RecoveryCompleted ⇒
self ! Migrate
}
override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
super.preRestart(reason, message)
promise.failure(reason)
}
private def migrate(): Unit = {
log.warning("Migrating {}", groupId)
creatorUserIdOpt match {
case Some(creatorUserId) ⇒
log.warning("Adding member {}", creatorUserId)
persist(UserInvited(Instant.now(), creatorUserId, creatorUserId))(identity)
persist(UserJoined(Instant.now(), creatorUserId, creatorUserId)) { _ ⇒
log.warning("Migrated")
promise.success(())
context stop self
}
case None ⇒
log.warning("No migration needed, creator left")
promise.success(())
context stop self
}
}
}
| ljshj/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/migrations/GroupCreatorMemberMigrator.scala | Scala | mit | 2,834 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.sql.{AnalysisException, ShowCreateTableSuite}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
class HiveShowCreateTableSuite extends ShowCreateTableSuite with TestHiveSingleton {
private var origCreateHiveTableConfig = false
protected override def beforeAll(): Unit = {
super.beforeAll()
origCreateHiveTableConfig =
spark.conf.get(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT)
spark.conf.set(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT.key, true)
}
protected override def afterAll(): Unit = {
spark.conf.set(
SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT.key,
origCreateHiveTableConfig)
super.afterAll()
}
test("view") {
Seq(true, false).foreach { serde =>
withView("v1") {
sql("CREATE VIEW v1 AS SELECT 1 AS a")
checkCreateView("v1", serde)
}
}
}
test("view with output columns") {
Seq(true, false).foreach { serde =>
withView("v1") {
sql("CREATE VIEW v1 (a, b COMMENT 'b column') AS SELECT 1 AS a, 2 AS b")
checkCreateView("v1", serde)
}
}
}
test("view with table comment and properties") {
Seq(true, false).foreach { serde =>
withView("v1") {
sql(
s"""
|CREATE VIEW v1 (
| c1 COMMENT 'bla',
| c2
|)
|COMMENT 'table comment'
|TBLPROPERTIES (
| 'prop1' = 'value1',
| 'prop2' = 'value2'
|)
|AS SELECT 1 AS c1, '2' AS c2
""".stripMargin
)
checkCreateView("v1", serde)
}
}
}
test("simple hive table") {
withTable("t1") {
sql(
s"""CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING
|)
|TBLPROPERTIES (
| 'prop1' = 'value1',
| 'prop2' = 'value2'
|)
""".stripMargin
)
checkCreateTable("t1", serde = true)
}
}
test("simple external hive table") {
withTempDir { dir =>
withTable("t1") {
sql(
s"""CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING
|)
|LOCATION '${dir.toURI}'
|TBLPROPERTIES (
| 'prop1' = 'value1',
| 'prop2' = 'value2'
|)
""".stripMargin
)
checkCreateTable("t1", serde = true)
}
}
}
test("partitioned hive table") {
withTable("t1") {
sql(
s"""CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING
|)
|COMMENT 'bla'
|PARTITIONED BY (
| p1 BIGINT COMMENT 'bla',
| p2 STRING
|)
""".stripMargin
)
checkCreateTable("t1", serde = true)
}
}
test("hive table with explicit storage info") {
withTable("t1") {
sql(
s"""CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING
|)
|ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
|COLLECTION ITEMS TERMINATED BY '@'
|MAP KEYS TERMINATED BY '#'
|NULL DEFINED AS 'NaN'
""".stripMargin
)
checkCreateTable("t1", serde = true)
}
}
test("hive table with STORED AS clause") {
withTable("t1") {
sql(
s"""CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING
|)
|STORED AS PARQUET
""".stripMargin
)
checkCreateTable("t1", serde = true)
}
}
test("hive table with serde info") {
withTable("t1") {
sql(
s"""CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING
|)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
|WITH SERDEPROPERTIES (
| 'mapkey.delim' = ',',
| 'field.delim' = ','
|)
|STORED AS
| INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
| OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
""".stripMargin
)
checkCreateTable("t1", serde = true)
}
}
test("hive bucketing is supported") {
withTable("t1") {
sql(
s"""CREATE TABLE t1 (a INT, b STRING)
|CLUSTERED BY (a)
|SORTED BY (b)
|INTO 2 BUCKETS
""".stripMargin
)
checkCreateTable("t1", serde = true)
}
}
test("hive partitioned view is not supported") {
withTable("t1") {
withView("v1") {
sql(
s"""
|CREATE TABLE t1 (c1 INT, c2 STRING)
|PARTITIONED BY (
| p1 BIGINT COMMENT 'bla',
| p2 STRING )
""".stripMargin)
createRawHiveTable(
s"""
|CREATE VIEW v1
|PARTITIONED ON (p1, p2)
|AS SELECT * from t1
""".stripMargin
)
val cause = intercept[AnalysisException] {
sql("SHOW CREATE TABLE v1")
}
assert(cause.getMessage.contains(" - partitioned view"))
val causeForSpark = intercept[AnalysisException] {
sql("SHOW CREATE TABLE v1 AS SERDE")
}
assert(causeForSpark.getMessage.contains(" - partitioned view"))
}
}
}
test("SPARK-24911: keep quotes for nested fields in hive") {
withTable("t1") {
val createTable = "CREATE TABLE `t1` (`a` STRUCT<`b`: STRING>) USING hive"
sql(createTable)
val shownDDL = getShowDDL("SHOW CREATE TABLE t1")
assert(shownDDL.substring(0, shownDDL.indexOf(" USING")) ==
"CREATE TABLE `default`.`t1` ( `a` STRUCT<`b`: STRING>)")
checkCreateTable("t1", serde = true)
}
}
private def createRawHiveTable(ddl: String): Unit = {
hiveContext.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog]
.client.runSqlHive(ddl)
}
private def checkCreateSparkTableAsHive(tableName: String): Unit = {
val table = TableIdentifier(tableName, Some("default"))
val db = table.database.get
val hiveTable = spark.sharedState.externalCatalog.getTable(db, table.table)
val sparkDDL = sql(s"SHOW CREATE TABLE ${table.quotedString}").head().getString(0)
// Drops original Hive table.
sql(s"DROP TABLE ${table.quotedString}")
try {
// Creates Spark datasource table using generated Spark DDL.
sql(sparkDDL)
val sparkTable = spark.sharedState.externalCatalog.getTable(db, table.table)
checkHiveCatalogTables(hiveTable, sparkTable)
} finally {
sql(s"DROP TABLE IF EXISTS ${table.table}")
}
}
private def checkHiveCatalogTables(hiveTable: CatalogTable, sparkTable: CatalogTable): Unit = {
def normalize(table: CatalogTable): CatalogTable = {
val nondeterministicProps = Set(
"CreateTime",
"transient_lastDdlTime",
"grantTime",
"lastUpdateTime",
"last_modified_by",
"last_modified_time",
"Owner:",
// The following are hive specific schema parameters which we do not need to match exactly.
"totalNumberFiles",
"maxFileSize",
"minFileSize"
)
table.copy(
createTime = 0L,
lastAccessTime = 0L,
properties = table.properties.filterKeys(!nondeterministicProps.contains(_)).toMap,
stats = None,
ignoredProperties = Map.empty,
storage = table.storage.copy(properties = Map.empty),
provider = None,
tracksPartitionsInCatalog = false
)
}
def fillSerdeFromProvider(table: CatalogTable): CatalogTable = {
table.provider.flatMap(HiveSerDe.sourceToSerDe(_)).map { hiveSerde =>
val newStorage = table.storage.copy(
inputFormat = hiveSerde.inputFormat,
outputFormat = hiveSerde.outputFormat,
serde = hiveSerde.serde
)
table.copy(storage = newStorage)
}.getOrElse(table)
}
assert(normalize(fillSerdeFromProvider(sparkTable)) == normalize(hiveTable))
}
test("simple hive table in Spark DDL") {
withTable("t1") {
sql(
s"""
|CREATE TABLE t1 (
| c1 STRING COMMENT 'bla',
| c2 STRING
|)
|TBLPROPERTIES (
| 'prop1' = 'value1',
| 'prop2' = 'value2'
|)
|STORED AS orc
""".stripMargin
)
checkCreateSparkTableAsHive("t1")
}
}
test("show create table as serde can't work on data source table") {
withTable("t1") {
sql(
s"""
|CREATE TABLE t1 (
| c1 STRING COMMENT 'bla',
| c2 STRING
|)
|USING orc
""".stripMargin
)
val cause = intercept[AnalysisException] {
checkCreateTable("t1", serde = true)
}
assert(cause.getMessage.contains("Use `SHOW CREATE TABLE` without `AS SERDE` instead"))
}
}
test("simple external hive table in Spark DDL") {
withTempDir { dir =>
withTable("t1") {
sql(
s"""
|CREATE TABLE t1 (
| c1 STRING COMMENT 'bla',
| c2 STRING
|)
|LOCATION '${dir.toURI}'
|TBLPROPERTIES (
| 'prop1' = 'value1',
| 'prop2' = 'value2'
|)
|STORED AS orc
""".stripMargin
)
checkCreateSparkTableAsHive("t1")
}
}
}
test("hive table with STORED AS clause in Spark DDL") {
withTable("t1") {
sql(
s"""
|CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING
|)
|STORED AS PARQUET
""".stripMargin
)
checkCreateSparkTableAsHive("t1")
}
}
test("hive table with nested fields with STORED AS clause in Spark DDL") {
withTable("t1") {
sql(
s"""
|CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING,
| c3 STRUCT <s1: INT, s2: STRING>
|)
|STORED AS PARQUET
""".stripMargin
)
checkCreateSparkTableAsHive("t1")
}
}
test("hive table with unsupported fileformat in Spark DDL") {
withTable("t1") {
sql(
s"""
|CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING
|)
|STORED AS RCFILE
""".stripMargin
)
val cause = intercept[AnalysisException] {
checkCreateSparkTableAsHive("t1")
}
assert(cause.getMessage.contains("unsupported serde configuration"))
}
}
test("hive table with serde info in Spark DDL") {
withTable("t1") {
sql(
s"""
|CREATE TABLE t1 (
| c1 INT COMMENT 'bla',
| c2 STRING
|)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
|STORED AS
| INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
| OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
""".stripMargin
)
checkCreateSparkTableAsHive("t1")
}
}
test("partitioned, bucketed hive table in Spark DDL") {
withTable("t1") {
sql(
s"""
|CREATE TABLE t1 (
| emp_id INT COMMENT 'employee id', emp_name STRING,
| emp_dob STRING COMMENT 'employee date of birth', emp_sex STRING COMMENT 'M/F'
|)
|COMMENT 'employee table'
|PARTITIONED BY (
| emp_country STRING COMMENT '2-char code', emp_state STRING COMMENT '2-char code'
|)
|CLUSTERED BY (emp_sex) SORTED BY (emp_id ASC) INTO 10 BUCKETS
|STORED AS ORC
""".stripMargin
)
checkCreateSparkTableAsHive("t1")
}
}
test("show create table for transactional hive table") {
withTable("t1") {
sql(
s"""
|CREATE TABLE t1 (
| c1 STRING COMMENT 'bla',
| c2 STRING
|)
|TBLPROPERTIES (
| 'transactional' = 'true',
| 'prop1' = 'value1',
| 'prop2' = 'value2'
|)
|CLUSTERED BY (c1) INTO 10 BUCKETS
|STORED AS ORC
""".stripMargin
)
val cause = intercept[AnalysisException] {
sql("SHOW CREATE TABLE t1")
}
assert(cause.getMessage.contains(
"SHOW CREATE TABLE doesn't support transactional Hive table"))
}
}
}
|
chuckchen/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveShowCreateTableSuite.scala
|
Scala
|
apache-2.0
| 13,730 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.rinterpreter
import java.nio.file.{Files, Paths}
import java.util._
import org.apache.commons.codec.binary.{Base64, StringUtils}
import org.apache.zeppelin.interpreter.Interpreter.FormType
import org.apache.zeppelin.interpreter.{InterpreterContext, _}
import org.apache.zeppelin.scheduler.Scheduler
import org.apache.zeppelin.spark.SparkInterpreter
import org.jsoup.Jsoup
import org.jsoup.nodes._
import org.jsoup.select.Elements
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.JavaConversions._
import scala.io.Source
abstract class RInterpreter(properties : Properties, startSpark : Boolean = true) extends Interpreter (properties) {
protected val logger: Logger = RInterpreter.logger
logger.info("Initialising an RInterpreter of class " + this.getClass.getName)
def getrContext: RContext = rContext
protected lazy val rContext : RContext = synchronized{ RContext(property) }
def open: Unit = rContext.synchronized {
logger.info("RInterpreter opening")
rContext.setInterpreterGroup(getInterpreterGroup)
rContext.open(startSpark)
rContext.testRPackage("htmltools", message =
"""You can continue
| without it, but some interactive visualizations will fail.
        | You can install it from cran.""".stripMargin)
rContext.testRPackage("repr", license = true, message =
"""You can continue
        | without it, but some forms of output from the REPL may not appear properly.""".stripMargin)
rContext.testRPackage("base64enc", license = true, message =
"""You can continue
        | without it, but the REPL may not show images properly.""".stripMargin)
rContext.testRPackage("evaluate", license = false, message =
"""
|The REPL needs this to run. It can be installed from CRAN
| Thanks to Hadley Wickham and Yihui Xie for graciously making evaluate available under an Apache-compatible
| license so it can be used with this project.""".stripMargin)
}
def close: Unit = {
rContext.close
}
def getProgress(context :InterpreterContext): Int = rContext.getProgress
def cancel(context:InterpreterContext) : Unit = {}
def getFormType: FormType = {
return FormType.NONE
}
override def getScheduler : Scheduler = rContext.getScheduler
// TODO: completion is disabled because it could not be tested with current Zeppelin code
def completion(buf :String,cursor : Int) : List[String] = Array[String]("").toList
private[rinterpreter] def hiddenCompletion(buf :String,cursor : Int) : List[String] =
rContext.evalS1(s"""
|rzeppelin:::.z.completion("$buf", $cursor)
""".stripMargin).toList
}
object RInterpreter {
private val logger: Logger = LoggerFactory.getLogger(getClass)
logger.debug("logging inside the RInterpreter singleton")
// These are the additional properties we need on top of the ones provided by the spark interpreters
lazy val props: Map[String, InterpreterProperty] = new InterpreterPropertyBuilder()
.add("rhadoop.cmd", SparkInterpreter.getSystemDefault("rhadoop.cmd", "HADOOP_CMD", ""), "Usually /usr/bin/hadoop")
    .add("rhadoop.streamingjar", SparkInterpreter.getSystemDefault("rhadoop.streamingjar", "HADOOP_STREAMING", ""), "Usually /usr/lib/hadoop/contrib/streaming/hadoop-streaming-<version>.jar")
.add("rscala.debug", SparkInterpreter.getSystemDefault("rscala.debug","RSCALA_DEBUG", "false"), "Whether to turn on rScala debugging") // TEST: Implemented but not tested
.add("rscala.timeout", SparkInterpreter.getSystemDefault("rscala.timeout","RSCALA_TIMEOUT", "60"), "Timeout for rScala") // TEST: Implemented but not tested
.build
def getProps() = {
props
}
// Some R interactive visualization packages insist on producing HTML that refers to javascript
// or css by file path. These functions are intended to load those files and embed them into the
// HTML as Base64 encoded DataURIs.
//FIXME These don't error but may not yet properly be converting script links
def scriptToBase(doc : Element, testAttr : String, tag : String, mime : String): Unit = {
val elems : Elements = doc.getElementsByTag(tag)
elems.filter( (e : Element) => {
e.attributes().hasKey(testAttr) && e.attr(testAttr) != "" && e.attr(testAttr).slice(0,1) == "/"
}
).foreach(scriptToBase(_, testAttr, mime))
}
def scriptToBase(node : Element, field : String, mime : String) : Unit = node.attr(field) match {
case x if Files.exists(Paths.get(x)) => node.attr(field, dataURI(x, mime))
case x if x.slice(0,4) == "http" => {}
case x if x.contains("ajax") => {}
case x if x.contains("googleapis") => {}
case x if x.slice(0,2) == "//" => node.attr(field, "http:" + x)
case _ => {}
}
def dataURI(file : String, mime : String) : String = {
    val data: String = Source.fromFile(file).getLines().mkString("\n")
s"""data:${mime};base64,""" + StringUtils.newStringUtf8(Base64.encodeBase64(data.getBytes(), false))
}
// The purpose here is to deal with knitr producing HTML with script and css tags outside the <body>
  def processHTML(input: Array[String]): String = processHTML(input.mkString("\n"))
def processHTML(input: String) : String = {
val doc : Document = Jsoup.parse(input)
processHTML(doc)
}
// private var counter : Int = 0
// private def writeDebug(html : String) : Unit = {
// val file = new File( s"""/tmp/debug${counter}.html""")
// counter = counter + 1
// val bw = new BufferedWriter(new FileWriter(file))
// bw.write(html)
// bw.close()
// }
private def processHTML(doc : Document) : String = {
// writeDebug(doc.outerHtml())
val bod : Element = doc.body()
val head : Element = doc.head()
// Try to ignore the knitr script that breaks zeppelin display
head.getElementsByTag("script").reverseIterator.foreach(bod.prependChild(_))
// head.getElementsByTag("style").map(_.attr("scoped", true)).foreach(bod.prependChild(_))
// Only get css from head if it links to a file
head.getElementsByTag("link").foreach(bod.prependChild(_))
scriptToBase(bod, "href", "link", "text/css")
scriptToBase(bod, "src", "script", "text/javascript")
// writeDebug(bod.ownerDocument().outerHtml())
bod.html()
}
}
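
// Hedged usage sketch (not part of the original file): it runs a small
// knitr-style page through RInterpreter.processHTML to show that <script> and
// <link> tags declared in <head> get prepended to the returned body markup and
// that protocol-relative "//" script URLs gain an explicit "http:" prefix.
// The object name RInterpreterHtmlDemo is made up for illustration.
object RInterpreterHtmlDemo extends App {
  val page =
    """<html>
      |  <head>
      |    <script src="//cdn.example.org/widget.js"></script>
      |    <link rel="stylesheet" href="http://example.org/style.css"/>
      |  </head>
      |  <body><p>hello from knitr</p></body>
      |</html>""".stripMargin

  // Only the body HTML comes back, ready to embed in a notebook paragraph.
  println(RInterpreter.processHTML(page))
}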
|
elbamos/Zeppelin-With-R
|
r/src/main/scala/org/apache/zeppelin/rinterpreter/RInterpreter.scala
|
Scala
|
apache-2.0
| 7,074 |
package test
class A
|
pdalpra/sbt
|
sbt/src/sbt-test/source-dependencies/transitive-memberRef/src/main/scala/A.scala
|
Scala
|
bsd-3-clause
| 22 |
package clashcode.wordguess.logic
import scala.io.Source
import org.apache.commons.lang.StringEscapeUtils
import java.io.Writer
import java.io.File
import java.io.FileWriter
trait GameStatePersistence {
val separatorChar = '§'
def loadFrom(src: Source): GameState = {
val wordStates = (for {
line <- src.getLines
if (!line.trim().isEmpty())
} yield {
val parts = line.split(separatorChar)
if (parts.length == 2) {
val solved = parts(0) == "S"
// Scala src does one escaping for us
val word = StringEscapeUtils.unescapeJava(
StringEscapeUtils.unescapeJava(parts(1)))
WordState(word, solved)
} else {
println("Problem with line: " + StringEscapeUtils.escapeJava(line))
WordState("?", solved=true)
}
}).toList
GameState(wordStates)
}
def write(state: GameState, writer: Writer) {
for (wordState <- state.wordStates) {
val stateLetter = if (wordState.solved) "S" else "U"
val escapedWord = StringEscapeUtils.escapeJava(wordState.word)
      writer.write(s"$stateLetter$separatorChar$escapedWord\n")
}
}
def ensureGameStateFile(gameStatePath: String, sourceTextPath: String, minGameWordLength:Int) {
val gameStateFile = new File(gameStatePath)
val sourceTextFile = new File(sourceTextPath)
assert(sourceTextFile.exists(), "File not found: " + sourceTextFile.getAbsolutePath())
if (!gameStateFile.exists()) {
val src = Source.fromFile(sourceTextFile)
val gameState = GameStateGenerator.fromSource(src, minGameWordLength)
writeToFile(gameState, gameStateFile)
}
}
def loadFromFile(path: String): GameState =
loadFromFile(new File(path))
def writeToFile(state: GameState, path: String): Unit =
writeToFile(state, new File(path))
def loadFromFile(file: File): GameState = {
val src = Source.fromFile(file)
loadFrom(src)
}
def writeToFile(state: GameState, file: File) {
val writer = new FileWriter(file)
write(state, writer)
writer.close()
}
}
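
// Hedged usage sketch (not part of the original file): it round-trips a tiny
// GameState through the "S§word" / "U§word" line format implemented above.
// GameState and WordState are the surrounding project's types (already used by
// the trait); the object name GameStatePersistenceDemo is made up.
object GameStatePersistenceDemo extends GameStatePersistence with App {
  val original = GameState(List(
    WordState("hello", solved = true),
    WordState("world", solved = false)))

  // Serialize: one line per word, e.g. "S§hello" followed by "U§world".
  val out = new java.io.StringWriter()
  write(original, out)
  print(out.toString)

  // Parse the same text back; words and solved flags survive the round trip.
  val restored = loadFrom(Source.fromString(out.toString))
  println(restored.wordStates.map(ws => ws.word -> ws.solved))
}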
|
scala-vienna/wordguess-server
|
app/clashcode/wordguess/logic/GameStatePersistence.scala
|
Scala
|
mit
| 2,070 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.utils.tf.loaders
import java.nio.ByteOrder
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity, DataFormat}
import com.intel.analytics.bigdl.dllib.nn.tf.{Conv3DBackpropFilterV2 => Conv3DBackpropFilterV2Ops}
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.tf.Context
import org.tensorflow.framework.NodeDef
import scala.reflect.ClassTag
class Conv3DBackpropFilterV2 extends TensorflowOpsLoader {
import Utils._
override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
val attributes = nodeDef.getAttrMap
val (pT, pW, pH) =
if (getString(attributes, "padding") == "SAME") {
(-1, -1, -1)
} else {
(0, 0, 0)
}
val strideList = getIntList(attributes, "strides")
require(strideList.head == 1, s"not support strides on batch")
val format = getString(attributes, "data_format")
val conv = format match {
case "NDHWC" =>
require(strideList(4) == 1, s"not support strides on depth")
val dT = strideList(1)
val dW = strideList(2)
val dH = strideList(3)
Conv3DBackpropFilterV2Ops[T](dT, dW, dH, pT, pW, pH, DataFormat.NHWC)
case "NCDHW" =>
require(strideList(1) == 1, s"not support strides on depth")
val dT = strideList(2)
val dW = strideList(3)
val dH = strideList(4)
Conv3DBackpropFilterV2Ops[T](dT, dW, dH, pT, pW, pH, DataFormat.NCHW)
case _ =>
throw new IllegalArgumentException(s"not supported data format: $format")
}
conv.asInstanceOf[AbstractModule[Activity, Activity, T]]
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/Conv3DBackpropFilterV2.scala
|
Scala
|
apache-2.0
| 2,460 |
package scalanlp.tensor.sparse
import scalala.scalar.Scalar
import scalala.tensor.dense.DenseVector
import scalala.tensor.mutable.{VectorLike, Vector}
import java.util.Arrays
import scalala.operators.{OpSub, OpAdd, BinaryOp}
import scalala.generic.collection.{CanMapKeyValuePairs, CanMapValues}
/**
*
* More or less taken from old scalala. -- David
*
* A sparse vector implementation based on an array of indeces and
* an array of values. Inserting a new value takes on the order
* of the number of non-zeros. Getting a value takes on the order
* of the log of the number of non-default values, with special
* constant time shortcuts for getting the previously accessed
* element or its successor. Note that this class is not threadsafe.
*
* @author dramage, dlwh
*/
class OldSparseVector(domainSize : Int, var default: Double = 0.0, initialNonzeros : Int = 0) extends Vector[Double] with VectorLike[Double,OldSparseVector] with Serializable {
if (domainSize < 0)
throw new IllegalArgumentException("Invalid domain size: "+domainSize);
val scalar = implicitly[Scalar[Double]];
/** Data array will be reassigned as the sparse vector grows. */
private var data : Array[Double] = new Array[Double](initialNonzeros);
/** Index will be reassigned as the sparse vector grows. */
private var index : Array[Int] = new Array[Int](initialNonzeros);
  /** How many entries of data and index are used. */
private var used : Int = 0;
/** The previous index and offset found by apply or update. */
private var lastOffset = -1;
/** Use the given index and data arrays, of which the first inUsed are valid. */
private def use(inIndex : Array[Int], inData : Array[Double], inUsed : Int) = {
if (inIndex.size != inData.size)
throw new IllegalArgumentException("Index and data sizes do not match");
// I spend 7% of my time in this call. It's gotta go.
//if (inIndex.contains((x:Int) => x < 0 || x > size))
// throw new IllegalArgumentException("Index array contains out-of-range index");
if (inIndex == null || inData == null)
throw new IllegalArgumentException("Index and data must be non-null");
if (inIndex.size < inUsed)
throw new IllegalArgumentException("Used is greater than provided array");
// I spend 7% of my time in this call. It's gotta go. and this one.
//for (i <- 1 until used; if (inIndex(i-1) > inIndex(i))) {
// throw new IllegalArgumentException("Input index is not sorted at "+i);
//}
//for (i <- 0 until used; if (inIndex(i) < 0)) {
// throw new IllegalArgumentException("Input index is less than 0 at "+i);
//}
data = inData;
index = inIndex;
used = inUsed;
lastOffset = -1;
}
override def size = domainSize;
def length = domainSize;
def activeSize = used;
def indexAt(offset: Int) = {
index(offset)
}
def valueAt(offset: Int) = {
data(offset)
}
def activeIterator = new Iterator[(Int,Double)] {
var offset = 0;
override def hasNext = offset < used;
override def next() = {
val rv = (index(offset),data(offset));
offset += 1;
rv;
}
}
def activeKeys = index.take(used).iterator;
def activeValues = data.take(used).iterator;
  /** Zeros this vector, dropping any stored entries. */
def zero() = {
use(new Array[Int](initialNonzeros),
new Array[Double](initialNonzeros), 0);
}
/** Records that the given index was found at this.index(offset). */
protected final def found(index : Int, offset : Int) : Int = {
lastOffset = offset;
offset;
}
/**
* Returns the offset into index and data for the requested vector
* index. If the requested index is not found, the return value is
* negative and can be converted into an insertion point with -(rv+1).
*/
def findOffset(i : Int) : Int = {
if (i < 0)
      throw new IndexOutOfBoundsException("index is negative (" + i + ")");
if (i >= size)
      throw new IndexOutOfBoundsException("index >= size (" + i + " >= " + size + ")");
val lastIndex = if(lastOffset < 0) -1 else index(lastOffset);
if (i == lastIndex) {
// previous element; don't need to update lastOffset
lastOffset;
} else if (used == 0) {
// empty list; do nothing
-1;
} else {
// regular binary search
var begin = 0;
var end = used - 1;
// narrow the search if we have a previous reference
if (lastIndex >= 0 && lastOffset >= 0) {
if (i < lastIndex) {
// in range preceding last request
end = lastOffset;
} else {
// in range following last request
begin = lastOffset;
if (begin + 1 <= end && index(begin + 1) == i) {
// special case: successor of last request
return found(i, begin + 1);
}
}
}
// Simple optimization:
// the i'th entry can't be after entry i.
if(end > i)
end = i;
// this assert is for debug only
//assert(begin >= 0 && end >= begin,
// "Invalid range: "+begin+" to "+end);
var mid = (end + begin) >> 1;
while (begin <= end) {
mid = (end + begin) >> 1;
if (index(mid) < i)
begin = mid + 1;
else if (index(mid) > i)
end = mid - 1;
else
return found(i, mid);
}
// no match found, return insertion point
if (i <= index(mid))
-(mid)-1; // Insert here (before mid)
else
-(mid + 1)-1; // Insert after mid
}
}
override def apply(i : Int) : Double = {
val offset = findOffset(i);
if (offset >= 0) data(offset) else default;
}
/**
* Sets the given value at the given index if the value is not
* equal to the current default. The data and
* index arrays will be grown to support the insertion if
* necessary. The growth schedule doubles the amount
* of allocated memory at each allocation request up until
   * the sparse vector contains 1024 elements, at which point
* the growth is additive: an additional n * 1024 spaces will
* be allocated for n in 1,2,4,8,16. The largest amount of
* space added to this vector will be an additional 16*1024*(8+4) =
* 196608 bytes, although more space is needed temporarily
* while moving to the new arrays.
*/
def update(i : Int, value : Double) = {
val offset = findOffset(i);
if (offset >= 0) {
// found at offset
data(offset) = value;
} else if (value != default) {
// need to insert at position -(offset+1)
val insertPos = -(offset+1);
used += 1;
var newIndex = index;
var newData = data;
if (used > data.length) {
val newLength = {
if (data.length < 8) { 8 }
else if (data.length > 16*1024) { data.length + 16*1024 }
else if (data.length > 8*1024) { data.length + 8*1024 }
else if (data.length > 4*1024) { data.length + 4*1024 }
else if (data.length > 2*1024) { data.length + 2*1024 }
else if (data.length > 1*1024) { data.length + 1*1024 }
else { data.length * 2 }
}
// copy existing data into new arrays
newIndex = new Array[Int](newLength);
newData = new Array[Double](newLength);
System.arraycopy(index, 0, newIndex, 0, insertPos);
System.arraycopy(data, 0, newData, 0, insertPos);
}
// make room for insertion
System.arraycopy(index, insertPos, newIndex, insertPos + 1, used - insertPos - 1);
System.arraycopy(data, insertPos, newData, insertPos + 1, used - insertPos - 1);
// assign new value
newIndex(insertPos) = i;
newData(insertPos) = value;
// record the insertion point
found(i,insertPos);
// update pointers
index = newIndex;
data = newData;
}
}
/** Compacts the vector by removing all stored default values. */
def compact() {
val _default = default;
val nz = { // number of non-zeros
var _nz = 0;
var i = 0;
while (i < used) {
if (data(i) != _default) {
_nz += 1;
}
i += 1;
}
_nz;
}
val newData = new Array[Double](nz);
val newIndex = new Array[Int](nz);
var i = 0;
var o = 0;
while (i < used) {
if (data(i) != _default) {
newData(o) = data(i);
newIndex(o) = index(i);
o += 1;
}
i += 1;
}
use(newIndex, newData, nz);
}
def +=(c: Double):this.type = {
default += c;
var offset = 0;
while(offset < used) {
data(offset) += c;
offset += 1;
}
this
}
def -=(c: Double):this.type = {
    default -= c;
var offset = 0;
while(offset < used) {
data(offset) -= c;
offset += 1;
}
this
}
def *=(c: Double):this.type = {
    default *= c;
var offset = 0;
while(offset < used) {
data(offset) *= c;
offset += 1;
}
this
}
def /=(c: Double):this.type = {
    default /= c;
var offset = 0;
while(offset < used) {
data(offset) /= c;
offset += 1;
}
this
}
/** Optimized implementation for SparseVector dot DenseVector. */
def dot(that : DenseVector[Double]) : Double = {
this.checkDomain(that.domain)
val thisDefault = this.default;
var sum = 0.0;
if (thisDefault == 0) {
var o = 0;
while (o < this.used) {
sum += (this.data(o) * that.data(this.index(o)));
o += 1;
}
} else {
var o1 = 0;
var i2 = 0;
while (o1 < this.used) {
val i1 = this.index(o1);
if (i1 == i2) {
sum += (this.data(o1) * that.data(i2));
o1 += 1;
i2 += 1;
      } else { // i1 > i2
sum += (thisDefault * that.data(i2));
i2 += 1;
}
}
      // consume remainder of that
while (i2 < that.data.length) {
sum += (thisDefault * that.data(i2));
i2 += 1;
}
}
sum;
}
/** Optimized implementation for SparseVector dot SparseVector. */
def dot(that : OldSparseVector) : Double = {
checkDomain(that.domain);
var o1 = 0; // offset into this.data, this.index
var o2 = 0; // offset into that.data, that.index
var sum = 0.0; // the dot product
val thisDefault = this.default;
val thatDefault = that.default;
if (thisDefault == 0 && thatDefault == 0) {
while (o1 < this.used && o2 < that.used) {
val i1 = this.index(o1);
val i2 = that.index(o2);
if (i1 == i2) {
sum += (this.data(o1) * that.data(o2));
o1 += 1;
o2 += 1;
} else if (i1 < i2) {
o1 += 1;
      } else { // i1 > i2
o2 += 1;
}
}
} else if (thisDefault == 0) { // && thatDefault != 0
while (o1 < this.used && o2 < that.used) {
val i1 = this.index(o1);
val i2 = that.index(o2);
if (i1 == i2) {
sum += (this.data(o1) * that.data(o2));
o1 += 1;
o2 += 1;
} else if (i1 < i2) {
sum += (thatDefault * this.data(o1));
o1 += 1;
      } else { // i1 > i2
o2 += 1;
}
}
// consume remainder of this
while (o1 < this.used) {
sum += (thatDefault * this.data(o1));
o1 += 1;
}
} else if (thatDefault == 0) { // thisDefault != 0
while (o1 < this.used && o2 < that.used) {
val i1 = this.index(o1);
val i2 = that.index(o2);
if (i1 == i2) {
sum += (this.data(o1) * that.data(o2));
o1 += 1;
o2 += 1;
} else if (i1 < i2) {
o1 += 1;
      } else { // i1 > i2
sum += (thisDefault * that.data(o2));
o2 += 1;
}
}
// consume remainder of that
while (o2 < that.used) {
sum += (thisDefault * that.data(o2));
o2 += 1;
}
} else { // thisDefault != 0 && thatDefault != 0
var counted = 0;
while (o1 < this.used && o2 < that.used) {
val i1 = this.index(o1);
val i2 = that.index(o2);
if (i1 == i2) {
sum += (this.data(o1) * that.data(o2));
o1 += 1;
o2 += 1;
counted += 1;
} else if (i1 < i2) {
sum += (thatDefault * this.data(o1));
o1 += 1;
counted += 1;
      } else { // i1 > i2
sum += (thisDefault * that.data(o2));
o2 += 1;
counted += 1;
}
}
// consume remainder of this
while (o1 < this.used) {
sum += (thatDefault * this.data(o1));
o1 += 1;
counted += 1;
}
// consume remainder of that
while (o2 < that.used) {
sum += (thisDefault * that.data(o2));
o2 += 1;
counted += 1;
}
// add in missing product total
sum += ((size - counted) * (thisDefault * thatDefault));
}
sum;
}
def copy: OldSparseVector = {
val r = new OldSparseVector(length,default);
r.use(Arrays.copyOf(index,index.length),Arrays.copyOf(data,data.length), used);
r
}
}
object OldSparseVector {
implicit val canAddDouble: BinaryOp[OldSparseVector,Double,OpAdd,OldSparseVector] = new BinaryOp[OldSparseVector,Double,OpAdd,OldSparseVector] {
def opType = OpAdd;
def apply(v1: OldSparseVector, v2: Double) = {
v1.copy += v2
}
}
implicit val canSubDouble: BinaryOp[OldSparseVector,Double,OpSub,OldSparseVector] = new BinaryOp[OldSparseVector,Double,OpSub,OldSparseVector] {
def opType = OpSub;
def apply(v1: OldSparseVector, v2: Double) = {
v1.copy -= v2
}
}
implicit val canMapValues: CanMapValues[OldSparseVector,Double,Double,OldSparseVector] = new CanMapValues[OldSparseVector,Double,Double,OldSparseVector] {
def mapNonZero(from: OldSparseVector, fn: (Double) => Double) = {
val res = from.copy;
var offset = 0;
while(offset < res.activeSize) {
res(res.indexAt(offset)) = fn(res.valueAt(offset));
offset += 1;
}
if(res.default != 0.0) {
res.default = fn(res.default);
}
res
}
def map(from: OldSparseVector, fn: (Double) => Double) = {
val res = from.copy;
var offset = 0;
while(offset < res.activeSize) {
res(res.indexAt(offset)) = fn(res.valueAt(offset));
offset += 1;
}
res.default = fn(res.default);
res
}
}
/** Optimized base class for mapping dense columns. */
implicit val canMapKeyValuePairsSparseVector = new CanMapKeyValuePairs[OldSparseVector,Int,Double,Double,OldSparseVector] {
def map(from: OldSparseVector, fn: (Int, Double) => Double) = {
val res = new OldSparseVector(from.size,from.default,from.activeSize)
      for( (k,v) <- from.pairs) {
res(k) = fn(k,v)
}
res
}
def mapNonZero(from: OldSparseVector, fn: (Int, Double) => Double) = {
val res = new OldSparseVector(from.size,from.default,from.activeSize)
      for( (k,v) <- from.pairsIteratorNonZero) {
res(k) = fn(k,v)
}
res
}
}
}
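
// Hedged usage sketch (not part of the original file): it exercises the
// behaviour documented above -- sparse updates, reads that fall back to the
// default value, and the optimized sparse/sparse dot product. The object name
// OldSparseVectorDemo is made up for illustration.
object OldSparseVectorDemo extends App {
  val v = new OldSparseVector(10)
  v(2) = 3.0
  v(7) = 4.0

  println(v(2))         // 3.0: stored entry, located by binary search
  println(v(5))         // 0.0: unstored index falls back to the default
  println(v.activeSize) // 2: only non-default entries are kept

  val w = new OldSparseVector(10)
  w(2) = 2.0
  println(v dot w)      // 6.0: only the shared index 2 contributes
}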
|
MLnick/scalanlp-core
|
data/src/main/scala/scalanlp/tensor/sparse/OldSparseVector.scala
|
Scala
|
apache-2.0
| 15,158 |
package com.mentatlabs.nsa
package scalac
package options
/* -Xprint-pos
* ===========
* 2.1.7 - 2.5.1: Print tree positions (as offsets) // previously -Xprintpos
* 2.6.0 - 2.8.2: Print tree positions (as offsets)
* 2.9.0 - 2.12.0: Print tree positions, as offsets.
*/
case object ScalacXPrintPos
extends ScalacOptionBoolean("-Xprint-pos", ScalacVersions.`2.6.0`)
|
mentat-labs/sbt-nsa
|
nsa-core/src/main/scala/com/mentatlabs/nsa/scalac/options/advanced/ScalacXPrintPos.scala
|
Scala
|
bsd-3-clause
| 385 |
trait Base {
type exp <: Exp
trait Exp { }
}
trait BaseNum extends Base {
class Num(val value: Int) extends Exp { }
type BaseNum = Num
}
trait BasePlus extends Base {
class Plus(val left: exp, val right: exp) extends Exp { }
type BasePlus = Plus
}
trait BaseNeg extends Base {
class Neg(val term: exp) extends Exp { }
type BaseNeg = Neg
}
trait BasePlusNeg extends BasePlus with BaseNeg
//----------------------EVAL
trait Eval extends Base {
type exp <: Exp
trait Exp extends super.Exp {
def eval: Int
}
}
//----------------------EVALNUM
trait EvalNum extends BaseNum with Eval {
trait NumBehavior {
self: BaseNum =>
def eval: Int = value
}
class Num(v: Int) extends BaseNum(v) with NumBehavior with Exp
}
//----------------------EVALPLUS
trait EvalPlus extends BasePlus with Eval {
trait PlusBehavior {
self: BasePlus =>
def eval = left.eval + right.eval;
}
class Plus(l: exp, r: exp) extends BasePlus(l, r) with PlusBehavior with Exp
}
//----------------------EVALNEG
trait EvalNeg extends BaseNeg with Eval {
trait NegBehavior {
self: BaseNeg =>
def eval = - term.eval;
}
class Neg(t: exp) extends BaseNeg(t) with NegBehavior with Exp
}
//----------------------EVALNUMPLUSNEG
trait EvalNumPlusNeg extends EvalNum with EvalPlus with EvalNeg
//----------------------SHOW
trait Show extends Base {
type exp <: Exp
trait Exp extends super.Exp {
def show: String
}
}
//----------------------SHOWNUM
trait ShowNum extends BaseNum with Show {
trait NumBehavior {
self: BaseNum =>
def show: String = value.toString
}
class Num(v: Int) extends BaseNum(v) with NumBehavior with Exp
}
//----------------------SHOWPLUS
trait ShowPlus extends BasePlus with Show {
trait PlusBehavior {
self: BasePlus =>
def show = left.show + "+" + right.show;
}
class Plus(l: exp, r: exp) extends BasePlus(l, r) with PlusBehavior with Exp
}
//----------------------SHOWNEG
trait ShowNeg extends BaseNeg with Show {
trait NegBehavior {
self: BaseNeg =>
def show = "-(" + term.show + ")";
}
class Neg(t: exp) extends BaseNeg(t) with NegBehavior with Exp
}
//----------------------SHOWNUMPLUSNEG
trait ShowNumPlusNeg extends ShowNum with ShowPlus with ShowNeg
//----------------------TEST
object ShowNumPlusNegInstance extends ShowNumPlusNeg {
override type exp = Exp
}
object Test extends App {
import ShowNumPlusNegInstance._
println("ShowNumPlusNegInstance:")
println(new Num(3).show)
println(new Plus(new Num(3), new Num(4)).show)
println(new Neg(new Num(13)).show)
}
//----------------------EVAL+SHOW (Instantiation)
trait EvalShow extends EvalNumPlusNeg with ShowNumPlusNeg {
type exp <: Exp
trait Exp extends super[EvalNumPlusNeg].Exp with super[ShowNumPlusNeg].Exp
trait NumBehavior extends super[EvalNumPlusNeg].NumBehavior with super[ShowNumPlusNeg].NumBehavior {
self: BaseNum =>
}
class Num(v: Int) extends BaseNum(v) with NumBehavior with Exp
trait PlusBehavior extends super[EvalNumPlusNeg].PlusBehavior with super[ShowNumPlusNeg].PlusBehavior {
self: BasePlus =>
}
class Plus(l: exp, r: exp) extends BasePlus(l, r) with PlusBehavior with Exp
trait NegBehavior extends super[EvalNumPlusNeg].NegBehavior with super[ShowNumPlusNeg].NegBehavior {
self: BaseNeg =>
}
class Neg(t: exp) extends BaseNeg(t) with NegBehavior with Exp
}
//----------------------TEST
object EvalShowInstance extends EvalShow {
override type exp = Exp
}
object Test2 extends App {
import EvalShowInstance._
println("EvalShowInstance:")
println(new Num(3).show)
println(new Plus(new Num(3), new Num(4)).show)
println(new Neg(new Num(13)).show)
println(new Num(3).eval)
println(new Plus(new Num(3), new Num(4)).eval)
println(new Neg(new Num(13)).eval)
}
//----------------------DBLE
trait Dble extends Base {
type exp <: Exp
trait Exp extends super.Exp {
def dble: exp
}
}
//----------------------DBLENUM
trait DbleNum extends BaseNum with Dble {
type exp <: Exp
trait Exp extends super[BaseNum].Exp with super[Dble].Exp
trait NumBehavior {
self: BaseNum =>
def dble = Num(value * 2)
}
def Num(v: Int): exp
class Num(v: Int) extends super.Num(v) with NumBehavior
}
//----------------------DBLEPLUS
trait DblePlus extends BasePlus with Dble {
type exp <: Exp
trait Exp extends super[BasePlus].Exp with super[Dble].Exp
trait PlusBehavior {
self: BasePlus =>
def dble = Plus(left.dble, right.dble)
}
def Plus(l: exp, r: exp): exp
class Plus(l: exp, r: exp) extends super.Plus(l, r) with PlusBehavior with Exp
}
//----------------------DBLENEG
trait DbleNeg extends BaseNeg with Dble {
type exp <: Exp
trait Exp extends super[BaseNeg].Exp with super[Dble].Exp
trait NegBehavior {
self: BaseNeg =>
def dble = Neg(term.dble)
}
def Neg(t: exp): exp
class Neg(t: exp) extends super.Neg(t) with NegBehavior with Exp
}
//----------------------DBLENUMPLUSNEG
trait DbleNumPlusNeg extends DbleNum with DblePlus with DbleNeg {
type exp <: Exp;
trait Exp extends super[DbleNum].Exp with super[DblePlus].Exp with super[DbleNeg].Exp
class Num(v: Int) extends super[DbleNum].Num(v) with Exp
class Plus(l: exp, r: exp) extends super[DblePlus].Plus(l, r) with Exp
class Neg(t: exp) extends super[DbleNeg].Neg(t) with Exp
}
object DbleNumPlusNegInstance extends DbleNumPlusNeg {
override type exp = Exp
def Num(v: Int) = new Num(v)
def Plus(l: exp, r: exp) = new Plus(l,r)
def Neg(t: exp) = new Neg(t)
}
trait EvalShowDble extends EvalShow with DbleNumPlusNeg {
type exp <: Exp;
trait Exp extends super[EvalShow].Exp with super[DbleNumPlusNeg].Exp
trait NumBehavior extends super[EvalShow].NumBehavior with super[DbleNumPlusNeg].NumBehavior {
self: BaseNum =>
}
class Num(v: Int) extends BaseNum(v) with NumBehavior with Exp
trait PlusBehavior extends super[EvalShow].PlusBehavior with super[DbleNumPlusNeg].PlusBehavior {
self: BasePlus =>
}
class Plus(l: exp, r: exp) extends BasePlus(l, r) with PlusBehavior with Exp
trait NegBehavior extends super[EvalShow].NegBehavior with super[DbleNumPlusNeg].NegBehavior {
self: BaseNeg =>
}
class Neg(t: exp) extends BaseNeg(t) with NegBehavior with Exp
}
object EvalShowDbleInstance extends EvalShowDble {
override type exp = Exp
def Num(v: Int) = new Num(v)
def Plus(l: exp, r: exp) = new Plus(l,r)
def Neg(t: exp) = new Neg(t)
}
//----------------------TEST
object Test3 extends App {
import EvalShowDbleInstance._
println("EvalShowDbleInstance:")
println(new Num(3).dble.show)
println(new Neg(new Num(13)).dble.show)
println(new Plus(new Num(3), new Num(4)).dble.show)
}
//----------------------EQUALS
trait Equals extends Base {
type exp <: Exp;
trait Exp extends super.Exp {
def eql(other: exp): Boolean;
}
}
//----------------------EQUALSNUM
trait EqualsNum extends BaseNum with Equals {
type exp <: Exp;
trait Exp extends super.Exp {
def isNum(v: Int): Boolean = false;
}
trait NumBehavior extends Exp {
self: BaseNum =>
def eql(other: exp): Boolean = other.isNum(value);
override def isNum(v: Int) = v == value;
}
class Num(v: Int) extends BaseNum(v) with NumBehavior with Exp
}
//----------------------EQUALSPLUS
trait EqualsPlus extends BasePlus with Equals {
type exp <: Exp;
trait Exp extends super[BasePlus].Exp with super[Equals].Exp {
def isPlus(l: exp, r: exp): Boolean = false;
}
trait PlusBehavior extends Exp {
self: BasePlus =>
def eql(other: exp): Boolean = other.isPlus(left, right);
override def isPlus(l: exp, r: exp) = (left eql l) && (right eql r)
}
class Plus(l: exp, r: exp) extends BasePlus(l, r) with PlusBehavior with Exp
}
//----------------------EQUALSNEG
trait EqualsNeg extends BaseNeg with Equals {
type exp <: Exp;
trait Exp extends super[BaseNeg].Exp with super[Equals].Exp {
def isNeg(t: exp): Boolean = false;
}
trait NegBehavior extends Exp {
self: BaseNeg =>
def eql(other: exp): Boolean = other.isNeg(term);
override def isNeg(t: exp) = term eql t
}
class Neg(t: exp) extends BaseNeg(t) with NegBehavior with Exp
}
//----------------------EQUALSNUMPLUSNEG
trait EqualsNumPlusNeg extends EqualsNum with EqualsPlus with EqualsNeg {
type exp <: Exp
trait Exp extends super[EqualsNum].Exp with super[EqualsPlus].Exp with super[EqualsNeg].Exp
}
//----------------------EQUALS+SHOW+EVAL (Instantiation)
trait EvalShowEquals extends EvalShow with EqualsNumPlusNeg {//parametric on the super traits but cut-pasted otherwise
type exp <: Exp
trait Exp extends super[EvalShow].Exp with super[EqualsNumPlusNeg].Exp
trait NumBehavior extends super[EvalShow].NumBehavior with super[EqualsNumPlusNeg].NumBehavior {
self: BaseNum =>
}
class Num(v: Int) extends BaseNum(v) with NumBehavior with Exp
trait PlusBehavior extends super[EvalShow].PlusBehavior with super[EqualsNumPlusNeg].PlusBehavior {
self: BasePlus =>
}
class Plus(l: exp, r: exp) extends BasePlus(l, r) with PlusBehavior with Exp
trait NegBehavior extends super[EvalShow].NegBehavior with super[EqualsNumPlusNeg].NegBehavior {
self: BaseNeg =>
}
class Neg(t: exp) extends BaseNeg(t) with NegBehavior with Exp
}
//----------------------TEST
object EvalShowEqualsInstance extends EvalShowEquals {
override type exp = Exp
}
object Test4 extends App {
import EvalShowEqualsInstance._
println("EvalShowInstance:")
println(new Num(3).show)
println(new Plus(new Num(3), new Num(4)).show)
println(new Neg(new Num(13)).show)
println(new Num(3).eval)
println(new Plus(new Num(3), new Num(4)).eval)
println(new Neg(new Num(13)).eval)
println(new Num(3).eql(new Num(3)))
println(new Plus(new Num(3), new Num(4)).eql(new Num(3)))
println(new Neg(new Num(13)).eql(new Neg(new Num(3))))
}
object All extends
Eval with Show with Dble with Equals
with EvalNum with EvalPlus with EvalNeg
with ShowNum with ShowPlus with ShowNeg
with DbleNum with DblePlus with DbleNeg
with EqualsNum with EqualsPlus with EqualsNeg
{
override type exp = Exp
//type exp <: Exp
trait Exp extends
super[Eval].Exp with super[Show].Exp with super[Dble].Exp with super[Equals].Exp
with super[DbleNum].Exp with super[DblePlus].Exp with super[DbleNeg].Exp
with super[EqualsNum].Exp with super[EqualsPlus].Exp with super[EqualsNeg].Exp
trait NumBehavior extends
super[EvalNum].NumBehavior with super[ShowNum].NumBehavior
with super[DbleNum].NumBehavior with super[EqualsNum].NumBehavior {
self: BaseNum =>
}
class Num(v: Int) extends BaseNum(v) with NumBehavior with Exp
trait PlusBehavior extends
super[EvalPlus].PlusBehavior with super[ShowPlus].PlusBehavior
with super[DblePlus].PlusBehavior with super[EqualsPlus].PlusBehavior {
self: BasePlus =>
}
class Plus(l: exp, r: exp) extends BasePlus(l, r) with PlusBehavior with Exp
trait NegBehavior extends
super[EvalNeg].NegBehavior with super[ShowNeg].NegBehavior
with super[DbleNeg].NegBehavior with super[EqualsNeg].NegBehavior {
self: BaseNeg =>
}
class Neg(t: exp) extends BaseNeg(t) with NegBehavior with Exp
def Num(v: Int) = new Num(v)
  def Plus(l: exp, r: exp) = new Plus(l, r)
def Neg(t: exp) = new Neg(t)
}
/*
//----------------------EQUALS+SHOW+EVAL+DBLE (Instantiation)
trait EvalShowEqualsDble extends EvalShowEquals with DbleNumPlusNeg {//parametric on the super traits but cut-pasted otherwise
type exp <: Exp
trait Exp extends super[EvalShowEquals].Exp with super[DbleNumPlusNeg].Exp
trait NumBehavior extends super[EvalShowEquals].NumBehavior with super[DbleNumPlusNeg].NumBehavior {
self: BaseNum =>
}
class Num(v: Int) extends BaseNum(v) with NumBehavior with Exp
trait PlusBehavior extends super[EvalShowEquals].PlusBehavior with super[DbleNumPlusNeg].PlusBehavior {
self: BasePlus =>
}
class Plus(l: exp, r: exp) extends BasePlus(l, r) with PlusBehavior with Exp
trait NegBehavior extends super[EvalShowEquals].NegBehavior with super[DbleNumPlusNeg].NegBehavior {
self: BaseNeg =>
}
class Neg(t: exp) extends BaseNeg(t) with NegBehavior with Exp
}
//----------------------TEST
object EvalShowEqualsDbleInstance extends EvalShowEqualsDble {
override type exp = Exp
}
*/
object Test5 extends App {
import All._
println("All:")
println(new Num(3).show)
println(new Plus(new Num(3), new Num(4)).show)
println(new Neg(new Num(13)).show)
println(new Num(3).eval)
println(new Plus(new Num(3), new Num(4)).eval)
println(new Neg(new Num(13)).eval)
println(new Num(3).eql(new Num(3)))
println(new Plus(new Num(3), new Num(4)).eql(new Num(3)))
println(new Neg(new Num(13)).eql(new Neg(new Num(3))))
println(new Num(3).dble.show)
println(new Plus(new Num(3), new Num(4)).dble.show)
println(new Neg(new Num(13)).dble.show)
}
|
ElvisResearchGroup/L42Docs
|
CaseStudyEcoop2018/src/scalaEP/mainFullyModularizedWithTests.scala
|
Scala
|
lgpl-3.0
| 13,169 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms
import com.google.inject.{Inject, Singleton}
import models.BulkReference
import play.api.data.Form
import play.api.data.Forms._
import play.api.data.validation.{Constraint, Invalid, Valid, ValidationError}
import play.api.i18n.{Messages, MessagesImpl}
import play.api.mvc.MessagesControllerComponents
import uk.gov.hmrc.emailaddress.EmailAddress
@Singleton
class BulkReferenceForm @Inject()(mcc: MessagesControllerComponents) {
implicit lazy val messages: Messages = MessagesImpl(mcc.langs.availables.head, mcc.messagesApi)
val MAX_REFERENCE_LENGTH: Int = 99
  val CHARS_ALLOWED = "^[\\s,a-zA-Z0-9_-]*$"
val emailConstraintRegex = "^((?:[a-zA-Z][a-zA-Z0-9_]*))(.)((?:[a-zA-Z][a-zA-Z0-9_]*))*$"
  val WHITE_SPACES = ".*\\s.*"
val emailConstraint : Constraint[String] = Constraint("constraints.email") ({
text =>
if (text.trim.length == 0){
Invalid(Seq(ValidationError(messages("gmp.error.mandatory.an", messages("gmp.email")))))
}
else if (!EmailAddress.isValid(text.trim.toUpperCase())){
Invalid(Seq(ValidationError(messages("gmp.error.email.invalid"))))
}
else if(text.trim matches emailConstraintRegex){
Invalid(Seq(ValidationError(messages("gmp.error.email.invalid"))))
}
else {
Valid
}
})
val bulkReferenceForm = Form(
mapping(
"email" -> text.verifying(emailConstraint),
"reference" -> text
.verifying(messages("gmp.error.mandatory", messages("gmp.reference")), x => x.trim.length != 0)
.verifying(messages("gmp.error.csv.member_ref.length.invalid", messages("gmp.reference")), x => x.trim.length <= MAX_REFERENCE_LENGTH)
.verifying(messages("gmp.error.csv.member_ref.character.invalid", messages("gmp.reference")), x => x.trim.matches(CHARS_ALLOWED))
.verifying(messages("gmp.error.csv.member_ref.spaces.invalid", messages("gmp.reference")), x => !(x.trim matches WHITE_SPACES))
)(BulkReference.apply)(BulkReference.unapply)
)
}
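
// Hedged usage sketch (not part of the original file): given a
// MessagesControllerComponents (injected by Guice in the app, or a stub in
// tests), it binds raw request data to bulkReferenceForm and inspects the
// outcome. The object and method names are made up for illustration.
object BulkReferenceFormDemo {
  def bindExample(mcc: MessagesControllerComponents): Unit = {
    val form = new BulkReferenceForm(mcc).bulkReferenceForm

    val good = form.bind(Map("email" -> "user@example.com", "reference" -> "REF_001"))
    println(good.value)  // Some(BulkReference(...)) once both constraints pass

    val bad = form.bind(Map("email" -> "not an email", "reference" -> ""))
    println(bad.errors.map(_.message)) // the gmp.error.* message keys wired up above
  }
}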
|
hmrc/gmp-frontend
|
app/forms/BulkReferenceForm.scala
|
Scala
|
apache-2.0
| 2,613 |
import scala.reflect.ClassTag // new style: use ClassTag
import org.scalacheck._
import Prop._
import Gen._
import Arbitrary._
import util._
import Buildable._
import scala.collection.mutable.ArraySeq
object Test extends Properties("Array") {
/** At this moment the authentic scalacheck Array Builder/Arb bits are commented out.
*/
implicit def arbArray[T](implicit a: Arbitrary[T], m: ClassTag[T]): Arbitrary[Array[T]] =
Arbitrary(containerOf[List,T](arbitrary[T]) map (_.toArray))
val arrGen: Gen[Array[_]] = oneOf(
arbitrary[Array[Int]],
arbitrary[Array[Array[Int]]],
arbitrary[Array[List[String]]],
arbitrary[Array[String]],
arbitrary[Array[Boolean]],
arbitrary[Array[AnyVal]]
)
// inspired by #1857 and #2352
property("eq/ne") = forAll(arrGen, arrGen) { (c1, c2) =>
(c1 eq c2) || (c1 ne c2)
}
// inspired by #2299
def smallInt = choose(1, 10)
property("ofDim") = forAll(smallInt, smallInt, smallInt) { (i1, i2, i3) =>
val arr = Array.ofDim[String](i1, i2, i3)
val flattened = arr flatMap (x => x) flatMap (x => x)
flattened.length == i1 * i2 * i3
}
}
|
felixmulder/scala
|
test/files/scalacheck/array-new.scala
|
Scala
|
bsd-3-clause
| 1,128 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.json
import java.util.UUID
import org.openjdk.jmh.annotations.Benchmark
import org.openjdk.jmh.annotations.Scope
import org.openjdk.jmh.annotations.State
import org.openjdk.jmh.annotations.Threads
import org.openjdk.jmh.infra.Blackhole
/**
* Check performance of json deserialization for array vs list of strings.
*
* ```
* > jmh:run -prof jmh.extras.JFR -wi 10 -i 10 -f1 -t1 .*SeqDeser.*
* ...
* [info] Benchmark Mode Cnt Score Error Units
* [info] SeqDeser.jsonArray thrpt 10 14684.845 ± 972.393 ops/s
* [info] SeqDeser.jsonList thrpt 10 13132.211 ± 790.534 ops/s
* [info] SeqDeser.smileArray thrpt 10 24153.852 ± 934.886 ops/s
* [info] SeqDeser.smileList thrpt 10 18320.118 ± 520.709 ops/s
* ```
*/
@State(Scope.Thread)
class SeqDeser {
private val data = (0 until 1000).map(_ => UUID.randomUUID().toString).toList
private val json = Json.encode(data)
private val smile = Json.smileEncode(data)
@Threads(1)
@Benchmark
def jsonArray(bh: Blackhole): Unit = {
bh.consume(Json.decode[Array[String]](json))
}
@Threads(1)
@Benchmark
def jsonList(bh: Blackhole): Unit = {
bh.consume(Json.decode[List[String]](json))
}
@Threads(1)
@Benchmark
def smileArray(bh: Blackhole): Unit = {
bh.consume(Json.smileDecode[Array[String]](smile))
}
@Threads(1)
@Benchmark
def smileList(bh: Blackhole): Unit = {
bh.consume(Json.smileDecode[List[String]](smile))
}
}
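
// Hedged sketch (not part of the original benchmark): the same encode/decode
// round trip the JMH methods above measure, run once outside the harness.
// Only the Json helpers already used in this file are touched; the object name
// SeqDeserDemo is made up.
object SeqDeserDemo extends App {
  val data  = List("a", "b", "c")
  val json  = Json.encode(data)       // a JSON array string, e.g. ["a","b","c"]
  val smile = Json.smileEncode(data)  // the binary Smile rendering of the same list

  val asArray = Json.decode[Array[String]](json)
  val asList  = Json.smileDecode[List[String]](smile)
  println(asArray.toList == asList)   // true: both paths recover the same strings
}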
|
Netflix/atlas
|
atlas-jmh/src/main/scala/com/netflix/atlas/json/SeqDeser.scala
|
Scala
|
apache-2.0
| 2,131 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.async.ws
import io.gatling.core.action.Action
import io.gatling.core.session.Expression
import io.gatling.core.stats.StatsEngine
import io.gatling.core.util.NameGen
import io.gatling.http.action.async.CloseAction
class WsClose(
requestName: Expression[String],
wsName: String,
statsEngine: StatsEngine,
next: Action
) extends CloseAction(requestName, wsName, statsEngine, next) with WsAction with NameGen {
override val name = genName("wsClose")
}
|
thkluge/gatling
|
gatling-http/src/main/scala/io/gatling/http/action/async/ws/WsClose.scala
|
Scala
|
apache-2.0
| 1,130 |
package pbdirect
trait Pos {
val _pos: Int
}
object Pos {
implicit def posOrdering[P <: Pos]: Ordering[P] = new Ordering[P] {
override def compare(x: P, y: P): Int = x._pos - y._pos
}
trait _0 extends Pos { override val _pos = 0 }
trait _1 extends Pos { override val _pos = 1 }
trait _2 extends Pos { override val _pos = 2 }
trait _3 extends Pos { override val _pos = 3 }
trait _4 extends Pos { override val _pos = 4 }
trait _5 extends Pos { override val _pos = 5 }
trait _6 extends Pos { override val _pos = 6 }
trait _7 extends Pos { override val _pos = 7 }
trait _8 extends Pos { override val _pos = 8 }
trait _9 extends Pos { override val _pos = 9 }
trait _10 extends Pos { override val _pos = 10 }
trait _11 extends Pos { override val _pos = 11 }
trait _12 extends Pos { override val _pos = 12 }
trait _13 extends Pos { override val _pos = 13 }
trait _14 extends Pos { override val _pos = 14 }
trait _15 extends Pos { override val _pos = 15 }
trait _16 extends Pos { override val _pos = 16 }
trait _17 extends Pos { override val _pos = 17 }
trait _18 extends Pos { override val _pos = 18 }
trait _19 extends Pos { override val _pos = 19 }
trait _20 extends Pos { override val _pos = 20 }
trait _21 extends Pos { override val _pos = 21 }
trait _22 extends Pos { override val _pos = 22 }
trait _23 extends Pos { override val _pos = 23 }
trait _24 extends Pos { override val _pos = 24 }
trait _25 extends Pos { override val _pos = 25 }
trait _26 extends Pos { override val _pos = 26 }
trait _27 extends Pos { override val _pos = 27 }
trait _28 extends Pos { override val _pos = 28 }
trait _29 extends Pos { override val _pos = 29 }
trait _30 extends Pos { override val _pos = 30 }
trait _31 extends Pos { override val _pos = 31 }
trait _32 extends Pos { override val _pos = 32 }
trait _33 extends Pos { override val _pos = 33 }
trait _34 extends Pos { override val _pos = 34 }
trait _35 extends Pos { override val _pos = 35 }
trait _36 extends Pos { override val _pos = 36 }
trait _37 extends Pos { override val _pos = 37 }
trait _38 extends Pos { override val _pos = 38 }
trait _39 extends Pos { override val _pos = 39 }
trait _40 extends Pos { override val _pos = 40 }
trait _41 extends Pos { override val _pos = 41 }
trait _42 extends Pos { override val _pos = 42 }
trait _43 extends Pos { override val _pos = 43 }
trait _44 extends Pos { override val _pos = 44 }
trait _45 extends Pos { override val _pos = 45 }
trait _46 extends Pos { override val _pos = 46 }
trait _47 extends Pos { override val _pos = 47 }
trait _48 extends Pos { override val _pos = 48 }
trait _49 extends Pos { override val _pos = 49 }
trait _50 extends Pos { override val _pos = 50 }
trait _51 extends Pos { override val _pos = 51 }
trait _52 extends Pos { override val _pos = 52 }
trait _53 extends Pos { override val _pos = 53 }
trait _54 extends Pos { override val _pos = 54 }
trait _55 extends Pos { override val _pos = 55 }
trait _56 extends Pos { override val _pos = 56 }
trait _57 extends Pos { override val _pos = 57 }
trait _58 extends Pos { override val _pos = 58 }
trait _59 extends Pos { override val _pos = 59 }
trait _60 extends Pos { override val _pos = 60 }
trait _61 extends Pos { override val _pos = 61 }
trait _62 extends Pos { override val _pos = 62 }
trait _63 extends Pos { override val _pos = 63 }
trait _64 extends Pos { override val _pos = 64 }
trait _65 extends Pos { override val _pos = 65 }
trait _66 extends Pos { override val _pos = 66 }
trait _67 extends Pos { override val _pos = 67 }
trait _68 extends Pos { override val _pos = 68 }
trait _69 extends Pos { override val _pos = 69 }
trait _70 extends Pos { override val _pos = 70 }
trait _71 extends Pos { override val _pos = 71 }
trait _72 extends Pos { override val _pos = 72 }
trait _73 extends Pos { override val _pos = 73 }
trait _74 extends Pos { override val _pos = 74 }
trait _75 extends Pos { override val _pos = 75 }
trait _76 extends Pos { override val _pos = 76 }
trait _77 extends Pos { override val _pos = 77 }
trait _78 extends Pos { override val _pos = 78 }
trait _79 extends Pos { override val _pos = 79 }
trait _80 extends Pos { override val _pos = 80 }
trait _81 extends Pos { override val _pos = 81 }
trait _82 extends Pos { override val _pos = 82 }
trait _83 extends Pos { override val _pos = 83 }
trait _84 extends Pos { override val _pos = 84 }
trait _85 extends Pos { override val _pos = 85 }
trait _86 extends Pos { override val _pos = 86 }
trait _87 extends Pos { override val _pos = 87 }
trait _88 extends Pos { override val _pos = 88 }
trait _89 extends Pos { override val _pos = 89 }
trait _90 extends Pos { override val _pos = 90 }
trait _91 extends Pos { override val _pos = 91 }
trait _92 extends Pos { override val _pos = 92 }
trait _93 extends Pos { override val _pos = 93 }
trait _94 extends Pos { override val _pos = 94 }
trait _95 extends Pos { override val _pos = 95 }
trait _96 extends Pos { override val _pos = 96 }
trait _97 extends Pos { override val _pos = 97 }
trait _98 extends Pos { override val _pos = 98 }
trait _99 extends Pos { override val _pos = 99 }
}
|
btlines/pbdirect
|
shared/src/main/scala/pbdirect/Pos.scala
|
Scala
|
mit
| 5,279 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.File
import java.net.URI
import java.util.Date
import scala.language.existentials
import org.apache.hadoop.fs.Path
import org.apache.parquet.format.converter.ParquetMetadataConverter.NO_FILTER
import org.apache.parquet.hadoop.ParquetFileReader
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.SparkException
import org.apache.spark.sql.{AnalysisException, QueryTest, Row, SaveMode}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{NoSuchPartitionException, TableAlreadyExistsException}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.execution.command.{DDLSuite, DDLUtils}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.HiveExternalCatalog
import org.apache.spark.sql.hive.HiveUtils.{CONVERT_METASTORE_ORC, CONVERT_METASTORE_PARQUET}
import org.apache.spark.sql.hive.orc.OrcFileOperator
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
import org.apache.spark.sql.internal.SQLConf.ORC_IMPLEMENTATION
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
// TODO(gatorsmile): combine HiveCatalogedDDLSuite and HiveDDLSuite
class HiveCatalogedDDLSuite extends DDLSuite with TestHiveSingleton with BeforeAndAfterEach {
override def afterEach(): Unit = {
try {
// drop all databases, tables and functions after each test
spark.sessionState.catalog.reset()
} finally {
super.afterEach()
}
}
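// Builds the CatalogTable used by the shared DDLSuite tests: a Parquet data source table
// when isDataSource is true, otherwise a Hive SequenceFile table using LazySimpleSerDe.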
protected override def generateTable(
catalog: SessionCatalog,
name: TableIdentifier,
isDataSource: Boolean,
partitionCols: Seq[String] = Seq("a", "b")): CatalogTable = {
val storage =
if (isDataSource) {
val serde = HiveSerDe.sourceToSerDe("parquet")
assert(serde.isDefined, "The default format is not Hive compatible")
CatalogStorageFormat(
locationUri = Some(catalog.defaultTablePath(name)),
inputFormat = serde.get.inputFormat,
outputFormat = serde.get.outputFormat,
serde = serde.get.serde,
compressed = false,
properties = Map.empty)
} else {
CatalogStorageFormat(
locationUri = Some(catalog.defaultTablePath(name)),
inputFormat = Some("org.apache.hadoop.mapred.SequenceFileInputFormat"),
outputFormat = Some("org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat"),
serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
compressed = false,
properties = Map("serialization.format" -> "1"))
}
val metadata = new MetadataBuilder()
.putString("key", "value")
.build()
val schema = new StructType()
.add("col1", "int", nullable = true, metadata = metadata)
.add("col2", "string")
CatalogTable(
identifier = name,
tableType = CatalogTableType.EXTERNAL,
storage = storage,
schema = schema.copy(
fields = schema.fields ++ partitionCols.map(StructField(_, IntegerType))),
provider = if (isDataSource) Some("parquet") else Some("hive"),
partitionColumnNames = partitionCols,
createTime = 0L,
createVersion = org.apache.spark.SPARK_VERSION,
tracksPartitionsInCatalog = true)
}
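// Strips metastore-generated, nondeterministic properties and environment-specific fields
// so catalog tables produced by different code paths can be compared directly.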
protected override def normalizeCatalogTable(table: CatalogTable): CatalogTable = {
val nondeterministicProps = Set(
"CreateTime",
"transient_lastDdlTime",
"grantTime",
"lastUpdateTime",
"last_modified_by",
"last_modified_time",
"Owner:",
"COLUMN_STATS_ACCURATE",
// The following are hive specific schema parameters which we do not need to match exactly.
"numFiles",
"numRows",
"rawDataSize",
"totalSize",
"totalNumberFiles",
"maxFileSize",
"minFileSize"
)
table.copy(
createTime = 0L,
lastAccessTime = 0L,
owner = "",
properties = table.properties.filterKeys(!nondeterministicProps.contains(_)),
// View texts are checked separately
viewText = None
)
}
test("alter table: set location") {
testSetLocation(isDatasourceTable = false)
}
test("alter table: set properties") {
testSetProperties(isDatasourceTable = false)
}
test("alter table: unset properties") {
testUnsetProperties(isDatasourceTable = false)
}
test("alter table: set serde") {
testSetSerde(isDatasourceTable = false)
}
test("alter table: set serde partition") {
testSetSerdePartition(isDatasourceTable = false)
}
test("alter table: change column") {
testChangeColumn(isDatasourceTable = false)
}
test("alter table: rename partition") {
testRenamePartitions(isDatasourceTable = false)
}
test("alter table: drop partition") {
testDropPartitions(isDatasourceTable = false)
}
test("alter table: add partition") {
testAddPartitions(isDatasourceTable = false)
}
test("drop table") {
testDropTable(isDatasourceTable = false)
}
test("alter datasource table add columns - orc") {
testAddColumn("orc")
}
test("alter datasource table add columns - partitioned - orc") {
testAddColumnPartitioned("orc")
}
test("SPARK-22431: illegal nested type") {
val queries = Seq(
"CREATE TABLE t AS SELECT STRUCT('a' AS `$a`, 1 AS b) q",
"CREATE TABLE t(q STRUCT<`$a`:INT, col2:STRING>, i1 INT)",
"CREATE VIEW t AS SELECT STRUCT('a' AS `$a`, 1 AS b) q")
queries.foreach(query => {
val err = intercept[SparkException] {
spark.sql(query)
}.getMessage
assert(err.contains("Cannot recognize hive type string"))
})
withView("v") {
spark.sql("CREATE VIEW v AS SELECT STRUCT('a' AS `a`, 1 AS b) q")
checkAnswer(sql("SELECT q.`a`, q.b FROM v"), Row("a", 1) :: Nil)
val err = intercept[SparkException] {
spark.sql("ALTER VIEW v AS SELECT STRUCT('a' AS `$a`, 1 AS b) q")
}.getMessage
assert(err.contains("Cannot recognize hive type string"))
}
}
test("SPARK-22431: table with nested type") {
withTable("t", "x") {
spark.sql("CREATE TABLE t(q STRUCT<`$a`:INT, col2:STRING>, i1 INT) USING PARQUET")
checkAnswer(spark.table("t"), Nil)
spark.sql("CREATE TABLE x (q STRUCT<col1:INT, col2:STRING>, i1 INT)")
checkAnswer(spark.table("x"), Nil)
}
}
test("SPARK-22431: view with nested type") {
withView("v") {
spark.sql("CREATE VIEW v AS SELECT STRUCT('a' AS `a`, 1 AS b) q")
checkAnswer(spark.table("v"), Row(Row("a", 1)) :: Nil)
spark.sql("ALTER VIEW v AS SELECT STRUCT('a' AS `b`, 1 AS b) q1")
val df = spark.table("v")
assert("q1".equals(df.schema.fields(0).name))
checkAnswer(df, Row(Row("a", 1)) :: Nil)
}
}
test("SPARK-22431: alter table tests with nested types") {
withTable("t1", "t2", "t3") {
spark.sql("CREATE TABLE t1 (q STRUCT<col1:INT, col2:STRING>, i1 INT)")
spark.sql("ALTER TABLE t1 ADD COLUMNS (newcol1 STRUCT<`col1`:STRING, col2:Int>)")
val newcol = spark.sql("SELECT * FROM t1").schema.fields(2).name
assert("newcol1".equals(newcol))
spark.sql("CREATE TABLE t2(q STRUCT<`a`:INT, col2:STRING>, i1 INT) USING PARQUET")
spark.sql("ALTER TABLE t2 ADD COLUMNS (newcol1 STRUCT<`$col1`:STRING, col2:Int>)")
spark.sql("ALTER TABLE t2 ADD COLUMNS (newcol2 STRUCT<`col1`:STRING, col2:Int>)")
val df2 = spark.table("t2")
checkAnswer(df2, Nil)
assert("newcol1".equals(df2.schema.fields(2).name))
assert("newcol2".equals(df2.schema.fields(3).name))
spark.sql("CREATE TABLE t3(q STRUCT<`$a`:INT, col2:STRING>, i1 INT) USING PARQUET")
spark.sql("ALTER TABLE t3 ADD COLUMNS (newcol1 STRUCT<`$col1`:STRING, col2:Int>)")
spark.sql("ALTER TABLE t3 ADD COLUMNS (newcol2 STRUCT<`col1`:STRING, col2:Int>)")
val df3 = spark.table("t3")
checkAnswer(df3, Nil)
assert("newcol1".equals(df3.schema.fields(2).name))
assert("newcol2".equals(df3.schema.fields(3).name))
}
}
test("SPARK-22431: negative alter table tests with nested types") {
withTable("t1") {
spark.sql("CREATE TABLE t1 (q STRUCT<col1:INT, col2:STRING>, i1 INT)")
val err = intercept[SparkException] {
spark.sql("ALTER TABLE t1 ADD COLUMNS (newcol1 STRUCT<`$col1`:STRING, col2:Int>)")
}.getMessage
assert(err.contains("Cannot recognize hive type string:"))
}
}
}
class HiveDDLSuite
extends QueryTest with SQLTestUtils with TestHiveSingleton with BeforeAndAfterEach {
import testImplicits._
val hiveFormats = Seq("PARQUET", "ORC", "TEXTFILE", "SEQUENCEFILE", "RCFILE", "AVRO")
override def afterEach(): Unit = {
try {
// drop all databases, tables and functions after each test
spark.sessionState.catalog.reset()
} finally {
super.afterEach()
}
}
// Check whether the directory that stores the table's data exists.
private def tableDirectoryExists(
tableIdentifier: TableIdentifier,
dbPath: Option[String] = None): Boolean = {
val expectedTablePath =
if (dbPath.isEmpty) {
hiveContext.sessionState.catalog.defaultTablePath(tableIdentifier)
} else {
new Path(new Path(dbPath.get), tableIdentifier.table).toUri
}
val filesystemPath = new Path(expectedTablePath.toString)
val fs = filesystemPath.getFileSystem(spark.sessionState.newHadoopConf())
fs.exists(filesystemPath)
}
test("drop tables") {
withTable("tab1") {
val tabName = "tab1"
assert(!tableDirectoryExists(TableIdentifier(tabName)))
sql(s"CREATE TABLE $tabName(c1 int)")
assert(tableDirectoryExists(TableIdentifier(tabName)))
sql(s"DROP TABLE $tabName")
assert(!tableDirectoryExists(TableIdentifier(tabName)))
sql(s"DROP TABLE IF EXISTS $tabName")
sql(s"DROP VIEW IF EXISTS $tabName")
}
}
test("create a hive table without schema") {
import testImplicits._
withTempPath { tempDir =>
withTable("tab1", "tab2") {
(("a", "b") :: Nil).toDF().write.json(tempDir.getCanonicalPath)
var e = intercept[AnalysisException] { sql("CREATE TABLE tab1 USING hive") }.getMessage
assert(e.contains("Unable to infer the schema. The schema specification is required to " +
"create the table `default`.`tab1`"))
e = intercept[AnalysisException] {
sql(s"CREATE TABLE tab2 location '${tempDir.getCanonicalPath}'")
}.getMessage
assert(e.contains("Unable to infer the schema. The schema specification is required to " +
"create the table `default`.`tab2`"))
}
}
}
test("drop external tables in default database") {
withTempDir { tmpDir =>
val tabName = "tab1"
withTable(tabName) {
assert(tmpDir.listFiles.isEmpty)
sql(
s"""
|create table $tabName
|stored as parquet
|location '${tmpDir.toURI}'
|as select 1, '3'
""".stripMargin)
val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
assert(tmpDir.listFiles.nonEmpty)
sql(s"DROP TABLE $tabName")
assert(tmpDir.listFiles.nonEmpty)
}
}
}
test("drop external data source table in default database") {
withTempDir { tmpDir =>
val tabName = "tab1"
withTable(tabName) {
assert(tmpDir.listFiles.isEmpty)
withSQLConf(SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key -> "true") {
Seq(1 -> "a").toDF("i", "j")
.write
.mode(SaveMode.Overwrite)
.format("parquet")
.option("path", tmpDir.toString)
.saveAsTable(tabName)
}
val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
// This data source table is an external table
assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
assert(tmpDir.listFiles.nonEmpty)
sql(s"DROP TABLE $tabName")
// The data are not deleted since the table type is EXTERNAL
assert(tmpDir.listFiles.nonEmpty)
}
}
}
test("create table and view with comment") {
val catalog = spark.sessionState.catalog
val tabName = "tab1"
withTable(tabName) {
sql(s"CREATE TABLE $tabName(c1 int) COMMENT 'BLABLA'")
val viewName = "view1"
withView(viewName) {
sql(s"CREATE VIEW $viewName COMMENT 'no comment' AS SELECT * FROM $tabName")
val tableMetadata = catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
val viewMetadata = catalog.getTableMetadata(TableIdentifier(viewName, Some("default")))
assert(tableMetadata.comment == Option("BLABLA"))
assert(viewMetadata.comment == Option("no comment"))
// Ensure that `comment` is removed from the table properties
assert(tableMetadata.properties.get("comment").isEmpty)
assert(viewMetadata.properties.get("comment").isEmpty)
}
}
}
test("create Hive-serde table and view with unicode columns and comment") {
val catalog = spark.sessionState.catalog
val tabName = "tab1"
val viewName = "view1"
// scalastyle:off
// Non-ASCII characters are not allowed in the source code, so we disable the scalastyle check.
val colName1 = "和"
val colName2 = "尼"
val comment = "庙"
// scalastyle:on
withTable(tabName) {
sql(s"""
|CREATE TABLE $tabName(`$colName1` int COMMENT '$comment')
|COMMENT '$comment'
|PARTITIONED BY (`$colName2` int)
""".stripMargin)
sql(s"INSERT OVERWRITE TABLE $tabName partition (`$colName2`=2) SELECT 1")
withView(viewName) {
sql(
s"""
|CREATE VIEW $viewName(`$colName1` COMMENT '$comment', `$colName2`)
|COMMENT '$comment'
|AS SELECT `$colName1`, `$colName2` FROM $tabName
""".stripMargin)
val tableMetadata = catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
val viewMetadata = catalog.getTableMetadata(TableIdentifier(viewName, Some("default")))
assert(tableMetadata.comment == Option(comment))
assert(viewMetadata.comment == Option(comment))
assert(tableMetadata.schema.fields.length == 2 && viewMetadata.schema.fields.length == 2)
val column1InTable = tableMetadata.schema.fields.head
val column1InView = viewMetadata.schema.fields.head
assert(column1InTable.name == colName1 && column1InView.name == colName1)
assert(column1InTable.getComment() == Option(comment))
assert(column1InView.getComment() == Option(comment))
assert(tableMetadata.schema.fields(1).name == colName2 &&
viewMetadata.schema.fields(1).name == colName2)
checkAnswer(sql(s"SELECT `$colName1`, `$colName2` FROM $tabName"), Row(1, 2) :: Nil)
checkAnswer(sql(s"SELECT `$colName1`, `$colName2` FROM $viewName"), Row(1, 2) :: Nil)
}
}
}
test("create table: partition column names exist in table definition") {
val e = intercept[AnalysisException] {
sql("CREATE TABLE tbl(a int) PARTITIONED BY (a string)")
}
assert(e.message == "Found duplicate column(s) in the table definition of `default`.`tbl`: `a`")
}
test("add/drop partition with location - managed table") {
val tab = "tab_with_partitions"
withTempDir { tmpDir =>
val basePath = new File(tmpDir.getCanonicalPath)
val part1Path = new File(basePath + "/part1")
val part2Path = new File(basePath + "/part2")
val dirSet = part1Path :: part2Path :: Nil
// Before data insertion, all the directories are empty
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
withTable(tab) {
sql(
s"""
|CREATE TABLE $tab (key INT, value STRING)
|PARTITIONED BY (ds STRING, hr STRING)
""".stripMargin)
sql(
s"""
|ALTER TABLE $tab ADD
|PARTITION (ds='2008-04-08', hr=11) LOCATION '${part1Path.toURI}'
|PARTITION (ds='2008-04-08', hr=12) LOCATION '${part2Path.toURI}'
""".stripMargin)
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
sql(s"INSERT OVERWRITE TABLE $tab partition (ds='2008-04-08', hr=11) SELECT 1, 'a'")
sql(s"INSERT OVERWRITE TABLE $tab partition (ds='2008-04-08', hr=12) SELECT 2, 'b'")
// add partition will not delete the data
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
checkAnswer(
spark.table(tab),
Row(1, "a", "2008-04-08", "11") :: Row(2, "b", "2008-04-08", "12") :: Nil
)
sql(s"ALTER TABLE $tab DROP PARTITION (ds='2008-04-08', hr=11)")
// drop partition will delete the data
assert(part1Path.listFiles == null || part1Path.listFiles.isEmpty)
assert(part2Path.listFiles.nonEmpty)
sql(s"DROP TABLE $tab")
// drop table will delete the data of the managed table
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
}
}
}
test("SPARK-19129: drop partition with a empty string will drop the whole table") {
val df = spark.createDataFrame(Seq((0, "a"), (1, "b"))).toDF("partCol1", "name")
df.write.mode("overwrite").partitionBy("partCol1").saveAsTable("partitionedTable")
val e = intercept[AnalysisException] {
spark.sql("alter table partitionedTable drop partition(partCol1='')")
}.getMessage
assert(e.contains("Partition spec is invalid. The spec ([partCol1=]) contains an empty " +
"partition column value"))
}
test("add/drop partitions - external table") {
val catalog = spark.sessionState.catalog
withTempDir { tmpDir =>
val basePath = tmpDir.getCanonicalPath
val partitionPath_1stCol_part1 = new File(basePath + "/ds=2008-04-08")
val partitionPath_1stCol_part2 = new File(basePath + "/ds=2008-04-09")
val partitionPath_part1 = new File(basePath + "/ds=2008-04-08/hr=11")
val partitionPath_part2 = new File(basePath + "/ds=2008-04-09/hr=11")
val partitionPath_part3 = new File(basePath + "/ds=2008-04-08/hr=12")
val partitionPath_part4 = new File(basePath + "/ds=2008-04-09/hr=12")
val dirSet =
tmpDir :: partitionPath_1stCol_part1 :: partitionPath_1stCol_part2 ::
partitionPath_part1 :: partitionPath_part2 :: partitionPath_part3 ::
partitionPath_part4 :: Nil
val externalTab = "extTable_with_partitions"
withTable(externalTab) {
assert(tmpDir.listFiles.isEmpty)
sql(
s"""
|CREATE EXTERNAL TABLE $externalTab (key INT, value STRING)
|PARTITIONED BY (ds STRING, hr STRING)
|LOCATION '${tmpDir.toURI}'
""".stripMargin)
// Before data insertion, all the directories are empty
assert(dirSet.forall(dir => dir.listFiles == null || dir.listFiles.isEmpty))
for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) {
sql(
s"""
|INSERT OVERWRITE TABLE $externalTab
|partition (ds='$ds',hr='$hr')
|SELECT 1, 'a'
""".stripMargin)
}
val hiveTable = catalog.getTableMetadata(TableIdentifier(externalTab, Some("default")))
assert(hiveTable.tableType == CatalogTableType.EXTERNAL)
// After data insertion, none of the directories are empty
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
val message = intercept[AnalysisException] {
sql(s"ALTER TABLE $externalTab DROP PARTITION (ds='2008-04-09', unknownCol='12')")
}
assert(message.getMessage.contains("unknownCol is not a valid partition column in table " +
"`default`.`exttable_with_partitions`"))
sql(
s"""
|ALTER TABLE $externalTab DROP PARTITION (ds='2008-04-08'),
|PARTITION (hr='12')
""".stripMargin)
assert(catalog.listPartitions(TableIdentifier(externalTab)).map(_.spec).toSet ==
Set(Map("ds" -> "2008-04-09", "hr" -> "11")))
// drop partition will not delete the data of external table
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
sql(
s"""
|ALTER TABLE $externalTab ADD PARTITION (ds='2008-04-08', hr='12')
|PARTITION (ds='2008-04-08', hr=11)
""".stripMargin)
assert(catalog.listPartitions(TableIdentifier(externalTab)).map(_.spec).toSet ==
Set(Map("ds" -> "2008-04-08", "hr" -> "11"),
Map("ds" -> "2008-04-08", "hr" -> "12"),
Map("ds" -> "2008-04-09", "hr" -> "11")))
// add partition will not delete the data
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
sql(s"DROP TABLE $externalTab")
// drop table will not delete the data of external table
assert(dirSet.forall(dir => dir.listFiles.nonEmpty))
}
}
}
test("drop views") {
withTable("tab1") {
val tabName = "tab1"
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
val viewName = "view1"
assert(tableDirectoryExists(TableIdentifier(tabName)))
assert(!tableDirectoryExists(TableIdentifier(viewName)))
sql(s"CREATE VIEW $viewName AS SELECT * FROM tab1")
assert(tableDirectoryExists(TableIdentifier(tabName)))
assert(!tableDirectoryExists(TableIdentifier(viewName)))
sql(s"DROP VIEW $viewName")
assert(tableDirectoryExists(TableIdentifier(tabName)))
sql(s"DROP VIEW IF EXISTS $viewName")
}
}
}
test("alter views - rename") {
val tabName = "tab1"
withTable(tabName) {
spark.range(10).write.saveAsTable(tabName)
val oldViewName = "view1"
val newViewName = "view2"
withView(oldViewName, newViewName) {
val catalog = spark.sessionState.catalog
sql(s"CREATE VIEW $oldViewName AS SELECT * FROM $tabName")
assert(catalog.tableExists(TableIdentifier(oldViewName)))
assert(!catalog.tableExists(TableIdentifier(newViewName)))
sql(s"ALTER VIEW $oldViewName RENAME TO $newViewName")
assert(!catalog.tableExists(TableIdentifier(oldViewName)))
assert(catalog.tableExists(TableIdentifier(newViewName)))
}
}
}
test("alter views - set/unset tblproperties") {
val tabName = "tab1"
withTable(tabName) {
spark.range(10).write.saveAsTable(tabName)
val viewName = "view1"
withView(viewName) {
def checkProperties(expected: Map[String, String]): Boolean = {
val properties = spark.sessionState.catalog.getTableMetadata(TableIdentifier(viewName))
.properties
properties.filterNot { case (key, value) =>
Seq("transient_lastDdlTime", CatalogTable.VIEW_DEFAULT_DATABASE).contains(key) ||
key.startsWith(CatalogTable.VIEW_QUERY_OUTPUT_PREFIX)
} == expected
}
sql(s"CREATE VIEW $viewName AS SELECT * FROM $tabName")
checkProperties(Map())
sql(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'an')")
checkProperties(Map("p" -> "an"))
// no exception or message will be issued if we set it again
sql(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'an')")
checkProperties(Map("p" -> "an"))
// the value will be updated if we set the same key to a different value
sql(s"ALTER VIEW $viewName SET TBLPROPERTIES ('p' = 'b')")
checkProperties(Map("p" -> "b"))
sql(s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')")
checkProperties(Map())
val message = intercept[AnalysisException] {
sql(s"ALTER VIEW $viewName UNSET TBLPROPERTIES ('p')")
}.getMessage
assert(message.contains(
"Attempted to unset non-existent property 'p' in table '`default`.`view1`'"))
}
}
}
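// Helpers that assert the error raised when ALTER TABLE is used on a view,
// or ALTER VIEW is used on a table.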
private def assertErrorForAlterTableOnView(sqlText: String): Unit = {
val message = intercept[AnalysisException](sql(sqlText)).getMessage
assert(message.contains("Cannot alter a view with ALTER TABLE. Please use ALTER VIEW instead"))
}
private def assertErrorForAlterViewOnTable(sqlText: String): Unit = {
val message = intercept[AnalysisException](sql(sqlText)).getMessage
assert(message.contains("Cannot alter a table with ALTER VIEW. Please use ALTER TABLE instead"))
}
test("create table - SET TBLPROPERTIES EXTERNAL to TRUE") {
val tabName = "tab1"
withTable(tabName) {
val message = intercept[AnalysisException] {
sql(s"CREATE TABLE $tabName (height INT, length INT) TBLPROPERTIES('EXTERNAL'='TRUE')")
}.getMessage
assert(message.contains("Cannot set or change the preserved property key: 'EXTERNAL'"))
}
}
test("alter table - SET TBLPROPERTIES EXTERNAL to TRUE") {
val tabName = "tab1"
withTable(tabName) {
val catalog = spark.sessionState.catalog
sql(s"CREATE TABLE $tabName (height INT, length INT)")
assert(
catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED)
val message = intercept[AnalysisException] {
sql(s"ALTER TABLE $tabName SET TBLPROPERTIES ('EXTERNAL' = 'TRUE')")
}.getMessage
assert(message.contains("Cannot set or change the preserved property key: 'EXTERNAL'"))
// The table type is not changed to external
assert(
catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED)
// Table property keys are case sensitive. Thus, 'external' (lowercase) is allowed
sql(s"ALTER TABLE $tabName SET TBLPROPERTIES ('external' = 'TRUE')")
// The table type is not changed to external
assert(
catalog.getTableMetadata(TableIdentifier(tabName)).tableType == CatalogTableType.MANAGED)
}
}
test("alter views and alter table - misuse") {
val tabName = "tab1"
withTable(tabName) {
spark.range(10).write.saveAsTable(tabName)
val oldViewName = "view1"
val newViewName = "view2"
withView(oldViewName, newViewName) {
val catalog = spark.sessionState.catalog
sql(s"CREATE VIEW $oldViewName AS SELECT * FROM $tabName")
assert(catalog.tableExists(TableIdentifier(tabName)))
assert(catalog.tableExists(TableIdentifier(oldViewName)))
assert(!catalog.tableExists(TableIdentifier(newViewName)))
assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName RENAME TO $newViewName")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName RENAME TO $newViewName")
assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName SET TBLPROPERTIES ('p' = 'an')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET TBLPROPERTIES ('p' = 'an')")
assertErrorForAlterViewOnTable(s"ALTER VIEW $tabName UNSET TBLPROPERTIES ('p')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName UNSET TBLPROPERTIES ('p')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET LOCATION '/path/to/home'")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET SERDE 'whatever'")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName SET SERDEPROPERTIES ('x' = 'y')")
assertErrorForAlterTableOnView(
s"ALTER TABLE $oldViewName PARTITION (a=1, b=2) SET SERDEPROPERTIES ('x' = 'y')")
assertErrorForAlterTableOnView(
s"ALTER TABLE $oldViewName ADD IF NOT EXISTS PARTITION (a='4', b='8')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName DROP IF EXISTS PARTITION (a='2')")
assertErrorForAlterTableOnView(s"ALTER TABLE $oldViewName RECOVER PARTITIONS")
assertErrorForAlterTableOnView(
s"ALTER TABLE $oldViewName PARTITION (a='1') RENAME TO PARTITION (a='100')")
assert(catalog.tableExists(TableIdentifier(tabName)))
assert(catalog.tableExists(TableIdentifier(oldViewName)))
assert(!catalog.tableExists(TableIdentifier(newViewName)))
}
}
}
test("Insert overwrite Hive table should output correct schema") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "false") {
withTable("tbl", "tbl2") {
withView("view1") {
spark.sql("CREATE TABLE tbl(id long)")
spark.sql("INSERT OVERWRITE TABLE tbl VALUES 4")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
withTempPath { path =>
sql(
s"""
|CREATE TABLE tbl2(ID long) USING hive
|OPTIONS(fileFormat 'parquet')
|LOCATION '${path.toURI}'
""".stripMargin)
spark.sql("INSERT OVERWRITE TABLE tbl2 SELECT ID FROM view1")
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(path.toString).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), Seq(Row(4)))
}
}
}
}
}
test("Create Hive table as select should output correct schema") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "false") {
withTable("tbl", "tbl2") {
withView("view1") {
spark.sql("CREATE TABLE tbl(id long)")
spark.sql("INSERT OVERWRITE TABLE tbl VALUES 4")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
withTempPath { path =>
sql(
s"""
|CREATE TABLE tbl2 USING hive
|OPTIONS(fileFormat 'parquet')
|LOCATION '${path.toURI}'
|AS SELECT ID FROM view1
""".stripMargin)
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(path.toString).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), Seq(Row(4)))
}
}
}
}
}
test("SPARK-25313 Insert overwrite directory should output correct schema") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "false") {
withTable("tbl") {
withView("view1") {
spark.sql("CREATE TABLE tbl(id long)")
spark.sql("INSERT OVERWRITE TABLE tbl VALUES 4")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
withTempPath { path =>
spark.sql(s"INSERT OVERWRITE LOCAL DIRECTORY '${path.getCanonicalPath}' " +
"STORED AS PARQUET SELECT ID FROM view1")
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(path.toString).schema == expectedSchema)
checkAnswer(spark.read.parquet(path.toString), Seq(Row(4)))
}
}
}
}
}
test("alter table partition - storage information") {
sql("CREATE TABLE boxes (height INT, length INT) PARTITIONED BY (width INT)")
sql("INSERT OVERWRITE TABLE boxes PARTITION (width=4) SELECT 4, 4")
val catalog = spark.sessionState.catalog
val expectedSerde = "com.sparkbricks.serde.ColumnarSerDe"
val expectedSerdeProps = Map("compress" -> "true")
val expectedSerdePropsString =
expectedSerdeProps.map { case (k, v) => s"'$k'='$v'" }.mkString(", ")
val oldPart = catalog.getPartition(TableIdentifier("boxes"), Map("width" -> "4"))
assume(oldPart.storage.serde != Some(expectedSerde), "bad test: serde was already set")
assume(oldPart.storage.properties.filterKeys(expectedSerdeProps.contains) !=
expectedSerdeProps, "bad test: serde properties were already set")
sql(s"""ALTER TABLE boxes PARTITION (width=4)
| SET SERDE '$expectedSerde'
| WITH SERDEPROPERTIES ($expectedSerdePropsString)
|""".stripMargin)
val newPart = catalog.getPartition(TableIdentifier("boxes"), Map("width" -> "4"))
assert(newPart.storage.serde == Some(expectedSerde))
assert(newPart.storage.properties.filterKeys(expectedSerdeProps.contains) ==
expectedSerdeProps)
}
test("MSCK REPAIR RABLE") {
val catalog = spark.sessionState.catalog
val tableIdent = TableIdentifier("tab1")
sql("CREATE TABLE tab1 (height INT, length INT) PARTITIONED BY (a INT, b INT)")
val part1 = Map("a" -> "1", "b" -> "5")
val part2 = Map("a" -> "2", "b" -> "6")
val root = new Path(catalog.getTableMetadata(tableIdent).location)
val fs = root.getFileSystem(spark.sessionState.newHadoopConf())
// valid
fs.mkdirs(new Path(new Path(root, "a=1"), "b=5"))
fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "a.csv")) // file
fs.createNewFile(new Path(new Path(root, "a=1/b=5"), "_SUCCESS")) // file
fs.mkdirs(new Path(new Path(root, "A=2"), "B=6"))
fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "b.csv")) // file
fs.createNewFile(new Path(new Path(root, "A=2/B=6"), "c.csv")) // file
fs.createNewFile(new Path(new Path(root, "A=2/B=6"), ".hiddenFile")) // file
fs.mkdirs(new Path(new Path(root, "A=2/B=6"), "_temporary"))
// invalid
fs.mkdirs(new Path(new Path(root, "a"), "b")) // bad name
fs.mkdirs(new Path(new Path(root, "b=1"), "a=1")) // wrong order
fs.mkdirs(new Path(root, "a=4")) // not enough columns
fs.createNewFile(new Path(new Path(root, "a=1"), "b=4")) // file
fs.createNewFile(new Path(new Path(root, "a=1"), "_SUCCESS")) // _SUCCESS
fs.mkdirs(new Path(new Path(root, "a=1"), "_temporary")) // _temporary
fs.mkdirs(new Path(new Path(root, "a=1"), ".b=4")) // start with .
try {
sql("MSCK REPAIR TABLE tab1")
assert(catalog.listPartitions(tableIdent).map(_.spec).toSet ==
Set(part1, part2))
assert(catalog.getPartition(tableIdent, part1).parameters("numFiles") == "1")
assert(catalog.getPartition(tableIdent, part2).parameters("numFiles") == "2")
} finally {
fs.delete(root, true)
}
}
test("drop table using drop view") {
withTable("tab1") {
sql("CREATE TABLE tab1(c1 int)")
val message = intercept[AnalysisException] {
sql("DROP VIEW tab1")
}.getMessage
assert(message.contains("Cannot drop a table with DROP VIEW. Please use DROP TABLE instead"))
}
}
test("drop view using drop table") {
withTable("tab1") {
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
sql("CREATE VIEW view1 AS SELECT * FROM tab1")
val message = intercept[AnalysisException] {
sql("DROP TABLE view1")
}.getMessage
assert(message.contains("Cannot drop a view with DROP TABLE. Please use DROP VIEW instead"))
}
}
}
test("create view with mismatched schema") {
withTable("tab1") {
spark.range(10).write.saveAsTable("tab1")
withView("view1") {
val e = intercept[AnalysisException] {
sql("CREATE VIEW view1 (col1, col3) AS SELECT * FROM tab1")
}.getMessage
assert(e.contains("the SELECT clause (num: `1`) does not match")
&& e.contains("CREATE VIEW (num: `2`)"))
}
}
}
test("create view with specified schema") {
withView("view1") {
sql("CREATE VIEW view1 (col1, col2) AS SELECT 1, 2")
checkAnswer(
sql("SELECT * FROM view1"),
Row(1, 2) :: Nil
)
}
}
test("desc table for Hive table - partitioned table") {
withTable("tbl") {
sql("CREATE TABLE tbl(a int) PARTITIONED BY (b int)")
assert(sql("DESC tbl").collect().containsSlice(
Seq(
Row("a", "int", null),
Row("b", "int", null),
Row("# Partition Information", "", ""),
Row("# col_name", "data_type", "comment"),
Row("b", "int", null)
)
))
}
}
test("desc table for Hive table - bucketed + sorted table") {
withTable("tbl") {
sql(
s"""
|CREATE TABLE tbl (id int, name string)
|CLUSTERED BY(id)
|SORTED BY(id, name) INTO 1024 BUCKETS
|PARTITIONED BY (ds string)
""".stripMargin)
val x = sql("DESC FORMATTED tbl").collect()
assert(x.containsSlice(
Seq(
Row("Num Buckets", "1024", ""),
Row("Bucket Columns", "[`id`]", ""),
Row("Sort Columns", "[`id`, `name`]", "")
)
))
}
}
test("desc table for data source table using Hive Metastore") {
assume(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "hive")
val tabName = "tab1"
withTable(tabName) {
sql(s"CREATE TABLE $tabName(a int comment 'test') USING parquet ")
checkAnswer(
sql(s"DESC $tabName").select("col_name", "data_type", "comment"),
Row("a", "int", "test") :: Nil
)
}
}
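// Creates a database at the given location (which may or may not already exist on disk),
// creates and drops a table in it, and verifies the directory lifecycle at each step.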
private def createDatabaseWithLocation(tmpDir: File, dirExists: Boolean): Unit = {
val catalog = spark.sessionState.catalog
val dbName = "db1"
val tabName = "tab1"
val fs = new Path(tmpDir.toString).getFileSystem(spark.sessionState.newHadoopConf())
withTable(tabName) {
if (dirExists) {
assert(tmpDir.listFiles.isEmpty)
} else {
assert(!fs.exists(new Path(tmpDir.toString)))
}
sql(s"CREATE DATABASE $dbName Location '${tmpDir.toURI.getPath.stripSuffix("/")}'")
val db1 = catalog.getDatabaseMetadata(dbName)
val dbPath = new URI(tmpDir.toURI.toString.stripSuffix("/"))
assert(db1 == CatalogDatabase(dbName, "", dbPath, Map.empty))
sql("USE db1")
sql(s"CREATE TABLE $tabName as SELECT 1")
assert(tableDirectoryExists(TableIdentifier(tabName), Option(tmpDir.toString)))
assert(tmpDir.listFiles.nonEmpty)
sql(s"DROP TABLE $tabName")
assert(tmpDir.listFiles.isEmpty)
sql("USE default")
sql(s"DROP DATABASE $dbName")
assert(!fs.exists(new Path(tmpDir.toString)))
}
}
test("create/drop database - location without pre-created directory") {
withTempPath { tmpDir =>
createDatabaseWithLocation(tmpDir, dirExists = false)
}
}
test("create/drop database - location with pre-created directory") {
withTempDir { tmpDir =>
createDatabaseWithLocation(tmpDir, dirExists = true)
}
}
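// Exercises DROP DATABASE with RESTRICT/CASCADE, with and without a remaining table,
// and verifies whether the database directory is removed.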
private def dropDatabase(cascade: Boolean, tableExists: Boolean): Unit = {
val dbName = "db1"
val dbPath = new Path(spark.sessionState.conf.warehousePath)
val fs = dbPath.getFileSystem(spark.sessionState.newHadoopConf())
sql(s"CREATE DATABASE $dbName")
val catalog = spark.sessionState.catalog
val expectedDBLocation = s"file:${dbPath.toUri.getPath.stripSuffix("/")}/$dbName.db"
val expectedDBUri = CatalogUtils.stringToURI(expectedDBLocation)
val db1 = catalog.getDatabaseMetadata(dbName)
assert(db1 == CatalogDatabase(
dbName,
"",
expectedDBUri,
Map.empty))
// the database directory was created
assert(fs.exists(dbPath) && fs.isDirectory(dbPath))
sql(s"USE $dbName")
val tabName = "tab1"
assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
sql(s"CREATE TABLE $tabName as SELECT 1")
assert(tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
if (!tableExists) {
sql(s"DROP TABLE $tabName")
assert(!tableDirectoryExists(TableIdentifier(tabName), Option(expectedDBLocation)))
}
sql(s"USE default")
val sqlDropDatabase = s"DROP DATABASE $dbName ${if (cascade) "CASCADE" else "RESTRICT"}"
if (tableExists && !cascade) {
val message = intercept[AnalysisException] {
sql(sqlDropDatabase)
}.getMessage
assert(message.contains(s"Database $dbName is not empty. One or more tables exist."))
// the database directory was not removed
assert(fs.exists(new Path(expectedDBLocation)))
} else {
sql(sqlDropDatabase)
// the database directory was removed, along with the table directories inside it
assert(!fs.exists(new Path(expectedDBLocation)))
}
}
test("drop database containing tables - CASCADE") {
dropDatabase(cascade = true, tableExists = true)
}
test("drop an empty database - CASCADE") {
dropDatabase(cascade = true, tableExists = false)
}
test("drop database containing tables - RESTRICT") {
dropDatabase(cascade = false, tableExists = true)
}
test("drop an empty database - RESTRICT") {
dropDatabase(cascade = false, tableExists = false)
}
test("drop default database") {
Seq("true", "false").foreach { caseSensitive =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) {
var message = intercept[AnalysisException] {
sql("DROP DATABASE default")
}.getMessage
assert(message.contains("Can not drop default database"))
// SQLConf.CASE_SENSITIVE does not affect the result
// because the Hive metastore is not case sensitive.
message = intercept[AnalysisException] {
sql("DROP DATABASE DeFault")
}.getMessage
assert(message.contains("Can not drop default database"))
}
}
}
test("Create Cataloged Table As Select - Drop Table After Runtime Exception") {
withTable("tab") {
intercept[SparkException] {
sql(
"""
|CREATE TABLE tab
|STORED AS TEXTFILE
|SELECT 1 AS a, (SELECT a FROM (SELECT 1 AS a UNION ALL SELECT 2 AS a) t) AS b
""".stripMargin)
}
// After hitting a runtime exception, the created table should be dropped.
assert(!spark.sessionState.catalog.tableExists(TableIdentifier("tab")))
}
}
test("CREATE TABLE LIKE a temporary view") {
// CREATE TABLE LIKE a temporary view.
withCreateTableLikeTempView(location = None)
// CREATE TABLE LIKE a temporary view location ...
withTempDir { tmpDir =>
withCreateTableLikeTempView(Some(tmpDir.toURI.toString))
}
}
private def withCreateTableLikeTempView(location : Option[String]): Unit = {
val sourceViewName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTempView(sourceViewName) {
withTable(targetTabName) {
spark.range(10).select('id as 'a, 'id as 'b, 'id as 'c, 'id as 'd)
.createTempView(sourceViewName)
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceViewName $locationClause")
val sourceTable = spark.sessionState.catalog.getTempViewOrPermanentTableMetadata(
TableIdentifier(sourceViewName))
val targetTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceTable, targetTable, tableType)
}
}
}
test("CREATE TABLE LIKE a data source table") {
// CREATE TABLE LIKE a data source table.
withCreateTableLikeDSTable(location = None)
// CREATE TABLE LIKE a data source table location ...
withTempDir { tmpDir =>
withCreateTableLikeDSTable(Some(tmpDir.toURI.toString))
}
}
private def withCreateTableLikeDSTable(location : Option[String]): Unit = {
val sourceTabName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTable(sourceTabName, targetTabName) {
spark.range(10).select('id as 'a, 'id as 'b, 'id as 'c, 'id as 'd)
.write.format("json").saveAsTable(sourceTabName)
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $locationClause")
val sourceTable =
spark.sessionState.catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
val targetTable =
spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
// The table type of the source table should be a Hive-managed data source table
assert(DDLUtils.isDatasourceTable(sourceTable))
assert(sourceTable.tableType == CatalogTableType.MANAGED)
checkCreateTableLike(sourceTable, targetTable, tableType)
}
}
test("CREATE TABLE LIKE an external data source table") {
// CREATE TABLE LIKE an external data source table.
withCreateTableLikeExtDSTable(location = None)
// CREATE TABLE LIKE an external data source table location ...
withTempDir { tmpDir =>
withCreateTableLikeExtDSTable(Some(tmpDir.toURI.toString))
}
}
private def withCreateTableLikeExtDSTable(location : Option[String]): Unit = {
val sourceTabName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTable(sourceTabName, targetTabName) {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(10).select('id as 'a, 'id as 'b, 'id as 'c, 'id as 'd)
.write.format("parquet").save(path)
sql(s"CREATE TABLE $sourceTabName USING parquet OPTIONS (PATH '${dir.toURI}')")
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $locationClause")
// The source table should be an external data source table
val sourceTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
val targetTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
// The table type of the source table should be an external data source table
assert(DDLUtils.isDatasourceTable(sourceTable))
assert(sourceTable.tableType == CatalogTableType.EXTERNAL)
checkCreateTableLike(sourceTable, targetTable, tableType)
}
}
}
test("CREATE TABLE LIKE a managed Hive serde table") {
// CREATE TABLE LIKE a managed Hive serde table.
withCreateTableLikeManagedHiveTable(location = None)
// CREATE TABLE LIKE a managed Hive serde table location ...
withTempDir { tmpDir =>
withCreateTableLikeManagedHiveTable(Some(tmpDir.toURI.toString))
}
}
private def withCreateTableLikeManagedHiveTable(location : Option[String]): Unit = {
val sourceTabName = "tab1"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
val catalog = spark.sessionState.catalog
withTable(sourceTabName, targetTabName) {
sql(s"CREATE TABLE $sourceTabName TBLPROPERTIES('prop1'='value1') AS SELECT 1 key, 'a'")
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $locationClause")
val sourceTable = catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
assert(sourceTable.tableType == CatalogTableType.MANAGED)
assert(sourceTable.properties.get("prop1").nonEmpty)
val targetTable = catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceTable, targetTable, tableType)
}
}
test("CREATE TABLE LIKE an external Hive serde table") {
// CREATE TABLE LIKE an external Hive serde table.
withCreateTableLikeExtHiveTable(location = None)
// CREATE TABLE LIKE an external Hive serde table location ...
withTempDir { tmpDir =>
withCreateTableLikeExtHiveTable(Some(tmpDir.toURI.toString))
}
}
private def withCreateTableLikeExtHiveTable(location : Option[String]): Unit = {
val catalog = spark.sessionState.catalog
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTempDir { tmpDir =>
val basePath = tmpDir.toURI
val sourceTabName = "tab1"
val targetTabName = "tab2"
withTable(sourceTabName, targetTabName) {
assert(tmpDir.listFiles.isEmpty)
sql(
s"""
|CREATE EXTERNAL TABLE $sourceTabName (key INT comment 'test', value STRING)
|COMMENT 'Apache Spark'
|PARTITIONED BY (ds STRING, hr STRING)
|LOCATION '$basePath'
""".stripMargin)
for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) {
sql(
s"""
|INSERT OVERWRITE TABLE $sourceTabName
|partition (ds='$ds',hr='$hr')
|SELECT 1, 'a'
""".stripMargin)
}
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceTabName $locationClause")
val sourceTable = catalog.getTableMetadata(
TableIdentifier(sourceTabName, Some("default")))
assert(sourceTable.tableType == CatalogTableType.EXTERNAL)
assert(sourceTable.comment == Option("Apache Spark"))
val targetTable = catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceTable, targetTable, tableType)
}
}
}
test("CREATE TABLE LIKE a view") {
// CREATE TABLE LIKE a view.
withCreateTableLikeView(location = None)
// CREATE TABLE LIKE a view location ...
withTempDir { tmpDir =>
withCreateTableLikeView(Some(tmpDir.toURI.toString))
}
}
private def withCreateTableLikeView(location : Option[String]): Unit = {
val sourceTabName = "tab1"
val sourceViewName = "view"
val targetTabName = "tab2"
val tableType = if (location.isDefined) CatalogTableType.EXTERNAL else CatalogTableType.MANAGED
withTable(sourceTabName, targetTabName) {
withView(sourceViewName) {
spark.range(10).select('id as 'a, 'id as 'b, 'id as 'c, 'id as 'd)
.write.format("json").saveAsTable(sourceTabName)
sql(s"CREATE VIEW $sourceViewName AS SELECT * FROM $sourceTabName")
val locationClause = if (location.nonEmpty) s"LOCATION '${location.getOrElse("")}'" else ""
sql(s"CREATE TABLE $targetTabName LIKE $sourceViewName $locationClause")
val sourceView = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(sourceViewName, Some("default")))
// The original source should be a VIEW with an empty path
assert(sourceView.tableType == CatalogTableType.VIEW)
assert(sourceView.viewText.nonEmpty)
assert(sourceView.viewDefaultDatabase == Some("default"))
assert(sourceView.viewQueryColumnNames == Seq("a", "b", "c", "d"))
val targetTable = spark.sessionState.catalog.getTableMetadata(
TableIdentifier(targetTabName, Some("default")))
checkCreateTableLike(sourceView, targetTable, tableType)
}
}
}
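// Verifies the metadata, schema, and data behavior of a table created via CREATE TABLE LIKE
// against its source table or view.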
private def checkCreateTableLike(
sourceTable: CatalogTable,
targetTable: CatalogTable,
tableType: CatalogTableType): Unit = {
// The created table should be a MANAGED table or EXTERNAL table with empty view text
// and original text.
assert(targetTable.tableType == tableType,
s"the created table must be a/an ${tableType.name} table")
assert(targetTable.viewText.isEmpty,
"the view text in the created table must be empty")
assert(targetTable.viewDefaultDatabase.isEmpty,
"the view default database in the created table must be empty")
assert(targetTable.viewQueryColumnNames.isEmpty,
"the view query output columns in the created table must be empty")
assert(targetTable.comment.isEmpty,
"the comment in the created table must be empty")
assert(targetTable.unsupportedFeatures.isEmpty,
"the unsupportedFeatures in the create table must be empty")
val metastoreGeneratedProperties = Seq(
"CreateTime",
"transient_lastDdlTime",
"grantTime",
"lastUpdateTime",
"last_modified_by",
"last_modified_time",
"Owner:",
"totalNumberFiles",
"maxFileSize",
"minFileSize"
)
assert(targetTable.properties.filterKeys(!metastoreGeneratedProperties.contains(_)).isEmpty,
"the table properties of source tables should not be copied in the created table")
if (DDLUtils.isDatasourceTable(sourceTable) ||
sourceTable.tableType == CatalogTableType.VIEW) {
assert(DDLUtils.isDatasourceTable(targetTable),
"the target table should be a data source table")
} else {
assert(!DDLUtils.isDatasourceTable(targetTable),
"the target table should be a Hive serde table")
}
if (sourceTable.tableType == CatalogTableType.VIEW) {
// Source table is a temporary/permanent view, which does not have a provider. The created
// target table uses the default data source format
assert(targetTable.provider == Option(spark.sessionState.conf.defaultDataSourceName))
} else {
assert(targetTable.provider == sourceTable.provider)
}
assert(targetTable.storage.locationUri.nonEmpty, "target table path should not be empty")
// When creating an external table, the user-specified location and the source table's
// location may or may not be the same, so we skip this check in that case.
if (tableType != CatalogTableType.EXTERNAL) {
assert(sourceTable.storage.locationUri != targetTable.storage.locationUri,
"source table/view path should be different from target table path")
}
// The source table contents should not be seen in the target table.
assert(spark.table(sourceTable.identifier).count() != 0, "the source table should be nonempty")
assert(spark.table(targetTable.identifier).count() == 0, "the target table should be empty")
// Their schema should be identical
checkAnswer(
sql(s"DESC ${sourceTable.identifier}"),
sql(s"DESC ${targetTable.identifier}"))
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
// Check that the new table can be populated with the data from the original table
sql(s"INSERT INTO TABLE ${targetTable.identifier} SELECT * FROM ${sourceTable.identifier}")
}
// After insertion, the data should be identical
checkAnswer(
sql(s"SELECT * FROM ${sourceTable.identifier}"),
sql(s"SELECT * FROM ${targetTable.identifier}"))
}
test("create table with the same name as an index table") {
val tabName = "tab1"
val indexName = tabName + "_index"
withTable(tabName) {
// Spark SQL does not support creating indexes. Thus, we have to use the Hive client.
val client =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
sql(s"CREATE TABLE $tabName(a int)")
try {
client.runSqlHive(
s"CREATE INDEX $indexName ON TABLE $tabName (a) AS 'COMPACT' WITH DEFERRED REBUILD")
val indexTabName =
spark.sessionState.catalog.listTables("default", s"*$indexName*").head.table
// Even if index tables exist, listTables and getTable APIs should still work
checkAnswer(
spark.catalog.listTables().toDF(),
Row(indexTabName, "default", null, null, false) ::
Row(tabName, "default", null, "MANAGED", false) :: Nil)
assert(spark.catalog.getTable("default", indexTabName).name === indexTabName)
intercept[TableAlreadyExistsException] {
sql(s"CREATE TABLE $indexTabName(b int)")
}
intercept[TableAlreadyExistsException] {
sql(s"ALTER TABLE $tabName RENAME TO $indexTabName")
}
// When tableExists is not invoked, we can still get an AnalysisException
val e = intercept[AnalysisException] {
sql(s"DESCRIBE $indexTabName")
}.getMessage
assert(e.contains("Hive index table is not supported."))
} finally {
client.runSqlHive(s"DROP INDEX IF EXISTS $indexName ON $tabName")
}
}
}
test("insert skewed table") {
val tabName = "tab1"
withTable(tabName) {
// Spark SQL does not support creating skewed tables. Thus, we have to use the Hive client.
val client =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
client.runSqlHive(
s"""
|CREATE Table $tabName(col1 int, col2 int)
|PARTITIONED BY (part1 string, part2 string)
|SKEWED BY (col1) ON (3, 4) STORED AS DIRECTORIES
""".stripMargin)
val hiveTable =
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tabName, Some("default")))
assert(hiveTable.unsupportedFeatures.contains("skewed columns"))
// Call loadDynamicPartitions against a skewed table with list bucketing enabled
sql(
s"""
|INSERT OVERWRITE TABLE $tabName
|PARTITION (part1='a', part2)
|SELECT 3, 4, 'b'
""".stripMargin)
// Call loadPartitions against a skewed table with list bucketing enabled
sql(
s"""
|INSERT INTO TABLE $tabName
|PARTITION (part1='a', part2='b')
|SELECT 1, 2
""".stripMargin)
checkAnswer(
sql(s"SELECT * from $tabName"),
Row(3, 4, "a", "b") :: Row(1, 2, "a", "b") :: Nil)
}
}
test("desc table for data source table - no user-defined schema") {
Seq("parquet", "json", "orc").foreach { fileFormat =>
withTable("t1") {
withTempPath { dir =>
val path = dir.toURI.toString
spark.range(1).write.format(fileFormat).save(path)
sql(s"CREATE TABLE t1 USING $fileFormat OPTIONS (PATH '$path')")
val desc = sql("DESC FORMATTED t1").collect().toSeq
assert(desc.contains(Row("id", "bigint", null)))
}
}
}
}
test("datasource and statistics table property keys are not allowed") {
import org.apache.spark.sql.hive.HiveExternalCatalog.DATASOURCE_PREFIX
import org.apache.spark.sql.hive.HiveExternalCatalog.STATISTICS_PREFIX
withTable("tbl") {
sql("CREATE TABLE tbl(a INT) STORED AS parquet")
Seq(DATASOURCE_PREFIX, STATISTICS_PREFIX).foreach { forbiddenPrefix =>
val e = intercept[AnalysisException] {
sql(s"ALTER TABLE tbl SET TBLPROPERTIES ('${forbiddenPrefix}foo' = 'loser')")
}
assert(e.getMessage.contains(forbiddenPrefix + "foo"))
val e2 = intercept[AnalysisException] {
sql(s"ALTER TABLE tbl UNSET TBLPROPERTIES ('${forbiddenPrefix}foo')")
}
assert(e2.getMessage.contains(forbiddenPrefix + "foo"))
val e3 = intercept[AnalysisException] {
sql(s"CREATE TABLE tbl2 (a INT) TBLPROPERTIES ('${forbiddenPrefix}foo'='anything')")
}
assert(e3.getMessage.contains(forbiddenPrefix + "foo"))
}
}
}
test("truncate table - datasource table") {
import testImplicits._
val data = (1 to 10).map { i => (i, i) }.toDF("width", "length")
// Test both a Hive-compatible and a Hive-incompatible code path.
Seq("json", "parquet").foreach { format =>
withTable("rectangles") {
data.write.format(format).saveAsTable("rectangles")
assume(spark.table("rectangles").collect().nonEmpty,
"bad test; table was empty to begin with")
sql("TRUNCATE TABLE rectangles")
assert(spark.table("rectangles").collect().isEmpty)
// not supported since the table is not partitioned
val e = intercept[AnalysisException] {
sql("TRUNCATE TABLE rectangles PARTITION (width=1)")
}
assert(e.message.contains("Operation not allowed"))
}
}
}
test("truncate partitioned table - datasource table") {
import testImplicits._
val data = (1 to 10).map { i => (i % 3, i % 5, i) }.toDF("width", "length", "height")
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// supported since partitions are stored in the metastore
sql("TRUNCATE TABLE partTable PARTITION (width=1, length=1)")
assert(spark.table("partTable").filter($"width" === 1).collect().nonEmpty)
assert(spark.table("partTable").filter($"width" === 1 && $"length" === 1).collect().isEmpty)
}
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// support partial partition spec
sql("TRUNCATE TABLE partTable PARTITION (width=1)")
assert(spark.table("partTable").collect().nonEmpty)
assert(spark.table("partTable").filter($"width" === 1).collect().isEmpty)
}
withTable("partTable") {
data.write.partitionBy("width", "length").saveAsTable("partTable")
// do nothing if no partition is matched for the given partial partition spec
sql("TRUNCATE TABLE partTable PARTITION (width=100)")
assert(spark.table("partTable").count() == data.count())
// throw exception if no partition is matched for the given non-partial partition spec.
intercept[NoSuchPartitionException] {
sql("TRUNCATE TABLE partTable PARTITION (width=100, length=100)")
}
// throw exception if the column in partition spec is not a partition column.
val e = intercept[AnalysisException] {
sql("TRUNCATE TABLE partTable PARTITION (unknown=1)")
}
assert(e.message.contains("unknown is not a valid partition column"))
}
}
test("create hive serde table with new syntax") {
withTable("t", "t2", "t3") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) USING hive
|OPTIONS(fileFormat 'orc', compression 'Zlib')
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde == Some("org.apache.hadoop.hive.ql.io.orc.OrcSerde"))
assert(table.storage.properties.get("compression") == Some("Zlib"))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
// Check if this is compressed as ZLIB.
val maybeOrcFile = path.listFiles().find(_.getName.startsWith("part"))
assertCompression(maybeOrcFile, "orc", "ZLIB")
sql("CREATE TABLE t2 USING HIVE AS SELECT 1 AS c1, 'a' AS c2")
val table2 = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t2"))
assert(DDLUtils.isHiveTable(table2))
assert(table2.storage.serde == Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"))
checkAnswer(spark.table("t2"), Row(1, "a"))
sql("CREATE TABLE t3(a int, p int) USING hive PARTITIONED BY (p)")
sql("INSERT INTO t3 PARTITION(p=1) SELECT 0")
checkAnswer(spark.table("t3"), Row(0, 1))
}
}
}
test("create hive serde table with Catalog") {
withTable("t") {
withTempDir { dir =>
val df = spark.catalog.createExternalTable(
"t",
"hive",
new StructType().add("i", "int"),
Map("path" -> dir.getCanonicalPath, "fileFormat" -> "parquet"))
assert(df.collect().isEmpty)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"))
assert(table.storage.serde ==
Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"))
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
}
}
}
test("create hive serde table with DataFrameWriter.saveAsTable") {
withTable("t", "t1") {
Seq(1 -> "a").toDF("i", "j")
.write.format("hive").option("fileFormat", "avro").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a"))
Seq("c" -> 1).toDF("i", "j").write.format("hive")
.mode(SaveMode.Overwrite).option("fileFormat", "parquet").saveAsTable("t")
checkAnswer(spark.table("t"), Row("c", 1))
var table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(table.storage.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"))
assert(table.storage.serde ==
Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"))
Seq(9 -> "x").toDF("i", "j")
.write.format("hive").mode(SaveMode.Overwrite).option("fileFormat", "avro").saveAsTable("t")
checkAnswer(spark.table("t"), Row(9, "x"))
table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat"))
assert(table.storage.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat"))
assert(table.storage.serde ==
Some("org.apache.hadoop.hive.serde2.avro.AvroSerDe"))
val e2 = intercept[AnalysisException] {
Seq(1 -> "a").toDF("i", "j").write.format("hive").bucketBy(4, "i").saveAsTable("t1")
}
assert(e2.message.contains("Creating bucketed Hive serde table is not supported yet"))
val e3 = intercept[AnalysisException] {
spark.table("t").write.format("hive").mode("overwrite").saveAsTable("t")
}
assert(e3.message.contains("Cannot overwrite table default.t that is also being read from"))
}
}
test("append data to hive serde table") {
withTable("t", "t1") {
Seq(1 -> "a").toDF("i", "j")
.write.format("hive").option("fileFormat", "avro").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a"))
sql("INSERT INTO t SELECT 2, 'b'")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Nil)
Seq(3 -> "c").toDF("i", "j")
.write.format("hive").mode("append").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Row(3, "c") :: Nil)
Seq("c" -> 3).toDF("i", "j")
.write.format("hive").mode("append").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Row(3, "c")
:: Row(null, "3") :: Nil)
Seq(4 -> "d").toDF("i", "j").write.saveAsTable("t1")
val e = intercept[AnalysisException] {
Seq(5 -> "e").toDF("i", "j")
.write.format("hive").mode("append").saveAsTable("t1")
}
assert(e.message.contains("The format of the existing table default.t1 is "))
assert(e.message.contains("It doesn't match the specified format `HiveFileFormat`."))
}
}
test("create partitioned hive serde table as select") {
withTable("t", "t1") {
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
Seq(10 -> "y").toDF("i", "j").write.format("hive").partitionBy("i").saveAsTable("t")
checkAnswer(spark.table("t"), Row("y", 10) :: Nil)
Seq((1, 2, 3)).toDF("i", "j", "k").write.mode("overwrite").format("hive")
.partitionBy("j", "k").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, 2, 3) :: Nil)
spark.sql("create table t1 using hive partitioned by (i) as select 1 as i, 'a' as j")
checkAnswer(spark.table("t1"), Row("a", 1) :: Nil)
}
}
}
test("read/write files with hive data source is not allowed") {
withTempDir { dir =>
val e = intercept[AnalysisException] {
spark.read.format("hive").load(dir.getAbsolutePath)
}
assert(e.message.contains("Hive data source can only be used with tables"))
val e2 = intercept[AnalysisException] {
Seq(1 -> "a").toDF("i", "j").write.format("hive").save(dir.getAbsolutePath)
}
assert(e2.message.contains("Hive data source can only be used with tables"))
val e3 = intercept[AnalysisException] {
spark.readStream.format("hive").load(dir.getAbsolutePath)
}
assert(e3.message.contains("Hive data source can only be used with tables"))
val e4 = intercept[AnalysisException] {
spark.readStream.schema(new StructType()).parquet(dir.getAbsolutePath)
.writeStream.format("hive").start(dir.getAbsolutePath)
}
assert(e4.message.contains("Hive data source can only be used with tables"))
}
}
test("partitioned table should always put partition columns at the end of table schema") {
def getTableColumns(tblName: String): Seq[String] = {
spark.sessionState.catalog.getTableMetadata(TableIdentifier(tblName)).schema.map(_.name)
}
val provider = spark.sessionState.conf.defaultDataSourceName
withTable("t", "t1", "t2", "t3", "t4", "t5", "t6") {
sql(s"CREATE TABLE t(a int, b int, c int, d int) USING $provider PARTITIONED BY (d, b)")
assert(getTableColumns("t") == Seq("a", "c", "d", "b"))
sql(s"CREATE TABLE t1 USING $provider PARTITIONED BY (d, b) AS SELECT 1 a, 1 b, 1 c, 1 d")
assert(getTableColumns("t1") == Seq("a", "c", "d", "b"))
Seq((1, 1, 1, 1)).toDF("a", "b", "c", "d").write.partitionBy("d", "b").saveAsTable("t2")
assert(getTableColumns("t2") == Seq("a", "c", "d", "b"))
withTempPath { path =>
val dataPath = new File(new File(path, "d=1"), "b=1").getCanonicalPath
Seq(1 -> 1).toDF("a", "c").write.save(dataPath)
sql(s"CREATE TABLE t3 USING $provider LOCATION '${path.toURI}'")
assert(getTableColumns("t3") == Seq("a", "c", "d", "b"))
}
sql("CREATE TABLE t4(a int, b int, c int, d int) USING hive PARTITIONED BY (d, b)")
assert(getTableColumns("t4") == Seq("a", "c", "d", "b"))
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
sql("CREATE TABLE t5 USING hive PARTITIONED BY (d, b) AS SELECT 1 a, 1 b, 1 c, 1 d")
assert(getTableColumns("t5") == Seq("a", "c", "d", "b"))
Seq((1, 1, 1, 1)).toDF("a", "b", "c", "d").write.format("hive")
.partitionBy("d", "b").saveAsTable("t6")
assert(getTableColumns("t6") == Seq("a", "c", "d", "b"))
}
}
}
test("create hive table with a non-existing location") {
withTable("t", "t1") {
withTempPath { dir =>
spark.sql(s"CREATE TABLE t(a int, b int) USING hive LOCATION '${dir.toURI}'")
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
spark.sql("INSERT INTO TABLE t SELECT 1, 2")
assert(dir.exists())
checkAnswer(spark.table("t"), Row(1, 2))
}
// partition table
withTempPath { dir =>
spark.sql(
s"""
|CREATE TABLE t1(a int, b int)
|USING hive
|PARTITIONED BY(a)
|LOCATION '${dir.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
spark.sql("INSERT INTO TABLE t1 PARTITION(a=1) SELECT 2")
val partDir = new File(dir, "a=1")
assert(partDir.exists())
checkAnswer(spark.table("t1"), Row(2, 1))
}
}
}
Seq(true, false).foreach { shouldDelete =>
val tcName = if (shouldDelete) "non-existing" else "existed"
test(s"CTAS for external hive table with a $tcName location") {
withTable("t", "t1") {
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
withTempDir { dir =>
if (shouldDelete) dir.delete()
spark.sql(
s"""
|CREATE TABLE t
|USING hive
|LOCATION '${dir.toURI}'
|AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
checkAnswer(spark.table("t"), Row(3, 4, 1, 2))
}
// partition table
withTempDir { dir =>
if (shouldDelete) dir.delete()
spark.sql(
s"""
|CREATE TABLE t1
|USING hive
|PARTITIONED BY(a, b)
|LOCATION '${dir.toURI}'
|AS SELECT 3 as a, 4 as b, 1 as c, 2 as d
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(dir.getAbsolutePath))
val partDir = new File(dir, "a=3")
assert(partDir.exists())
checkAnswer(spark.table("t1"), Row(1, 2, 3, 4))
}
}
}
}
}
Seq("parquet", "hive").foreach { datasource =>
Seq("a b", "a:b", "a%b", "a,b").foreach { specialChars =>
test(s"partition column name of $datasource table containing $specialChars") {
withTable("t") {
withTempDir { dir =>
spark.sql(
s"""
|CREATE TABLE t(a string, `$specialChars` string)
|USING $datasource
|PARTITIONED BY(`$specialChars`)
|LOCATION '${dir.toURI}'
""".stripMargin)
assert(dir.listFiles().isEmpty)
spark.sql(s"INSERT INTO TABLE t PARTITION(`$specialChars`=2) SELECT 1")
val partEscaped = s"${ExternalCatalogUtils.escapePathName(specialChars)}=2"
val partFile = new File(dir, partEscaped)
assert(partFile.listFiles().nonEmpty)
checkAnswer(spark.table("t"), Row("1", "2") :: Nil)
withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
spark.sql(s"INSERT INTO TABLE t PARTITION(`$specialChars`) SELECT 3, 4")
val partEscaped1 = s"${ExternalCatalogUtils.escapePathName(specialChars)}=4"
val partFile1 = new File(dir, partEscaped1)
assert(partFile1.listFiles().nonEmpty)
checkAnswer(spark.table("t"), Row("1", "2") :: Row("3", "4") :: Nil)
}
}
}
}
}
}
Seq("a b", "a:b", "a%b").foreach { specialChars =>
test(s"hive table: location uri contains $specialChars") {
      // On Windows, a colon in the file name appears to be illegal by default. See
// https://support.microsoft.com/en-us/help/289627
assume(!Utils.isWindows || specialChars != "a:b")
withTable("t") {
withTempDir { dir =>
val loc = new File(dir, specialChars)
loc.mkdir()
          // The parser does not recognize backslashes on Windows as-is,
          // so they currently need to be escaped.
val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
spark.sql(
s"""
|CREATE TABLE t(a string)
|USING hive
|LOCATION '$escapedLoc'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
assert(new Path(table.location).toString.contains(specialChars))
assert(loc.listFiles().isEmpty)
if (specialChars != "a:b") {
spark.sql("INSERT INTO TABLE t SELECT 1")
assert(loc.listFiles().length >= 1)
checkAnswer(spark.table("t"), Row("1") :: Nil)
} else {
val e = intercept[AnalysisException] {
spark.sql("INSERT INTO TABLE t SELECT 1")
}.getMessage
assert(e.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b"))
}
}
withTempDir { dir =>
val loc = new File(dir, specialChars)
loc.mkdir()
val escapedLoc = loc.getAbsolutePath.replace("\\\\", "\\\\\\\\")
spark.sql(
s"""
|CREATE TABLE t1(a string, b string)
|USING hive
|PARTITIONED BY(b)
|LOCATION '$escapedLoc'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t1"))
assert(table.location == makeQualifiedPath(loc.getAbsolutePath))
assert(new Path(table.location).toString.contains(specialChars))
assert(loc.listFiles().isEmpty)
if (specialChars != "a:b") {
spark.sql("INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1")
val partFile = new File(loc, "b=2")
assert(partFile.listFiles().nonEmpty)
checkAnswer(spark.table("t1"), Row("1", "2") :: Nil)
spark.sql("INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1")
val partFile1 = new File(loc, "b=2017-03-03 12:13%3A14")
assert(!partFile1.exists())
if (!Utils.isWindows) {
// Actual path becomes "b=2017-03-03%2012%3A13%253A14" on Windows.
val partFile2 = new File(loc, "b=2017-03-03 12%3A13%253A14")
assert(partFile2.listFiles().nonEmpty)
checkAnswer(spark.table("t1"),
Row("1", "2") :: Row("1", "2017-03-03 12:13%3A14") :: Nil)
}
} else {
val e = intercept[AnalysisException] {
spark.sql("INSERT INTO TABLE t1 PARTITION(b=2) SELECT 1")
}.getMessage
assert(e.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b"))
val e1 = intercept[AnalysisException] {
spark.sql("INSERT INTO TABLE t1 PARTITION(b='2017-03-03 12:13%3A14') SELECT 1")
}.getMessage
assert(e1.contains("java.net.URISyntaxException: Relative path in absolute URI: a:b"))
}
}
}
}
}
test("SPARK-19905: Hive SerDe table input paths") {
withTable("spark_19905") {
withTempView("spark_19905_view") {
spark.range(10).createOrReplaceTempView("spark_19905_view")
sql("CREATE TABLE spark_19905 STORED AS RCFILE AS SELECT * FROM spark_19905_view")
assert(spark.table("spark_19905").inputFiles.nonEmpty)
assert(sql("SELECT input_file_name() FROM spark_19905").count() > 0)
}
}
}
hiveFormats.foreach { tableType =>
test(s"alter hive serde table add columns -- partitioned - $tableType") {
withTable("tab") {
sql(
s"""
|CREATE TABLE tab (c1 int, c2 int)
|PARTITIONED BY (c3 int) STORED AS $tableType
""".stripMargin)
sql("INSERT INTO tab PARTITION (c3=1) VALUES (1, 2)")
sql("ALTER TABLE tab ADD COLUMNS (c4 int)")
checkAnswer(
sql("SELECT * FROM tab WHERE c3 = 1"),
Seq(Row(1, 2, null, 1))
)
assert(spark.table("tab").schema
.contains(StructField("c4", IntegerType)))
sql("INSERT INTO tab PARTITION (c3=2) VALUES (2, 3, 4)")
checkAnswer(
spark.table("tab"),
Seq(Row(1, 2, null, 1), Row(2, 3, 4, 2))
)
checkAnswer(
sql("SELECT * FROM tab WHERE c3 = 2 AND c4 IS NOT NULL"),
Seq(Row(2, 3, 4, 2))
)
sql("ALTER TABLE tab ADD COLUMNS (c5 char(10))")
assert(spark.table("tab").schema.find(_.name == "c5")
.get.metadata.getString("HIVE_TYPE_STRING") == "char(10)")
}
}
}
hiveFormats.foreach { tableType =>
test(s"alter hive serde table add columns -- with predicate - $tableType ") {
withTable("tab") {
sql(s"CREATE TABLE tab (c1 int, c2 int) STORED AS $tableType")
sql("INSERT INTO tab VALUES (1, 2)")
sql("ALTER TABLE tab ADD COLUMNS (c4 int)")
checkAnswer(
sql("SELECT * FROM tab WHERE c4 IS NULL"),
Seq(Row(1, 2, null))
)
assert(spark.table("tab").schema
.contains(StructField("c4", IntegerType)))
sql("INSERT INTO tab VALUES (2, 3, 4)")
checkAnswer(
sql("SELECT * FROM tab WHERE c4 = 4 "),
Seq(Row(2, 3, 4))
)
checkAnswer(
spark.table("tab"),
Seq(Row(1, 2, null), Row(2, 3, 4))
)
}
}
}
Seq(true, false).foreach { caseSensitive =>
test(s"alter add columns with existing column name - caseSensitive $caseSensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> s"$caseSensitive") {
withTable("tab") {
sql("CREATE TABLE tab (c1 int) PARTITIONED BY (c2 int) STORED AS PARQUET")
if (!caseSensitive) {
// duplicating partitioning column name
val e1 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C2 string)")
}.getMessage
assert(e1.contains("Found duplicate column(s)"))
// duplicating data column name
val e2 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C1 string)")
}.getMessage
assert(e2.contains("Found duplicate column(s)"))
} else {
            // the Hive catalog still complains that c2 is a duplicate column name because Hive
            // identifiers are case-insensitive.
val e1 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C2 string)")
}.getMessage
assert(e1.contains("HiveException"))
            // the Hive catalog still complains that c1 is a duplicate column name because Hive
            // identifiers are case-insensitive.
val e2 = intercept[AnalysisException] {
sql("ALTER TABLE tab ADD COLUMNS (C1 string)")
}.getMessage
assert(e2.contains("HiveException"))
}
}
}
}
}
test("SPARK-21216: join with a streaming DataFrame") {
import org.apache.spark.sql.execution.streaming.MemoryStream
import testImplicits._
implicit val _sqlContext = spark.sqlContext
Seq((1, "one"), (2, "two"), (4, "four")).toDF("number", "word").createOrReplaceTempView("t1")
// Make a table and ensure it will be broadcast.
sql("""CREATE TABLE smallTable(word string, number int)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|STORED AS TEXTFILE
""".stripMargin)
sql(
"""INSERT INTO smallTable
|SELECT word, number from t1
""".stripMargin)
val inputData = MemoryStream[Int]
val joined = inputData.toDS().toDF()
.join(spark.table("smallTable"), $"value" === $"number")
val sq = joined.writeStream
.format("memory")
.queryName("t2")
.start()
try {
inputData.addData(1, 2)
sq.processAllAvailable()
checkAnswer(
spark.table("t2"),
Seq(Row(1, "one", 1), Row(2, "two", 2))
)
} finally {
sq.stop()
}
}
test("table name with schema") {
// regression test for SPARK-11778
withDatabase("usrdb") {
spark.sql("create schema usrdb")
withTable("usrdb.test") {
spark.sql("create table usrdb.test(c int)")
spark.read.table("usrdb.test")
}
}
}
private def assertCompression(maybeFile: Option[File], format: String, compression: String) = {
assert(maybeFile.isDefined)
val actualCompression = format match {
case "orc" =>
OrcFileOperator.getFileReader(maybeFile.get.toPath.toString).get.getCompression.name
case "parquet" =>
val footer = ParquetFileReader.readFooter(
sparkContext.hadoopConfiguration, new Path(maybeFile.get.getPath), NO_FILTER)
footer.getBlocks.get(0).getColumns.get(0).getCodec.toString
}
assert(compression === actualCompression)
}
Seq(("orc", "ZLIB"), ("parquet", "GZIP")).foreach { case (fileFormat, compression) =>
test(s"SPARK-22158 convertMetastore should not ignore table property - $fileFormat") {
withSQLConf(CONVERT_METASTORE_ORC.key -> "true", CONVERT_METASTORE_PARQUET.key -> "true") {
withTable("t") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) USING hive
|OPTIONS(fileFormat '$fileFormat', compression '$compression')
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde.get.contains(fileFormat))
assert(table.storage.properties.get("compression") == Some(compression))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
val maybeFile = path.listFiles().find(_.getName.startsWith("part"))
assertCompression(maybeFile, fileFormat, compression)
}
}
}
}
}
private def getReader(path: String): org.apache.orc.Reader = {
val conf = spark.sessionState.newHadoopConf()
val files = org.apache.spark.sql.execution.datasources.orc.OrcUtils.listOrcFiles(path, conf)
assert(files.length == 1)
val file = files.head
val fs = file.getFileSystem(conf)
val readerOptions = org.apache.orc.OrcFile.readerOptions(conf).filesystem(fs)
org.apache.orc.OrcFile.createReader(file, readerOptions)
}
test("SPARK-23355 convertMetastoreOrc should not ignore table properties - STORED AS") {
Seq("native", "hive").foreach { orcImpl =>
withSQLConf(ORC_IMPLEMENTATION.key -> orcImpl, CONVERT_METASTORE_ORC.key -> "true") {
withTable("t") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) STORED AS ORC
|TBLPROPERTIES (
| orc.compress 'ZLIB',
| orc.compress.size '1001',
| orc.row.index.stride '2002',
| hive.exec.orc.default.block.size '3003',
| hive.exec.orc.compression.strategy 'COMPRESSION')
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde.get.contains("orc"))
val properties = table.properties
assert(properties.get("orc.compress") == Some("ZLIB"))
assert(properties.get("orc.compress.size") == Some("1001"))
assert(properties.get("orc.row.index.stride") == Some("2002"))
assert(properties.get("hive.exec.orc.default.block.size") == Some("3003"))
assert(properties.get("hive.exec.orc.compression.strategy") == Some("COMPRESSION"))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
val maybeFile = path.listFiles().find(_.getName.startsWith("part"))
val reader = getReader(maybeFile.head.getCanonicalPath)
assert(reader.getCompressionKind.name === "ZLIB")
assert(reader.getCompressionSize == 1001)
assert(reader.getRowIndexStride == 2002)
}
}
}
}
}
test("SPARK-23355 convertMetastoreParquet should not ignore table properties - STORED AS") {
withSQLConf(CONVERT_METASTORE_PARQUET.key -> "true") {
withTable("t") {
withTempPath { path =>
sql(
s"""
|CREATE TABLE t(id int) STORED AS PARQUET
|TBLPROPERTIES (
| parquet.compression 'GZIP'
|)
|LOCATION '${path.toURI}'
""".stripMargin)
val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
assert(DDLUtils.isHiveTable(table))
assert(table.storage.serde.get.contains("parquet"))
val properties = table.properties
assert(properties.get("parquet.compression") == Some("GZIP"))
assert(spark.table("t").collect().isEmpty)
sql("INSERT INTO t SELECT 1")
checkAnswer(spark.table("t"), Row(1))
val maybeFile = path.listFiles().find(_.getName.startsWith("part"))
assertCompression(maybeFile, "parquet", "GZIP")
}
}
}
}
test("load command for non local invalid path validation") {
withTable("tbl") {
sql("CREATE TABLE tbl(i INT, j STRING)")
val e = intercept[AnalysisException](
sql("load data inpath '/doesnotexist.csv' into table tbl"))
assert(e.message.contains("LOAD DATA input path does not exist"))
}
}
test("SPARK-22252: FileFormatWriter should respect the input query schema in HIVE") {
withTable("t1", "t2", "t3", "t4") {
spark.range(1).select('id as 'col1, 'id as 'col2).write.saveAsTable("t1")
spark.sql("select COL1, COL2 from t1").write.format("hive").saveAsTable("t2")
checkAnswer(spark.table("t2"), Row(0, 0))
// Test picking part of the columns when writing.
spark.range(1).select('id, 'id as 'col1, 'id as 'col2).write.saveAsTable("t3")
spark.sql("select COL1, COL2 from t3").write.format("hive").saveAsTable("t4")
checkAnswer(spark.table("t4"), Row(0, 0))
}
}
test("SPARK-24812: desc formatted table for last access verification") {
withTable("t1") {
sql(
"CREATE TABLE IF NOT EXISTS t1 (c1_int INT, c2_string STRING, c3_float FLOAT)")
val desc = sql("DESC FORMATTED t1").filter($"col_name".startsWith("Last Access"))
.select("data_type")
      // check that the last access time does not fall back to the default year
      // 1970, which would indicate a wrong access time
assert(!(desc.first.toString.contains("1970")))
}
}
test("SPARK-24681 checks if nested column names do not include ',', ':', and ';'") {
val expectedMsg = "Cannot create a table having a nested column whose name contains invalid " +
"characters (',', ':', ';') in Hive metastore."
Seq("nested,column", "nested:column", "nested;column").foreach { nestedColumnName =>
withTable("t") {
val e = intercept[AnalysisException] {
spark.range(1)
.select(struct(lit(0).as(nestedColumnName)).as("toplevel"))
.write
.format("hive")
.saveAsTable("t")
}.getMessage
assert(e.contains(expectedMsg))
}
}
}
}
|
rekhajoshm/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveDDLSuite.scala
|
Scala
|
apache-2.0
| 92,238 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import cats.effect.IO
import monix.catnap.SchedulerEffect
import monix.execution.exceptions.DummyException
import scala.util.{Failure, Success}
object TaskLiftSuite extends BaseTestSuite {
import TaskConversionsSuite.{CIO, CustomConcurrentEffect, CustomEffect}
test("task.to[Task]") { _ =>
val task = Task(1)
val conv = task.to[Task]
assertEquals(task, conv)
}
test("task.to[IO]") { implicit s =>
val task = Task(1)
val io = task.to[IO]
val f = io.unsafeToFuture()
s.tick()
assertEquals(f.value, Some(Success(1)))
}
test("task.to[IO] for errors") { implicit s =>
val dummy = DummyException("dummy")
val task = Task.raiseError(dummy)
val io = task.to[IO]
val f = io.unsafeToFuture()
s.tick()
assertEquals(f.value, Some(Failure(dummy)))
}
test("task.to[Effect]") { implicit s =>
implicit val cs = SchedulerEffect.contextShift[IO](s)
implicit val F = new CustomEffect()
val task = Task(1)
val io = task.to[CIO]
val f = io.io.unsafeToFuture()
s.tick()
assertEquals(f.value, Some(Success(1)))
}
test("task.to[Effect] for errors") { implicit s =>
implicit val cs = SchedulerEffect.contextShift[IO](s)
implicit val F = new CustomEffect()
val dummy = DummyException("dummy")
val task = Task.raiseError(dummy)
val io = task.to[CIO]
val f = io.io.unsafeToFuture()
s.tick()
assertEquals(f.value, Some(Failure(dummy)))
}
test("task.to[ConcurrentEffect]") { implicit s =>
implicit val cs = SchedulerEffect.contextShift[IO](s)
implicit val F = new CustomConcurrentEffect()
val task = Task(1)
val io = task.to[CIO]
val f = io.io.unsafeToFuture()
s.tick()
assertEquals(f.value, Some(Success(1)))
}
test("task.to[ConcurrentEffect] for errors") { implicit s =>
implicit val cs = SchedulerEffect.contextShift[IO](s)
implicit val F = new CustomConcurrentEffect()
val dummy = DummyException("dummy")
val task = Task.raiseError(dummy)
val io = task.to[CIO]
val f = io.io.unsafeToFuture()
s.tick()
assertEquals(f.value, Some(Failure(dummy)))
}
}
|
alexandru/monifu
|
monix-eval/shared/src/test/scala/monix/eval/TaskLiftSuite.scala
|
Scala
|
apache-2.0
| 2,842 |
package pl.writeonly.son2.path.providers
import pl.writeonly.son2.apis.converters.Converter
import pl.writeonly.son2.funs.liners.{Liner, LinerOpt}
import pl.writeonly.son2.funs.streamers.{Streamer, StreamerPipeForeach}
import pl.writeonly.son2.path.chain.ChainNotationPairPath
import pl.writeonly.son2.path.core.{ConfigPath, ProvidersPath}
import pl.writeonly.scalaops.specs.GrayVectorSpec
class StrictVectorSpec extends GrayVectorSpec {
val toSuccess = Table(
("in", "out"),
("0", "0"),
("\"a\"", "\"a\""),
("[]", "[]"),
("[0]", "[0]"),
("[0,1]", "[0,1]"),
("{}", "{}"),
("{\"a\":0}", "{\"a\":0}"),
("{\"a\":0,\"b\":1}", "{\"a\":0,\"b\":1}"),
("[{}]", "[{}]"),
("{\"a\":[]}", "{\"a\":[]}")
)
val toFailure = Table("in", "a")
val provider: Converter = ChainNotationPairPath(
ConfigPath(provider = ProvidersPath.STRICT)
).get
val liner: Liner = new LinerOpt(provider)
property("convert son to smart by provider") {
forAll(toSuccess) { (in, out) =>
provider convert in shouldBe out
}
}
val streamer: Streamer = new StreamerPipeForeach(liner)
property("convert son to smart by liner") {
forAll(toSuccess) { (in, out) =>
liner.apply(in) should be(out + "\n")
}
}
property("fail convert son to smart by liner") {
forAll(toFailure) { in =>
liner.apply(in) should be(provider.comment(in) + "\n")
}
}
property("convert son to smart by streamer") {
forAll(toSuccess) { (in, out) =>
streamer.convertString(in) should be(out + "\n")
}
}
property("fail convert son to smart by streamer") {
forAll(toFailure) { in =>
streamer.convertString(in) should be(provider.comment(in) + "\n")
}
}
property("convert son to smart by native streamer") {
forAll(toSuccess) { (in, out) =>
streamer.convertStringNative(in) should be(out + "\n")
}
}
property("fail convert son to smart by native streamer") {
forAll(toFailure) { in =>
streamer.convertStringNative(in) should be(provider.comment(in) + "\n")
}
}
}
|
writeonly/son2
|
scallions-impl/scallions-path/src/test/scala/pl/writeonly/son2/path/providers/StrictVectorSpec.scala
|
Scala
|
apache-2.0
| 2,077 |
package com.tribetron.editor.io
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import com.tribetron.editor.objects.TribetronMap
import java.io.{ FileNotFoundException, IOException, File, FileOutputStream }
object MapFileUtil {
private val folder = "maps/"
private val postFix = ".json"
private def createFolderIfItDoesNotExist = {
val file = new File(folder)
if(!file.isDirectory())
file.mkdir()
}
def writeMap(map: TribetronMap, name: String, story: String) = {
createFolderIfItDoesNotExist
val fos = new FileOutputStream(folder + name + postFix)
fos.write(pretty(render(map.toJson(story))).getBytes)
fos.close()
}
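  // Illustrative usage sketch (not part of the original file); the map value,
  // name and story text below are hypothetical:
  //   MapFileUtil.writeMap(map, "level-1", "Once upon a time...")
  // would create maps/level-1.json containing the pretty-printed JSON produced
  // by map.toJson(story).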
}
|
Humakt83/tribetron-editor
|
src/main/scala/com/tribetron/editor/io/MapFileUtil.scala
|
Scala
|
mit
| 693 |
package bifrost.tokenBoxRegistry
import java.io.File
import bifrost.forging.ForgingSettings
import bifrost.scorexMod.GenericMinimalState.VersionTag
import bifrost.state.BifrostState.{BX, GSC}
import bifrost.transaction.box._
import bifrost.transaction.box.proposition.PublicKey25519Proposition
import bifrost.utils.ScorexLogging
import io.iohk.iodb.{ByteArrayWrapper, LSMStore}
import scorex.crypto.encode.Base58
import scala.util.Try
case class TokenBoxRegistry(tbrStore: LSMStore, stateStore: LSMStore) extends ScorexLogging {
def closedBox(boxId: Array[Byte]): Option[BifrostBox] =
stateStore.get(ByteArrayWrapper(boxId))
.map(_.data)
.map(BifrostBoxSerializer.parseBytes)
.flatMap(_.toOption)
def boxIdsByKey(publicKey: PublicKey25519Proposition): Seq[Array[Byte]] =
boxIdsByKey(publicKey.pubKeyBytes)
//Assumes that boxIds are fixed length equal to store's keySize (32 bytes)
def boxIdsByKey(pubKeyBytes: Array[Byte]): Seq[Array[Byte]] =
tbrStore
.get(ByteArrayWrapper(pubKeyBytes))
.map(_
.data
.grouped(stateStore.keySize)
.toSeq)
.getOrElse(Seq[Array[Byte]]())
def boxesByKey(publicKey: PublicKey25519Proposition): Seq[BifrostBox] =
boxesByKey(publicKey.pubKeyBytes)
def boxesByKey(pubKeyBytes: Array[Byte]): Seq[BifrostBox] = {
boxIdsByKey(pubKeyBytes)
.map(id => closedBox(id))
.filter {
case box: Some[BifrostBox] => true
case None => false
}
.map(_.get)
}
  /**
    * @param newVersion - block id
    * @param keyFilteredBoxIdsToRemove - box ids to remove, already filtered by key, extracted from the block (in BifrostState)
    * @param keyFilteredBoxesToAdd - boxes to append, already filtered by key, extracted from the block (in BifrostState)
    * @return - instance of updated TokenBoxRegistry
    * (Note - uses vars for local bookkeeping to improve runtime; the function itself remains pure)
    *
    * Runtime complexity of the function below is O(MN) + O(L)
    * where M = number of boxes to remove
    *       N = number of boxes owned by a public key
    *       L = number of boxes to append
    *
    * See the illustrative sketch after this method for the bookkeeping steps.
    */
//noinspection ScalaStyle
def updateFromState(newVersion: VersionTag, keyFilteredBoxIdsToRemove: Set[Array[Byte]], keyFilteredBoxesToAdd: Set[BifrostBox]): Try[TokenBoxRegistry] = Try {
log.debug(s"${Console.GREEN} Update TokenBoxRegistry to version: ${Base58.encode(newVersion)}${Console.RESET}")
/* This seeks to avoid the scenario where there is remove and then update of the same keys */
val boxIdsToRemove: Set[ByteArrayWrapper] = (keyFilteredBoxIdsToRemove -- keyFilteredBoxesToAdd.map(b => b.id)).map(ByteArrayWrapper.apply)
var boxesToRemove: Map[Array[Byte], Array[Byte]] = Map()
var boxesToAppend: Map[Array[Byte], Array[Byte]] = Map()
//Getting set of public keys for boxes being removed and appended
//Using ByteArrayWrapper for sets since equality method uses a deep compare unlike a set of byte arrays
val keysSet: Set[ByteArrayWrapper] = {
boxIdsToRemove
.flatMap(boxId => closedBox(boxId.data))
.foreach(box => box match {
case box: BifrostPublic25519NoncedBox =>
boxesToRemove += (box.id -> box.proposition.pubKeyBytes)
          // For boxes that are not a BifrostPublic25519NoncedBox (i.e. not token boxes) - do nothing
case _ =>
})
keyFilteredBoxesToAdd
.foreach({
case box: BifrostPublic25519NoncedBox =>
boxesToAppend += (box.id -> box.proposition.pubKeyBytes)
          // For boxes that are not a BifrostPublic25519NoncedBox (i.e. not token boxes) - do nothing
case _ =>
})
(boxesToRemove.map(boxToKey => ByteArrayWrapper(boxToKey._2)) ++ boxesToAppend.map(boxToKey => ByteArrayWrapper(boxToKey._2))).toSet
}
//Get old boxIds list for each of the above public keys
var keysToBoxIds: Map[ByteArrayWrapper, Seq[Array[Byte]]] = keysSet.map(
publicKey => publicKey -> boxIdsByKey(publicKey.data)
).toMap
//For each box in temporary map match against public key and remove/append to boxIdsList in original keysToBoxIds map
for((boxId, publicKey) <- boxesToRemove) {
keysToBoxIds += (ByteArrayWrapper(publicKey) -> keysToBoxIds(ByteArrayWrapper(publicKey)).filterNot(_ sameElements boxId))
}
for((boxId, publicKey) <- boxesToAppend) {
//Prepending to list is O(1) while appending is O(n)
keysToBoxIds += (ByteArrayWrapper(publicKey) -> (boxId +: keysToBoxIds(ByteArrayWrapper(publicKey))))
}
tbrStore.update(
ByteArrayWrapper(newVersion),
Seq(),
keysToBoxIds.map(element =>
element._1 -> ByteArrayWrapper(element._2.flatten.toArray))
)
TokenBoxRegistry(tbrStore, stateStore)
}
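  // Illustrative sketch (not part of the original file) of the bookkeeping done
  // in updateFromState, using hypothetical ids/keys: given a public key K that
  // currently owns boxes [b1, b2], removing b1 and appending b3 proceeds as
  //   keysToBoxIds: K -> Seq(b1, b2)   (initial lookup via boxIdsByKey)
  //   keysToBoxIds: K -> Seq(b2)       (filterNot drops the removed id)
  //   keysToBoxIds: K -> Seq(b3, b2)   (O(1) prepend of the appended id)
  // and the per-key box id lists are then flattened to byte arrays and written
  // to tbrStore under version newVersion.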
def rollbackTo(version: VersionTag, stateStore: LSMStore): Try[TokenBoxRegistry] = Try {
if (tbrStore.lastVersionID.exists(_.data sameElements version)) {
this
} else {
log.debug(s"Rolling back TokenBoxRegistry to: ${Base58.encode(version)}")
tbrStore.rollback(ByteArrayWrapper(version))
TokenBoxRegistry(tbrStore, stateStore)
}
}
}
object TokenBoxRegistry extends ScorexLogging {
def apply(s1: LSMStore, s2: LSMStore) : TokenBoxRegistry = {
new TokenBoxRegistry (s1, s2)
}
def readOrGenerate(settings: ForgingSettings, stateStore: LSMStore): Option[TokenBoxRegistry] = {
val tbrDirOpt = settings.tbrDirOpt
val logDirOpt = settings.logDirOpt
tbrDirOpt.map(readOrGenerate(_, logDirOpt, settings, stateStore))
}
def readOrGenerate(tbrDir: String, logDirOpt: Option[String], settings: ForgingSettings, stateStore: LSMStore): TokenBoxRegistry = {
val iFile = new File(s"$tbrDir")
iFile.mkdirs()
val tbrStore = new LSMStore(iFile)
Runtime.getRuntime.addShutdownHook(new Thread() {
override def run(): Unit = {
log.info("Closing tokenBoxRegistry storage...")
tbrStore.close()
}
})
TokenBoxRegistry(tbrStore, stateStore)
}
}
|
Topl/Project-Bifrost
|
src/main/scala/bifrost/tokenBoxRegistry/TokenBoxRegistry.scala
|
Scala
|
mpl-2.0
| 5,991 |
/* Title: Pure/Tools/update_comments.scala
Author: Makarius
Update formal comments in outer syntax: \\<comment> \\<open>...\\<close>
*/
package isabelle
import scala.annotation.tailrec
object Update_Comments
{
def update_comments(path: Path)
{
def make_comment(tok: Token): String =
Symbol.comment + Symbol.space + Symbol.cartouche(Symbol.trim_blanks(tok.content))
@tailrec def update(toks: List[Token], result: List[String]): String =
{
toks match {
case tok :: rest
if tok.source == "--" || tok.source == Symbol.comment =>
rest.dropWhile(_.is_space) match {
case tok1 :: rest1 if tok1.is_text =>
update(rest1, make_comment(tok1) :: result)
case _ => update(rest, tok.source :: result)
}
case tok :: rest if tok.is_formal_comment && tok.source.startsWith(Symbol.comment) =>
update(rest, make_comment(tok) :: result)
case tok :: rest => update(rest, tok.source :: result)
case Nil => result.reverse.mkString
}
}
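    // Illustrative sketch (not part of the original file), using a hypothetical
    // theory line: an informal comment written as
    //   -- \<open>trivial\<close>
    // (i.e. "--" followed by a text token) is rewritten to the formal form
    //   \<comment> \<open>trivial\<close>
    // with the token content trimmed and wrapped in a cartouche by make_comment.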
val text0 = File.read(path)
val text1 = update(Token.explode(Keyword.Keywords.empty, text0), Nil)
if (text0 != text1) {
Output.writeln("changing " + path)
File.write_backup2(path, text1)
}
}
/* Isabelle tool wrapper */
val isabelle_tool =
Isabelle_Tool("update_comments", "update formal comments in outer syntax", args =>
{
val getopts = Getopts("""
Usage: isabelle update_comments [FILES|DIRS...]
Recursively find .thy files and update formal comments in outer syntax.
Old versions of files are preserved by appending "~~".
""")
val specs = getopts(args)
if (specs.isEmpty) getopts.usage()
for {
spec <- specs
file <- File.find_files(Path.explode(spec).file, file => file.getName.endsWith(".thy"))
} update_comments(File.path(file))
})
}
|
larsrh/libisabelle
|
modules/pide/2019-RC4/src/main/scala/Tools/update_comments.scala
|
Scala
|
apache-2.0
| 1,920 |
package test
import org.scalatest.WordSpec
class ExampleWordSpec extends WordSpec {
"A Set" when {
"empty" should {
"have size 0" in {
assert(Set.empty.size == 0)
}
"produce NoSuchElementException when head is invoked" in {
intercept[NoSuchElementException] {
Set.empty.head
}
}
}
}
}
|
SRGOM/scalatest
|
scalatest-test.js/src/test/scala/test/ExampleWordSpec.scala
|
Scala
|
apache-2.0
| 358 |
package breeze.stats.distributions
/**
*
* @author dlwh
*/
trait SufficientStatistic[T<:SufficientStatistic[T]] { this: T=>
def +(t: T):T
def *(weight: Double):T
}
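// Illustrative sketch (not part of the original file): a minimal concrete
// implementation accumulating a weighted sum and total weight, assuming this
// is the typical shape of implementers of the trait.
//
//   case class MeanStat(sum: Double, weight: Double) extends SufficientStatistic[MeanStat] {
//     def +(t: MeanStat): MeanStat = MeanStat(sum + t.sum, weight + t.weight)
//     def *(w: Double): MeanStat = MeanStat(sum * w, weight * w)
//   }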
|
ktakagaki/breeze
|
src/main/scala/breeze/stats/distributions/SufficientStatistic.scala
|
Scala
|
apache-2.0
| 173 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check.extractor.jsonpath
import io.gatling.core.json.JsonParsers
trait JsonSample {
def value: String
def boonAST(jsonParsers: JsonParsers) = jsonParsers.boon.parse(value)
def jacksonAST(jsonParsers: JsonParsers) = jsonParsers.jackson.parse(value)
}
|
MykolaB/gatling
|
gatling-core/src/test/scala/io/gatling/core/check/extractor/jsonpath/JsonSample.scala
|
Scala
|
apache-2.0
| 903 |
// Copyright 2012 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.twofishes.util
trait TwofishesLogger {
def ifDebug(formatSpecifier: String, va: Any*): Unit
def logDuration[T](ostrichKey: String, what: String)(f: => T): T
def ifLevelDebug(level: Int, formatSpecifier: String, va: Any*): Unit
def getLines: List[String]
}
|
foursquare/fsqio
|
src/jvm/io/fsq/twofishes/util/Logger.scala
|
Scala
|
apache-2.0
| 340 |
package common.build
package object controllers {
type AssetsBuilder = _root_.controllers.AssetsBuilder
type Assets = _root_.controllers.Assets
}
|
wsargent/playframework
|
documentation/manual/working/commonGuide/build/code/SubProjectAssets.scala
|
Scala
|
apache-2.0
| 152 |
package controllers
import javax.inject.Inject
import java.util.UUID
import play.twirl.api.Html
import play.api.mvc.{Action, Controller}
import play.api.libs.ws._
import play.api.libs.json.{ Json, JsObject }
import play.api.Configuration
import scala.concurrent.{ ExecutionContext, Future }
import db.dao.UsersDao
import com.trifectalabs.roadquality.v0.models.json._
import util.actions.Authenticated
import util._
import models.EmailSignup
class Main @Inject() (jwtUtil: JwtUtil, wsClient: WSClient, config: Configuration)(implicit ec: ExecutionContext) extends Controller with Metrics {
def app(tokenOpt: Option[String]) = Action { request =>
tokenOpt flatMap { token =>
jwtUtil.decodeToken(token) map { user =>
Ok(views.html.index((Json.toJson(user).as[JsObject] + ("token" -> Json.toJson(token))).toString))
}
} getOrElse(Ok(views.html.index("")))
}
def notFound(path: String) = Action { request =>
NotFound(views.html.notFound())
}
def addEmail = Action.async(parse.json[EmailSignup]) { request =>
lazy val mailchimpToken = config.getString("mailchimp.token").get
val signup = request.body
(wsClient
.url("https://us12.api.mailchimp.com/3.0/lists/8271d03ba2/members")
.withAuth("", mailchimpToken, WSAuthScheme.BASIC)
.post(Json.toJson(signup))) map { resp =>
if (resp.status == 200) {
webMetrics.counter("email_signups") += 1
Ok(s"Signed up ${signup.email_address}")
} else if (resp.status == 400) {
BadRequest(Json.parse(resp.body)("title"))
} else {
InternalServerError("Something went wrong.")
}
}
}
}
|
trifectalabs/roadquality
|
web/app/controllers/Main.scala
|
Scala
|
bsd-3-clause
| 1,672 |
package com.novocode.junit
import com.novocode.junit.Ansi._
import java.util.HashSet
import scala.util.Try
class RunSettings private (val color: Boolean, val decodeScalaNames: Boolean,
val quiet: Boolean, val verbose: Boolean, val logAssert: Boolean,
val logExceptionClass: Boolean) {
private val ignoreRunnersSet = new HashSet[String]
def this(color: Boolean, decodeScalaNames: Boolean, quiet: Boolean,
verbose: Boolean, logAssert: Boolean, ignoreRunners: String,
logExceptionClass: Boolean) = {
this(color, decodeScalaNames, quiet, verbose, logAssert, logExceptionClass)
for (s <- ignoreRunners.split(","))
ignoreRunnersSet.add(s.trim)
}
def decodeName(name: String): String =
if (decodeScalaNames) RunSettings.decodeScalaName(name) else name
def buildColoredMessage(t: Throwable, c1: String): String = {
if (t == null) "null" else {
if (!logExceptionClass || (!logAssert && t.isInstanceOf[AssertionError])) {
t.getMessage
} else {
val b = new StringBuilder()
val cn = decodeName(t.getClass.getName)
        // split the (decoded) class name at the last package separator before
        // any '$' so that only the simple class name is colored below
        val pos1 = cn.indexOf('$')
        val pos2 = {
          if (pos1 == -1) cn.lastIndexOf('.')
          else cn.lastIndexOf('.', pos1)
        }
if (pos2 == -1) b.append(c(cn, c1))
else {
b.append(cn.substring(0, pos2))
b.append('.')
b.append(c(cn.substring(pos2 + 1), c1))
}
b.append(": ").append(t.getMessage)
b.toString()
}
}
}
def buildInfoMessage(t: Throwable): String =
buildColoredMessage(t, NNAME2)
def buildErrorMessage(t: Throwable): String =
buildColoredMessage(t, ENAME2)
}
object RunSettings {
private[RunSettings] def decodeScalaName(name: String): String =
Try(scala.reflect.NameTransformer.decode(name)).getOrElse(name)
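  // Illustrative examples (not part of the original file) of the decoding
  // performed above via scala.reflect.NameTransformer:
  //   decodeScalaName("$colon$colon") == "::"
  //   decodeScalaName("$plus$eq")     == "+="
  // Names that are not encoded are returned unchanged.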
}
|
mdedetrich/scala-js
|
junit-runtime/src/main/scala/com/novocode/junit/RunSettings.scala
|
Scala
|
bsd-3-clause
| 1,847 |
package sgwlpr.outpost
import sgwlpr.packets._
import sgwlpr.events._
import sgwlpr.DistrictInfo
import g2c._
import c2g._
import sgwlpr.db._
import sgwlpr.types._
import sgwlpr.world.Request
class RequestHandler extends Handler {
val world = context.actorFor("../world")
private def inventoryPagePackets(streamId: Int, pages: List[InventoryPage]) : List[Packet] = pages.map { page =>
new CreateInventoryPagePacket(
streamId,
page.inventoryType,
page.storageType,
page.id,
page.slots,
page.associatedItemId)
}
def handleCharacterDataRequest(session: OutpostSession, packet: RequestCharacterDataPacket) = session.player.get ! Request('CharacterData)
  def handleSpawnPointRequest(session: OutpostSession, packet: RequestSpawnPointPacket) = session.player.get ! Request('SpawnPoint)
/*
val streamId = 0x42
session.write(new ItemStreamCreatePacket(streamId = streamId))
session.inventory.map { inventory =>
session.write(new UpdateActiveWeaponsetPacket(streamId = streamId, activeSet = 0))
session.write(inventoryPagePackets(streamId, inventory.pages))
session.write(new UpdateGoldOnCharacterPacket(streamId = streamId, amount = inventory.gold))
}
session.write(new ItemStreamTerminatorPacket(mapId = mapId))*/
addMessageHandler(manifest[RequestCharacterDataPacketEvent], handleCharacterDataRequest)
addMessageHandler(manifest[RequestSpawnPointPacketEvent], handleSpawnPointRequest)
}
|
th0br0/sgwlpr
|
outpost/src/main/scala/sgwlpr/outpost/RequestHandler.scala
|
Scala
|
agpl-3.0
| 1,486 |
package org.neo4j.spark
import java.util
import java.util.{Collections, NoSuchElementException}
import org.apache.spark.Partition
import org.apache.spark.SparkContext
import org.apache.spark.TaskContext
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.RDD
import org.neo4j.driver.v1.Driver
import org.neo4j.driver.v1.GraphDatabase
import org.neo4j.driver.v1.Value
import org.neo4j.driver.v1.Values
import org.neo4j.driver.v1.{Driver, Value, GraphDatabase, Values}
import scala.collection.JavaConverters._
class Neo4jTupleRDD(@transient sc: SparkContext, val query: String, val parameters: Seq[(String, AnyRef)])
extends RDD[Seq[(String, AnyRef)]](sc, Nil) {
private val config = Neo4jConfig(sc.getConf)
override def compute(split: Partition, context: TaskContext): Iterator[Seq[(String, AnyRef)]] = {
val driver: Driver = config.driver()
val session = driver.session()
val result = session.run(query, parameters.toMap.asJava)
    result.asScala.map( (record) => {
      val res = record.asMap().asScala.toSeq
      // close the session and driver once the last record has been consumed
      if (!result.hasNext) {
        session.close()
        driver.close()
      }
      res
    })
}
override protected def getPartitions: Array[Partition] = Array(new DummyPartition())
}
object Neo4jTupleRDD {
def apply(sc: SparkContext, query: String, parameters: Seq[(String,AnyRef)] = Seq.empty) = new Neo4jTupleRDD(sc, query, parameters)
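  // Illustrative usage sketch (not part of the original file); the query below
  // is hypothetical:
  //   val rdd = Neo4jTupleRDD(sc, "MATCH (n:Person) RETURN n.name AS name")
  //   // each element of rdd is a Seq of (columnName, value) pairs, e.g. Seq(("name", "Alice"))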
}
|
Mageswaran1989/aja
|
src/main/scala/org/neo4j/spark/TupleRDD.scala
|
Scala
|
apache-2.0
| 1,429 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.Duration
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.execution.joins.{HashedRelation, HashJoin, LongHashedRelation}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.util.ThreadUtils
/**
* Physical plan for a custom subquery that collects and transforms the broadcast key values.
* This subquery retrieves the partition key from the broadcast results based on the type of
* [[HashedRelation]] returned. If the key is packed inside a Long, we extract it through
* bitwise operations, otherwise we return it from the appropriate index of the [[UnsafeRow]].
*
* @param index the index of the join key in the list of keys from the build side
* @param buildKeys the join keys from the build side of the join used
* @param child the BroadcastExchange or the AdaptiveSparkPlan with BroadcastQueryStageExec
* from the build side of the join
*/
case class SubqueryBroadcastExec(
name: String,
index: Int,
buildKeys: Seq[Expression],
child: SparkPlan) extends BaseSubqueryExec with UnaryExecNode {
// `SubqueryBroadcastExec` is only used with `InSubqueryExec`. No one would reference this output,
// so the exprId doesn't matter here. But it's important to correctly report the output length, so
// that `InSubqueryExec` can know it's the single-column execution mode, not multi-column.
override def output: Seq[Attribute] = {
val key = buildKeys(index)
val name = key match {
case n: NamedExpression => n.name
case Cast(n: NamedExpression, _, _) => n.name
case _ => "key"
}
Seq(AttributeReference(name, key.dataType, key.nullable)())
}
override lazy val metrics = Map(
"dataSize" -> SQLMetrics.createMetric(sparkContext, "data size (bytes)"),
"collectTime" -> SQLMetrics.createMetric(sparkContext, "time to collect (ms)"))
override def doCanonicalize(): SparkPlan = {
val keys = buildKeys.map(k => QueryPlan.normalizeExpressions(k, child.output))
SubqueryBroadcastExec("dpp", index, keys, child.canonicalized)
}
@transient
private lazy val relationFuture: Future[Array[InternalRow]] = {
// relationFuture is used in "doExecute". Therefore we can get the execution id correctly here.
val executionId = sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
Future {
// This will run in another thread. Set the execution id so that we can connect these jobs
// with the correct execution.
SQLExecution.withExecutionId(sqlContext.sparkSession, executionId) {
val beforeCollect = System.nanoTime()
val broadcastRelation = child.executeBroadcast[HashedRelation]().value
val (iter, expr) = if (broadcastRelation.isInstanceOf[LongHashedRelation]) {
(broadcastRelation.keys(), HashJoin.extractKeyExprAt(buildKeys, index))
} else {
(broadcastRelation.keys(),
BoundReference(index, buildKeys(index).dataType, buildKeys(index).nullable))
}
val proj = UnsafeProjection.create(expr)
val keyIter = iter.map(proj).map(_.copy())
val rows = keyIter.toArray[InternalRow].distinct
val beforeBuild = System.nanoTime()
longMetric("collectTime") += (beforeBuild - beforeCollect) / 1000000
val dataSize = rows.map(_.asInstanceOf[UnsafeRow].getSizeInBytes).sum
longMetric("dataSize") += dataSize
SQLMetrics.postDriverMetricUpdates(sparkContext, executionId, metrics.values.toSeq)
rows
}
}(SubqueryBroadcastExec.executionContext)
}
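  // Illustrative sketch (not part of the original file, and not necessarily
  // Spark's actual packing layout): the general idea behind extracting one of
  // several keys packed into a Long with bitwise operations, assuming two
  // 32-bit keys packed high/low:
  //   def unpack(packed: Long, index: Int): Int =
  //     if (index == 0) (packed >>> 32).toInt else packed.toInt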
protected override def doPrepare(): Unit = {
relationFuture
}
protected override def doExecute(): RDD[InternalRow] = {
throw new UnsupportedOperationException(
"SubqueryBroadcastExec does not support the execute() code path.")
}
override def executeCollect(): Array[InternalRow] = {
ThreadUtils.awaitResult(relationFuture, Duration.Inf)
}
override def stringArgs: Iterator[Any] = super.stringArgs ++ Iterator(s"[id=#$id]")
override protected def withNewChildInternal(newChild: SparkPlan): SubqueryBroadcastExec =
copy(child = newChild)
}
object SubqueryBroadcastExec {
private[execution] val executionContext = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonCachedThreadPool("dynamicpruning", 16))
}
|
cloud-fan/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/SubqueryBroadcastExec.scala
|
Scala
|
apache-2.0
| 5,396 |
package scrawler.actors.parser
import java.net.URL
import org.jsoup.Jsoup
import org.jsoup.select.Elements
class JSoupParser extends PageParser
{
override def fetchElements(url: URL, element: String): Elements =
{
Jsoup.connect(url.toString).timeout(0).get().select(element)
}
}
|
defpearlpilot/webcrawler
|
app/scrawler/actors/parser/JSoupParser.scala
|
Scala
|
gpl-3.0
| 292 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.migration
import java.util
import org.apache.flink.api.common.accumulators.IntCounter
import org.apache.flink.api.common.functions.RichFlatMapFunction
import org.apache.flink.api.common.state.{ListState, ListStateDescriptor, ValueState, ValueStateDescriptor}
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.configuration.Configuration
import org.apache.flink.runtime.state.memory.MemoryStateBackend
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction
import org.apache.flink.streaming.api.functions.source.SourceFunction
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.test.checkpointing.utils.SavepointMigrationTestBase
import org.apache.flink.util.Collector
import org.apache.flink.api.java.tuple.Tuple2
import org.apache.flink.runtime.state.{StateBackendLoader, FunctionInitializationContext, FunctionSnapshotContext}
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.migration.CustomEnum.CustomEnum
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend
import org.apache.flink.streaming.util.migration.MigrationVersion
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.{Ignore, Test}
import scala.util.{Failure, Properties, Try}
object StatefulJobSavepointMigrationITCase {
@Parameterized.Parameters(name = "Migrate Savepoint / Backend: {0}")
def parameters: util.Collection[(MigrationVersion, String)] = {
util.Arrays.asList(
(MigrationVersion.v1_2, StateBackendLoader.MEMORY_STATE_BACKEND_NAME),
(MigrationVersion.v1_2, StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME),
(MigrationVersion.v1_3, StateBackendLoader.MEMORY_STATE_BACKEND_NAME),
(MigrationVersion.v1_3, StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME),
(MigrationVersion.v1_4, StateBackendLoader.MEMORY_STATE_BACKEND_NAME),
(MigrationVersion.v1_4, StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME),
(MigrationVersion.v1_6, StateBackendLoader.MEMORY_STATE_BACKEND_NAME),
(MigrationVersion.v1_6, StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME))
}
// TODO to generate savepoints for a specific Flink version / backend type,
// TODO change these values accordingly, e.g. to generate for 1.3 with RocksDB,
// TODO set as (MigrationVersion.v1_3, StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME)
val GENERATE_SAVEPOINT_VER: MigrationVersion = MigrationVersion.v1_4
val GENERATE_SAVEPOINT_BACKEND_TYPE: String = StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME
val NUM_ELEMENTS = 4
}
/**
* ITCase for migration Scala state types across different Flink versions.
*/
@RunWith(classOf[Parameterized])
class StatefulJobSavepointMigrationITCase(
migrationVersionAndBackend: (MigrationVersion, String))
extends SavepointMigrationTestBase with Serializable {
@Ignore
@Test
def testCreateSavepoint(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
StatefulJobSavepointMigrationITCase.GENERATE_SAVEPOINT_BACKEND_TYPE match {
case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME =>
env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()))
case StateBackendLoader.MEMORY_STATE_BACKEND_NAME =>
env.setStateBackend(new MemoryStateBackend())
case _ => throw new UnsupportedOperationException
}
env.setStateBackend(new MemoryStateBackend)
env.enableCheckpointing(500)
env.setParallelism(4)
env.setMaxParallelism(4)
env
.addSource(
new CheckpointedSource(4)).setMaxParallelism(1).uid("checkpointedSource")
.keyBy(
new KeySelector[(Long, Long), Long] {
override def getKey(value: (Long, Long)): Long = value._1
}
)
.flatMap(new StatefulFlatMapper)
.addSink(new AccumulatorCountingSink)
executeAndSavepoint(
env,
s"src/test/resources/stateful-scala-udf-migration-itcase-flink" +
s"${StatefulJobSavepointMigrationITCase.GENERATE_SAVEPOINT_VER}" +
s"-${StatefulJobSavepointMigrationITCase.GENERATE_SAVEPOINT_BACKEND_TYPE}-savepoint",
new Tuple2(
AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR,
StatefulJobSavepointMigrationITCase.NUM_ELEMENTS
)
)
}
@Test
def testRestoreSavepoint(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
migrationVersionAndBackend._2 match {
case StateBackendLoader.ROCKSDB_STATE_BACKEND_NAME =>
env.setStateBackend(new RocksDBStateBackend(new MemoryStateBackend()))
case StateBackendLoader.MEMORY_STATE_BACKEND_NAME =>
env.setStateBackend(new MemoryStateBackend())
case _ => throw new UnsupportedOperationException
}
env.setStateBackend(new MemoryStateBackend)
env.enableCheckpointing(500)
env.setParallelism(4)
env.setMaxParallelism(4)
env
.addSource(
new CheckpointedSource(4)).setMaxParallelism(1).uid("checkpointedSource")
.keyBy(
new KeySelector[(Long, Long), Long] {
override def getKey(value: (Long, Long)): Long = value._1
}
)
.flatMap(new StatefulFlatMapper)
.addSink(new AccumulatorCountingSink)
restoreAndExecute(
env,
SavepointMigrationTestBase.getResourceFilename(
s"stateful-scala" +
s"-udf-migration-itcase-flink${migrationVersionAndBackend._1}" +
s"-${migrationVersionAndBackend._2}-savepoint"),
new Tuple2(
AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR,
StatefulJobSavepointMigrationITCase.NUM_ELEMENTS)
)
}
@SerialVersionUID(1L)
private object CheckpointedSource {
var CHECKPOINTED_STRING = "Here be dragons!"
}
@SerialVersionUID(1L)
private class CheckpointedSource(val numElements: Int)
extends SourceFunction[(Long, Long)] with CheckpointedFunction {
private var isRunning = true
private var state: ListState[CustomCaseClass] = _
@throws[Exception]
override def run(ctx: SourceFunction.SourceContext[(Long, Long)]) {
ctx.emitWatermark(new Watermark(0))
ctx.getCheckpointLock synchronized {
var i = 0
while (i < numElements) {
ctx.collect(i, i)
i += 1
}
}
// don't emit a final watermark so that we don't trigger the registered event-time
// timers
while (isRunning) Thread.sleep(20)
}
def cancel() {
isRunning = false
}
override def initializeState(context: FunctionInitializationContext): Unit = {
state = context.getOperatorStateStore.getOperatorState(
new ListStateDescriptor[CustomCaseClass](
"sourceState", createTypeInformation[CustomCaseClass]))
}
override def snapshotState(context: FunctionSnapshotContext): Unit = {
state.clear()
state.add(CustomCaseClass("Here be dragons!", 123))
}
}
@SerialVersionUID(1L)
private object AccumulatorCountingSink {
var NUM_ELEMENTS_ACCUMULATOR = classOf[AccumulatorCountingSink[_]] + "_NUM_ELEMENTS"
}
@SerialVersionUID(1L)
private class AccumulatorCountingSink[T] extends RichSinkFunction[T] {
private var count: Int = 0
@throws[Exception]
override def open(parameters: Configuration) {
super.open(parameters)
getRuntimeContext.addAccumulator(
AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR, new IntCounter)
}
@throws[Exception]
override def invoke(value: T) {
count += 1
getRuntimeContext.getAccumulator(
AccumulatorCountingSink.NUM_ELEMENTS_ACCUMULATOR).add(1)
}
}
class StatefulFlatMapper extends RichFlatMapFunction[(Long, Long), (Long, Long)] {
private var caseClassState: ValueState[CustomCaseClass] = _
private var caseClassWithNestingState: ValueState[CustomCaseClassWithNesting] = _
private var collectionState: ValueState[List[CustomCaseClass]] = _
private var tryState: ValueState[Try[CustomCaseClass]] = _
private var tryFailureState: ValueState[Try[CustomCaseClass]] = _
private var optionState: ValueState[Option[CustomCaseClass]] = _
private var optionNoneState: ValueState[Option[CustomCaseClass]] = _
private var eitherLeftState: ValueState[Either[CustomCaseClass, String]] = _
private var eitherRightState: ValueState[Either[CustomCaseClass, String]] = _
private var enumOneState: ValueState[CustomEnum] = _
private var enumThreeState: ValueState[CustomEnum] = _
override def open(parameters: Configuration): Unit = {
caseClassState = getRuntimeContext.getState(
new ValueStateDescriptor[CustomCaseClass](
"caseClassState", createTypeInformation[CustomCaseClass]))
caseClassWithNestingState = getRuntimeContext.getState(
new ValueStateDescriptor[CustomCaseClassWithNesting](
"caseClassWithNestingState", createTypeInformation[CustomCaseClassWithNesting]))
collectionState = getRuntimeContext.getState(
new ValueStateDescriptor[List[CustomCaseClass]](
"collectionState", createTypeInformation[List[CustomCaseClass]]))
tryState = getRuntimeContext.getState(
new ValueStateDescriptor[Try[CustomCaseClass]](
"tryState", createTypeInformation[Try[CustomCaseClass]]))
tryFailureState = getRuntimeContext.getState(
new ValueStateDescriptor[Try[CustomCaseClass]](
"tryFailureState", createTypeInformation[Try[CustomCaseClass]]))
optionState = getRuntimeContext.getState(
new ValueStateDescriptor[Option[CustomCaseClass]](
"optionState", createTypeInformation[Option[CustomCaseClass]]))
optionNoneState = getRuntimeContext.getState(
new ValueStateDescriptor[Option[CustomCaseClass]](
"optionNoneState", createTypeInformation[Option[CustomCaseClass]]))
eitherLeftState = getRuntimeContext.getState(
new ValueStateDescriptor[Either[CustomCaseClass, String]](
"eitherLeftState", createTypeInformation[Either[CustomCaseClass, String]]))
eitherRightState = getRuntimeContext.getState(
new ValueStateDescriptor[Either[CustomCaseClass, String]](
"eitherRightState", createTypeInformation[Either[CustomCaseClass, String]]))
enumOneState = getRuntimeContext.getState(
new ValueStateDescriptor[CustomEnum](
"enumOneState", createTypeInformation[CustomEnum]))
enumThreeState = getRuntimeContext.getState(
new ValueStateDescriptor[CustomEnum](
"enumThreeState", createTypeInformation[CustomEnum]))
}
override def flatMap(in: (Long, Long), collector: Collector[(Long, Long)]): Unit = {
caseClassState.update(CustomCaseClass(in._1.toString, in._2 * 2))
caseClassWithNestingState.update(
CustomCaseClassWithNesting(in._1, CustomCaseClass(in._1.toString, in._2 * 2)))
collectionState.update(List(CustomCaseClass(in._1.toString, in._2 * 2)))
tryState.update(Try(CustomCaseClass(in._1.toString, in._2 * 5)))
tryFailureState.update(Failure(new RuntimeException))
optionState.update(Some(CustomCaseClass(in._1.toString, in._2 * 2)))
optionNoneState.update(None)
eitherLeftState.update(Left(CustomCaseClass(in._1.toString, in._2 * 2)))
eitherRightState.update(Right((in._1 * 3).toString))
enumOneState.update(CustomEnum.ONE)
enumThreeState.update(CustomEnum.THREE)
collector.collect(in)
}
}
}
|
mylog00/flink
|
flink-tests/src/test/scala/org/apache/flink/api/scala/migration/StatefulJobSavepointMigrationITCase.scala
|
Scala
|
apache-2.0
| 12,603 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
object CollectorStats {
val unknown = CollectorStats(-1L, -1L, -1L, -1L)
def apply(vs: Iterable[CollectorStats]): CollectorStats = {
val builder = new CollectorStatsBuilder
vs.foreach(builder.update)
builder.result
}
}
/**
* Summary stats for how much data was processed by a collector.
*
* @param inputLines number of lines in the input to the collector
* @param inputDatapoints number of datapoints in the input to the collector
* @param outputLines number of lines in the result output
* @param outputDatapoints number of datapoints in the result output
*/
case class CollectorStats(
inputLines: Long,
inputDatapoints: Long,
outputLines: Long,
outputDatapoints: Long
)
/** Helper for accumulating stats for a collector. */
class CollectorStatsBuilder {
private var inputLines: Long = 0L
private var inputDatapoints: Long = 0L
private var outputLines: Long = 0L
private var outputDatapoints: Long = 0L
def updateInput(datapoints: Int): Unit = {
inputLines += 1
inputDatapoints += datapoints
}
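// Assumes every block in the list has the same size as the head block, so the
// datapoint count is computed as blocks.head.size * blocks.size.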
def updateInput(blocks: List[Block]): Unit = {
val size = blocks.size
if (size > 0) {
val b = blocks.head
inputLines += 1
inputDatapoints += b.size * size
}
}
def updateOutput(datapoints: Int): Unit = {
outputLines += 1
outputDatapoints += datapoints
}
def update(s: CollectorStats): Unit = {
if (s.inputLines >= 0) {
inputLines += s.inputLines
inputDatapoints += s.inputDatapoints
outputLines += s.outputLines
outputDatapoints += s.outputDatapoints
}
}
def result: CollectorStats = {
CollectorStats(inputLines, inputDatapoints, outputLines, outputDatapoints)
}
}
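// Illustrative usage sketch (not part of the original file): accumulate counts for
// one collector and merge in stats reported by another collector; the numbers below
// are made up for the example.
//
//   val builder = new CollectorStatsBuilder
//   builder.updateInput(10)                       // one input line, 10 datapoints
//   builder.updateOutput(4)                       // one output line, 4 datapoints
//   builder.update(CollectorStats(2L, 20L, 1L, 5L))
//   val merged: CollectorStats = builder.result   // CollectorStats(3, 30, 2, 9)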
|
copperlight/atlas
|
atlas-core/src/main/scala/com/netflix/atlas/core/model/CollectorStats.scala
|
Scala
|
apache-2.0
| 2,373 |
package com.ing.baker.runtime.model
import cats.effect.Sync
import com.ing.baker.il.CompiledRecipe
import com.ing.baker.il.petrinet.Transition
import com.ing.baker.runtime.scaladsl.EventInstance
import com.typesafe.scalalogging.Logger
import org.joda.time.Period
import org.joda.time.format.{PeriodFormatter, PeriodFormatterBuilder}
import org.slf4j.MDC
import scala.concurrent.duration.FiniteDuration
object BakerLogging {
lazy val defaultLogger: Logger = Logger("com.ing.baker")
}
case class BakerLogging(logger: Logger = BakerLogging.defaultLogger) {
val durationFormatter: PeriodFormatter = new PeriodFormatterBuilder()
.appendDays.appendSuffix("d")
.appendSeparator(" ")
.appendHours.appendSuffix("h")
.appendSeparator(" ")
.appendMinutes.appendSuffix("m")
.appendSeparator(" ")
.appendSeconds.appendSuffix("s")
.appendSeparator(" ")
.appendMillis.appendSuffix("ms")
.appendSeparator(" ")
.toFormatter
private def withMDC[F[_]](mdc: Map[String, String], log: Logger => Unit)(implicit effect: Sync[F]): F[Unit] =
effect.delay {
mdc.foreach { case (k, v) => MDC.put(k, v) }
log(logger)
mdc.keys.foreach(MDC.remove)
}
def addedRecipe[F[_]](recipe: CompiledRecipe, timestamp: Long)(implicit effect: Sync[F]): F[Unit] = {
val msg = s"Added recipe '${recipe.name}'"
val mdc = Map(
"recipeName" -> recipe.name,
"recipeId" -> recipe.recipeId,
"addedOn" -> timestamp.toString
)
withMDC(mdc, _.info(msg))
}
def recipeInstanceCreated[F[_]](recipeInstanceId: String, createdOn: Long, recipe: CompiledRecipe)(implicit effect: Sync[F]): F[Unit] = {
val msg = s"Baked recipe instance '$recipeInstanceId' from recipe '${recipe.name}'"
val mdc = Map(
"recipeInstanceId" -> recipeInstanceId,
"createdOn" -> createdOn.toString,
"recipeName" -> recipe.name,
"recipeId" -> recipe.recipeId
)
withMDC(mdc, _.info(msg))
}
def firingEvent[F[_]](recipeInstanceId: String, executionId: Long, transition: Transition, timeStarted: Long)(implicit effect: Sync[F]): F[Unit] = {
val msg = s"Firing event '${transition.label}'"
val mdc = Map(
"recipeInstanceId" -> recipeInstanceId,
"eventName" -> transition.label,
"runtimeTimestamp" -> timeStarted.toString,
"executionId" -> executionId.toString
)
withMDC(mdc, _.info(msg))
}
def interactionStarted[F[_]](recipeInstanceId: String, executionId: Long, transition: Transition, timeStarted: Long)(implicit effect: Sync[F]): F[Unit] = {
val msg = s"Interaction started '${transition.label}'"
val mdc = Map(
"recipeInstanceId" -> recipeInstanceId,
"interactionName" -> transition.label,
"timeStarted" -> timeStarted.toString,
"executionId" -> executionId.toString
)
withMDC(mdc, _.info(msg))
}
def interactionFinished[F[_]](recipeInstanceId: String, executionId: Long, transition: Transition, timeStarted: Long, timeFinished: Long)(implicit effect: Sync[F]): F[Unit] = {
val msg = s"Interaction finished '${transition.label}'"
val mdc = Map(
"recipeInstanceId" -> recipeInstanceId,
"interactionName" -> transition.label,
"duration" -> (timeFinished - timeStarted).toString,
"timeStarted" -> timeStarted.toString,
"timeFinished" -> timeFinished.toString,
"executionId" -> executionId.toString
)
withMDC(mdc, _.info(msg))
}
def interactionFailed[F[_]](recipeInstanceId: String, transition: Transition, executionId: Long, timeStarted: Long, timeFailed: Long, failureReason: Throwable)(implicit effect: Sync[F]): F[Unit] = {
val msg = s"Interaction failed '${transition.label}'"
val mdc = Map(
"recipeInstanceId" -> recipeInstanceId,
"interactionName" -> transition.label,
"duration" -> (timeFailed - timeStarted).toString,
"timeStarted" -> timeStarted.toString,
"timeFailed" -> timeFailed.toString,
"executionId" -> executionId.toString,
)
withMDC(mdc, _.error(msg, failureReason))
}
def idleStop[F[_]](recipeInstanceId: String, idleTTL: FiniteDuration)(implicit effect: Sync[F]): F[Unit] = {
val msg = s"Instance was idle for $idleTTL"
val mdc = Map("recipeInstanceId" -> recipeInstanceId)
withMDC(mdc, _.info(msg))
}
def eventRejected[F[_]](recipeInstanceId: String, event: EventInstance, rejectReason: String)(implicit effect: Sync[F]): F[Unit] = {
val msg = s"Event rejected '${event.name}' because: $rejectReason"
val mdc = Map(
"recipeInstanceId" -> recipeInstanceId,
"event" -> event.name,
"rejectReason" -> rejectReason
)
withMDC(mdc, _.warn(msg))
}
def scheduleRetry[F[_]](recipeInstanceId: String, transition: Transition, delay: Long)(implicit effect: Sync[F]): F[Unit] = {
val msg = s"Scheduling a retry of interaction '${transition.label}' in ${durationFormatter.print(new Period(delay))}"
val mdc = Map(
"recipeInstanceId" -> recipeInstanceId,
"interactionName" -> transition.label,
"delay" -> delay.toString
)
withMDC(mdc, _.info(msg))
}
def exceptionOnEventListener[F[_]](throwable: Throwable)(implicit effect: Sync[F]): F[Unit] =
effect.delay(logger.error("Exception on event listener", throwable))
}
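// Illustrative usage sketch (not part of the original file): any effect with a
// cats-effect Sync instance works for F[_]; `recipe` is assumed to be a
// CompiledRecipe obtained elsewhere.
//
//   import cats.effect.IO
//   val logging = BakerLogging()
//   val logAdd: IO[Unit] = logging.addedRecipe[IO](recipe, System.currentTimeMillis())
//   // Running logAdd writes the log line with recipeName/recipeId/addedOn set in the MDC.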
|
ing-bank/baker
|
core/baker-interface/src/main/scala/com/ing/baker/runtime/model/BakerLogging.scala
|
Scala
|
mit
| 5,305 |
package slick.memory
import scala.language.{implicitConversions, existentials}
import scala.collection.mutable.Builder
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import slick.ast._
import slick.ast.TypeUtil._
import slick.basic.{FixedBasicAction, FixedBasicStreamingAction}
import slick.compiler._
import slick.dbio._
import slick.relational.{RelationalProfile, ResultConverterCompiler, ResultConverter, CompiledMapping}
import slick.util.{DumpInfo, ??}
/** A profile for interpreted queries on top of the in-memory database. */
trait MemoryProfile extends RelationalProfile with MemoryQueryingProfile { self: MemoryProfile =>
type SchemaDescription = SchemaDescriptionDef
type InsertInvoker[T] = InsertInvokerDef[T]
type Backend = HeapBackend
val backend: Backend = HeapBackend
val api: API = new API {}
lazy val queryCompiler = compiler + new MemoryCodeGen
lazy val updateCompiler = compiler
lazy val deleteCompiler = compiler
lazy val insertCompiler = QueryCompiler(Phase.assignUniqueSymbols, Phase.inferTypes, new InsertCompiler(InsertCompiler.NonAutoInc), new MemoryInsertCodeGen)
override protected def computeCapabilities = super.computeCapabilities ++ MemoryCapabilities.all
def createInsertInvoker[T](tree: Node): InsertInvoker[T] = new InsertInvokerDef[T](tree)
def buildSequenceSchemaDescription(seq: Sequence[_]): SchemaDescription = ??
def buildTableSchemaDescription(table: Table[_]): SchemaDescription = new DDL(Vector(table))
type QueryActionExtensionMethods[R, S <: NoStream] = QueryActionExtensionMethodsImpl[R, S]
type StreamingQueryActionExtensionMethods[R, T] = StreamingQueryActionExtensionMethodsImpl[R, T]
type SchemaActionExtensionMethods = SchemaActionExtensionMethodsImpl
type InsertActionExtensionMethods[T] = InsertActionExtensionMethodsImpl[T]
def createQueryActionExtensionMethods[R, S <: NoStream](tree: Node, param: Any): QueryActionExtensionMethods[R, S] =
new QueryActionExtensionMethods[R, S](tree, param)
def createStreamingQueryActionExtensionMethods[R, T](tree: Node, param: Any): StreamingQueryActionExtensionMethods[R, T] =
new StreamingQueryActionExtensionMethods[R, T](tree, param)
def createSchemaActionExtensionMethods(schema: SchemaDescription): SchemaActionExtensionMethods =
new SchemaActionExtensionMethodsImpl(schema)
def createInsertActionExtensionMethods[T](compiled: CompiledInsert): InsertActionExtensionMethods[T] =
new InsertActionExtensionMethodsImpl[T](compiled)
lazy val MappedColumnType = new MappedColumnTypeFactory
class MappedColumnTypeFactory extends super.MappedColumnTypeFactory {
def base[T : ClassTag, U : BaseColumnType](tmap: T => U, tcomap: U => T): BaseColumnType[T] = {
assertNonNullType(implicitly[BaseColumnType[U]])
new MappedColumnType(implicitly[BaseColumnType[U]], tmap, tcomap)
}
}
class MappedColumnType[T, U](val baseType: ColumnType[U], toBase: T => U, toMapped: U => T)(implicit val classTag: ClassTag[T]) extends ScalaType[T] with BaseTypedType[T] {
def nullable: Boolean = baseType.nullable
def ordered: Boolean = baseType.ordered
def scalaOrderingFor(ord: Ordering): scala.math.Ordering[T] = new scala.math.Ordering[T] {
val uOrdering = baseType.scalaOrderingFor(ord)
def compare(x: T, y: T): Int = uOrdering.compare(toBase(x), toBase(y))
}
}
trait API extends super[RelationalProfile].API with super[MemoryQueryingProfile].API {
type SimpleDBIO[+R] = SimpleMemoryAction[R]
val SimpleDBIO = SimpleMemoryAction
}
protected def createInterpreter(db: Backend#Database, param: Any): QueryInterpreter = new QueryInterpreter(db, param) {
override def run(n: Node) = n match {
case ResultSetMapping(_, from, CompiledMapping(converter, _)) :@ CollectionType(cons, el) =>
val fromV = run(from).asInstanceOf[TraversableOnce[Any]]
val b = cons.createBuilder(el.classTag).asInstanceOf[Builder[Any, Any]]
b ++= fromV.map(v => converter.asInstanceOf[ResultConverter[MemoryResultConverterDomain, _]].read(v.asInstanceOf[QueryInterpreter.ProductValue]))
b.result()
case n => super.run(n)
}
}
def runSynchronousQuery[R](tree: Node, param: Any)(implicit session: Backend#Session): R =
createInterpreter(session.database, param).run(tree).asInstanceOf[R]
class InsertInvokerDef[T](tree: Node) {
protected[this] val ResultSetMapping(_, Insert(_, table: TableNode, _, _), CompiledMapping(converter, _)) = tree
type SingleInsertResult = Unit
type MultiInsertResult = Unit
def += (value: T)(implicit session: Backend#Session) {
val htable = session.database.getTable(table.tableName)
val buf = htable.createInsertRow
converter.asInstanceOf[ResultConverter[MemoryResultConverterDomain, Any]].set(value, buf)
htable.append(buf)
}
def ++= (values: Iterable[T])(implicit session: Backend#Session): Unit =
values.foreach(this += _)
}
class DDL(val tables: Vector[Table[_]]) extends SchemaDescriptionDef {
def ++(other: SchemaDescription): SchemaDescription =
new DDL(tables ++ other.asInstanceOf[DDL].tables)
}
type ProfileAction[+R, +S <: NoStream, -E <: Effect] = FixedBasicAction[R, S, E]
type StreamingProfileAction[+R, +T, -E <: Effect] = FixedBasicStreamingAction[R, T, E]
protected[this] def dbAction[R, S <: NoStream, E <: Effect](f: Backend#Session => R): ProfileAction[R, S, E] = new ProfileAction[R, S, E] with SynchronousDatabaseAction[R, S, Backend#This, E] {
def run(ctx: Backend#Context): R = f(ctx.session)
def getDumpInfo = DumpInfo("MemoryProfile.ProfileAction")
}
class StreamingQueryAction[R, T](tree: Node, param: Any) extends StreamingProfileAction[R, T, Effect.Read] with SynchronousDatabaseAction[R, Streaming[T], Backend#This, Effect.Read] {
type StreamState = Iterator[T]
protected[this] def getIterator(ctx: Backend#Context): Iterator[T] = {
val inter = createInterpreter(ctx.session.database, param)
val ResultSetMapping(_, from, CompiledMapping(converter, _)) = tree
val pvit = inter.run(from).asInstanceOf[TraversableOnce[QueryInterpreter.ProductValue]].toIterator
pvit.map(converter.asInstanceOf[ResultConverter[MemoryResultConverterDomain, T]].read _)
}
def run(ctx: Backend#Context): R =
createInterpreter(ctx.session.database, param).run(tree).asInstanceOf[R]
override def emitStream(ctx: Backend#StreamingContext, limit: Long, state: StreamState): StreamState = {
val it = if(state ne null) state else getIterator(ctx)
var count = 0L
while(count < limit && it.hasNext) {
count += 1
ctx.emit(it.next)
}
if(it.hasNext) it else null
}
def head: ProfileAction[T, NoStream, Effect.Read] = new ProfileAction[T, NoStream, Effect.Read] with SynchronousDatabaseAction[T, NoStream, Backend#This, Effect.Read] {
def run(ctx: Backend#Context): T = getIterator(ctx).next
def getDumpInfo = DumpInfo("MemoryProfile.StreamingQueryAction.first")
}
def headOption: ProfileAction[Option[T], NoStream, Effect.Read] = new ProfileAction[Option[T], NoStream, Effect.Read] with SynchronousDatabaseAction[Option[T], NoStream, Backend#This, Effect.Read] {
def run(ctx: Backend#Context): Option[T] = {
val it = getIterator(ctx)
if(it.hasNext) Some(it.next) else None
}
def getDumpInfo = DumpInfo("MemoryProfile.StreamingQueryAction.firstOption")
}
def getDumpInfo = DumpInfo("MemoryProfile.StreamingQueryAction")
}
class QueryActionExtensionMethodsImpl[R, S <: NoStream](tree: Node, param: Any) extends super.QueryActionExtensionMethodsImpl[R, S] {
def result: ProfileAction[R, S, Effect.Read] =
new StreamingQueryAction[R, Nothing](tree, param).asInstanceOf[ProfileAction[R, S, Effect.Read]]
}
class StreamingQueryActionExtensionMethodsImpl[R, T](tree: Node, param: Any) extends QueryActionExtensionMethodsImpl[R, Streaming[T]](tree, param) with super.StreamingQueryActionExtensionMethodsImpl[R, T] {
override def result: StreamingProfileAction[R, T, Effect.Read] = super.result.asInstanceOf[StreamingProfileAction[R, T, Effect.Read]]
}
class SchemaActionExtensionMethodsImpl(schema: SchemaDescription) extends super.SchemaActionExtensionMethodsImpl {
protected[this] val tables = schema.asInstanceOf[DDL].tables
def create = dbAction { session =>
tables.foreach(t =>
session.database.createTable(t.tableName,
t.create_*.map { fs => new HeapBackend.Column(fs, typeInfoFor(fs.tpe)) }.toIndexedSeq,
t.indexes.toIndexedSeq, t.tableConstraints.toIndexedSeq)
)
}
def drop = dbAction { session =>
tables.foreach(t => session.database.dropTable(t.tableName))
}
}
class InsertActionExtensionMethodsImpl[T](compiled: CompiledInsert) extends super.InsertActionExtensionMethodsImpl[T] {
protected[this] val inv = createInsertInvoker[T](compiled)
type SingleInsertResult = Unit
type MultiInsertResult = Unit
def += (value: T) = dbAction(inv.+=(value)(_))
def ++= (values: Iterable[T]) = dbAction(inv.++=(values)(_))
}
override val profile: MemoryProfile = this
override def computeQueryCompiler = super.computeQueryCompiler ++ QueryCompiler.interpreterPhases
class InsertMappingCompiler(insert: Insert) extends ResultConverterCompiler[MemoryResultConverterDomain] {
val Insert(_, table: TableNode, ProductNode(cols), _) = insert
val tableColumnIdxs = table.profileTable.asInstanceOf[Table[_]].create_*.zipWithIndex.toMap
def createColumnConverter(n: Node, idx: Int, column: Option[FieldSymbol]): ResultConverter[MemoryResultConverterDomain, _] =
new InsertResultConverter(tableColumnIdxs(column.get))
class InsertResultConverter(tidx: Int) extends ResultConverter[MemoryResultConverterDomain, Any] {
def read(pr: MemoryResultConverterDomain#Reader) = ??
def update(value: Any, pr: MemoryResultConverterDomain#Updater) = ??
def set(value: Any, pp: MemoryResultConverterDomain#Writer) = pp(tidx) = value
override def getDumpInfo = super.getDumpInfo.copy(mainInfo = s"tidx=$tidx")
def width = 1
}
}
class MemoryInsertCodeGen extends CodeGen {
def compileServerSideAndMapping(serverSide: Node, mapping: Option[Node], state: CompilerState) =
(serverSide, mapping.map(new InsertMappingCompiler(serverSide.asInstanceOf[Insert]).compileMapping))
}
}
object MemoryProfile extends MemoryProfile
/** A non-streaming Action that wraps a synchronous MemoryProfile API call. */
case class SimpleMemoryAction[+R](f: HeapBackend#Context => R) extends SynchronousDatabaseAction[R, NoStream, HeapBackend, Effect.All] {
def run(context: HeapBackend#Context): R = f(context)
def getDumpInfo = DumpInfo(name = "SimpleMemoryAction")
}
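// Illustrative usage sketch (not part of the original file): wrap an arbitrary
// synchronous computation as a DBIO action for the in-memory profile; `db` is
// assumed to be a HeapBackend database created elsewhere.
//
//   import MemoryProfile.api._
//   val answer = SimpleDBIO { _ => 21 * 2 }   // runs synchronously on the session thread
//   // db.run(answer) would complete with 42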
|
knoldus/slick-1
|
slick/src/main/scala/slick/memory/MemoryProfile.scala
|
Scala
|
bsd-2-clause
| 10,872 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.integration.torch
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dllib.nn.{GradientChecker, VolumetricMaxPooling}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.RandomGenerator._
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class VolumetricMaxPoolingSpec extends TorchSpec {
"VolumetricMaxPooling Forward dim 4 Double" should "work properly" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val from = RNG.uniform(2, 4).toInt
val to = RNG.uniform(1, 4).toInt
val kt = RNG.uniform(2, 4).toInt
val ki = RNG.uniform(2, 4).toInt
val kj = RNG.uniform(2, 4).toInt
val st = RNG.uniform(1, 3).toInt
val si = RNG.uniform(1, 3).toInt
val sj = RNG.uniform(1, 3).toInt
val padT = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padW = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padH = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val outt = RNG.uniform(5, 7).toInt
val outi = RNG.uniform(5, 7).toInt
val outj = RNG.uniform(5, 7).toInt
val batch = RNG.uniform(2, 7).toInt
val int = (outt - 1) * st + kt - padT * 2
val ini = (outi - 1) * si + ki - padW * 2
val inj = (outj - 1) * sj + kj - padH * 2
val layer = VolumetricMaxPooling[Double](kt, ki, kj, st, si, sj,
padT, padW, padH)
val input = Tensor[Double](from, int, ini, inj).apply1(e => Random.nextDouble())
val output = layer.updateOutput(input)
val code = "torch.manualSeed(" + seed + ")\n" +
s"layer = nn.VolumetricMaxPooling($kt, $ki, $kj, $st, $si, $sj, $padT," +
s" $padW, $padH)\n" +
"output = layer:forward(input)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input),
Array("output"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
output.size() should be(luaOutput.size())
output should be(luaOutput)
}
"VolumetricMaxPooling Forward dim 5 Double" should "work properly" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val from = RNG.uniform(2, 4).toInt
val to = RNG.uniform(1, 4).toInt
val kt = RNG.uniform(2, 4).toInt
val ki = RNG.uniform(2, 4).toInt
val kj = RNG.uniform(2, 4).toInt
val st = RNG.uniform(1, 3).toInt
val si = RNG.uniform(1, 3).toInt
val sj = RNG.uniform(1, 3).toInt
val padT = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padW = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padH = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val outt = RNG.uniform(5, 7).toInt
val outi = RNG.uniform(5, 7).toInt
val outj = RNG.uniform(5, 7).toInt
val batch = RNG.uniform(2, 7).toInt
val int = (outt - 1) * st + kt - padT * 2
val ini = (outi - 1) * si + ki - padW * 2
val inj = (outj - 1) * sj + kj - padH * 2
val layer = VolumetricMaxPooling[Double](kt, ki, kj, st, si, sj,
padT, padW, padH)
val input = Tensor[Double](batch, from, int, ini, inj).apply1(e => Random.nextDouble())
val output = layer.updateOutput(input)
val code = "torch.manualSeed(" + seed + ")\n" +
s"layer = nn.VolumetricMaxPooling($kt, $ki, $kj, $st, $si, $sj, $padT," +
s" $padW, $padH)\n" +
"output = layer:forward(input)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input),
Array("weight", "bias", "output"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
output should be(luaOutput)
}
"forward backward double batch" should "work properly" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val from = RNG.uniform(3, 4).toInt
val kt = RNG.uniform(2, 7).toInt
val ki = RNG.uniform(2, 7).toInt
val kj = RNG.uniform(2, 7).toInt
val st = RNG.uniform(1, 3).toInt
val si = RNG.uniform(1, 3).toInt
val sj = RNG.uniform(1, 3).toInt
val padT = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padW = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padH = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val outt = RNG.uniform(5, 7).toInt
val outi = RNG.uniform(5, 7).toInt
val outj = RNG.uniform(5, 7).toInt
val batch = RNG.uniform(2, 7).toInt
val int = (outt - 1) * st + kt - padT * 2
val ini = (outi - 1) * si + ki - padW * 2
val inj = (outj - 1) * sj + kj - padH * 2
val module = VolumetricMaxPooling[Double](kt, ki, kj, st, si, sj,
padT, padW, padH)
Random.setSeed(seed)
val input = Tensor[Double](batch, from, int, ini, inj).apply1(e => Random.nextDouble())
val output = module.updateOutput(input)
val code = "torch.manualSeed(" + seed + ")\n" +
s"module = nn.VolumetricMaxPooling($kt, $ki, $kj, $st, $si, $sj, $padT," +
s" $padW, $padH)\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input, gradOutput)"
val gradOutput = Tensor[Double]().resizeAs(output).rand()
val gradInput = module.backward(input, gradOutput)
val (luaTime, torchResult) = TH.run(code, Map("input" -> input,
"gradOutput" -> gradOutput), Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
output should be(luaOutput)
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
gradInput should be(luaGradInput)
}
"gradient check double batch" should "work properly" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val from = RNG.uniform(2, 4).toInt
val kt = RNG.uniform(2, 7).toInt
val ki = RNG.uniform(2, 7).toInt
val kj = RNG.uniform(2, 7).toInt
val st = RNG.uniform(1, 3).toInt
val si = RNG.uniform(1, 3).toInt
val sj = RNG.uniform(1, 3).toInt
val padT = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padW = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padH = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val outt = RNG.uniform(5, 7).toInt
val outi = RNG.uniform(5, 7).toInt
val outj = RNG.uniform(5, 7).toInt
val batch = RNG.uniform(2, 7).toInt
val int = (outt - 1) * st + kt - padT * 2
val ini = (outi - 1) * si + ki - padW * 2
val inj = (outj - 1) * sj + kj - padH * 2
val module = VolumetricMaxPooling[Double](kt, ki, kj, st, si, sj,
padT, padW, padH)
Random.setSeed(seed)
val input = Tensor[Double](batch, from, int, ini, inj).apply1(e => Random.nextDouble())
val checker = new GradientChecker(1e-5)
checker.checkLayer[Double](module, input, 1e-3) should be (true)
}
"forward backward double batch ceil" should "work properly" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val from = RNG.uniform(3, 4).toInt
val kt = RNG.uniform(2, 5).toInt
val ki = RNG.uniform(2, 5).toInt
val kj = RNG.uniform(2, 5).toInt
val st = RNG.uniform(1, 3).toInt
val si = RNG.uniform(1, 3).toInt
val sj = RNG.uniform(1, 3).toInt
val padT = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padW = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padH = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val outt = RNG.uniform(5, 7).toInt
val outi = RNG.uniform(5, 7).toInt
val outj = RNG.uniform(5, 7).toInt
val batch = RNG.uniform(2, 7).toInt
val int = (outt - 1) * st + kt - padT * 2
val ini = (outi - 1) * si + ki - padW * 2
val inj = (outj - 1) * sj + kj - padH * 2
val module = VolumetricMaxPooling[Double](kt, ki, kj, st, si, sj,
padT, padW, padH).ceil()
Random.setSeed(seed)
val input = Tensor[Double](batch, from, int, ini, inj).apply1(e => Random.nextDouble())
val output = module.updateOutput(input)
val code = "torch.manualSeed(" + seed + ")\n" +
s"module = nn.VolumetricMaxPooling($kt, $ki, $kj, $st, $si, $sj, $padT," +
s" $padW, $padH):ceil()\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input, gradOutput)"
val gradOutput = Tensor[Double]().resizeAs(output).rand()
val gradInput = module.backward(input, gradOutput)
val (luaTime, torchResult) = TH.run(code, Map("input" -> input,
"gradOutput" -> gradOutput), Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Double]]
output should be(luaOutput)
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
gradInput should be(luaGradInput)
}
"VolumetricMaxPooling Forward dim 4 Float" should "work properly" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val from = RNG.uniform(2, 4).toInt
val to = RNG.uniform(1, 4).toInt
val kt = RNG.uniform(2, 4).toInt
val ki = RNG.uniform(2, 4).toInt
val kj = RNG.uniform(2, 4).toInt
val st = RNG.uniform(1, 3).toInt
val si = RNG.uniform(1, 3).toInt
val sj = RNG.uniform(1, 3).toInt
val padT = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padW = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padH = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val outt = RNG.uniform(5, 7).toInt
val outi = RNG.uniform(5, 7).toInt
val outj = RNG.uniform(5, 7).toInt
val batch = RNG.uniform(2, 7).toInt
val int = (outt - 1) * st + kt - padT * 2
val ini = (outi - 1) * si + ki - padW * 2
val inj = (outj - 1) * sj + kj - padH * 2
val layer = VolumetricMaxPooling[Float](kt, ki, kj, st, si, sj,
padT, padW, padH)
val input = Tensor[Float](from, int, ini, inj).apply1(e => Random.nextFloat())
val output = layer.updateOutput(input)
val code = "torch.manualSeed(" + seed + ")\n" +
"torch.setdefaulttensortype('torch.FloatTensor')" +
s"layer = nn.VolumetricMaxPooling($kt, $ki, $kj, $st, $si, $sj, $padT," +
s" $padW, $padH)\n" +
"output = layer:forward(input)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input),
Array("output"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Float]]
output.size() should be(luaOutput.size())
output should be(luaOutput)
}
"VolumetricMaxPooling Forward dim 5 Float" should "work properly" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val from = RNG.uniform(2, 4).toInt
val to = RNG.uniform(1, 4).toInt
val kt = RNG.uniform(2, 4).toInt
val ki = RNG.uniform(2, 4).toInt
val kj = RNG.uniform(2, 4).toInt
val st = RNG.uniform(1, 3).toInt
val si = RNG.uniform(1, 3).toInt
val sj = RNG.uniform(1, 3).toInt
val padT = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padW = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padH = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val outt = RNG.uniform(5, 7).toInt
val outi = RNG.uniform(5, 7).toInt
val outj = RNG.uniform(5, 7).toInt
val batch = RNG.uniform(2, 7).toInt
val int = (outt - 1) * st + kt - padT * 2
val ini = (outi - 1) * si + ki - padW * 2
val inj = (outj - 1) * sj + kj - padH * 2
val layer = VolumetricMaxPooling[Float](kt, ki, kj, st, si, sj,
padT, padW, padH)
val input = Tensor[Float](batch, from, int, ini, inj).apply1(e => Random.nextFloat())
val output = layer.updateOutput(input)
val code = "torch.manualSeed(" + seed + ")\n" +
"torch.setdefaulttensortype('torch.FloatTensor')" +
s"layer = nn.VolumetricMaxPooling($kt, $ki, $kj, $st, $si, $sj, $padT," +
s" $padW, $padH)\n" +
"output = layer:forward(input)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input),
Array("weight", "bias", "output"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Float]]
output should be(luaOutput)
}
"forward backward Float batch" should "work properly" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val from = RNG.uniform(3, 5).toInt
val kt = RNG.uniform(2, 7).toInt
val ki = RNG.uniform(2, 7).toInt
val kj = RNG.uniform(2, 7).toInt
val st = RNG.uniform(1, 3).toInt
val si = RNG.uniform(1, 3).toInt
val sj = RNG.uniform(1, 3).toInt
val padT = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padW = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padH = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val outt = RNG.uniform(5, 7).toInt
val outi = RNG.uniform(5, 7).toInt
val outj = RNG.uniform(5, 7).toInt
val batch = RNG.uniform(2, 7).toInt
val int = (outt - 1) * st + kt - padT * 2
val ini = (outi - 1) * si + ki - padW * 2
val inj = (outj - 1) * sj + kj - padH * 2
val module = VolumetricMaxPooling[Float](kt, ki, kj, st, si, sj,
padT, padW, padH)
Random.setSeed(seed)
val input = Tensor[Float](batch, from, int, ini, inj).apply1(e => Random.nextFloat())
val output = module.updateOutput(input)
val code = "torch.manualSeed(" + seed + ")\n" +
"torch.setdefaulttensortype('torch.FloatTensor')" +
s"module = nn.VolumetricMaxPooling($kt, $ki, $kj, $st, $si, $sj, $padT," +
s" $padW, $padH)\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input, gradOutput)"
val gradOutput = Tensor[Float]()
gradOutput.resizeAs(output).rand()
val gradInput = module.backward(input, gradOutput)
val (luaTime, torchResult) = TH.run(code, Map("input" -> input,
"gradOutput" -> gradOutput), Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Float]]
output should be(luaOutput)
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Float]]
gradInput should be(luaGradInput)
}
"forward backward Float batch ceil" should "work properly" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val from = RNG.uniform(3, 5).toInt
val kt = RNG.uniform(2, 5).toInt
val ki = RNG.uniform(2, 5).toInt
val kj = RNG.uniform(2, 5).toInt
val st = RNG.uniform(1, 3).toInt
val si = RNG.uniform(1, 3).toInt
val sj = RNG.uniform(1, 3).toInt
val padT = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padW = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val padH = Math.min(RNG.uniform(0, 2).toInt, math.floor(kt / 2).toInt)
val outt = RNG.uniform(5, 7).toInt
val outi = RNG.uniform(5, 7).toInt
val outj = RNG.uniform(5, 7).toInt
val batch = RNG.uniform(2, 7).toInt
val int = (outt - 1) * st + kt - padT * 2
val ini = (outi - 1) * si + ki - padW * 2
val inj = (outj - 1) * sj + kj - padH * 2
val module = VolumetricMaxPooling[Float](kt, ki, kj, st, si, sj,
padT, padW, padH).ceil()
Random.setSeed(seed)
val input = Tensor[Float](batch, from, int, ini, inj).apply1(e => Random.nextFloat())
val output = module.updateOutput(input)
val code = "torch.manualSeed(" + seed + ")\n" +
"torch.setdefaulttensortype('torch.FloatTensor')" +
s"module = nn.VolumetricMaxPooling($kt, $ki, $kj, $st, $si, $sj, $padT," +
s" $padW, $padH):ceil()\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input, gradOutput)"
val gradOutput = Tensor[Float]().resizeAs(output).rand()
val gradInput = module.backward(input, gradOutput)
val (luaTime, torchResult) = TH.run(code, Map("input" -> input,
"gradOutput" -> gradOutput), Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Tensor[Float]]
output should be(luaOutput)
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Float]]
gradInput should be(luaGradInput)
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/integration/torch/VolumetricMaxPoolingSpec.scala
|
Scala
|
apache-2.0
| 16,533 |
package org.scaladebugger.test.info
/**
* Tests accessing fields in a variety of different scenarios.
*/
object Fields {
trait TestTrait {
val publicBaseFinalPrimitiveField: Int = 999
val publicBaseFinalObjectField: String = "test"
var publicBaseMutablePrimitiveField: Int = 1000
var publicBaseMutableObjectField: String = "test2"
}
class InheritedClass(
override val publicBaseFinalPrimitiveField: Int
) extends TestTrait {
def runMe(): Unit = {
val local = publicBaseFinalPrimitiveField + publicBaseMutablePrimitiveField
val local2 = publicBaseFinalObjectField.toString +
publicBaseMutableObjectField.toString
println(local)
println(local2)
}
}
class TestClass(
private val privateFinalPrimitiveField: Int,
private val privateFinalObjectField: String,
private var privateMutablePrimitiveField: Int,
private var privateMutableObjectField: String,
protected val protectedFinalPrimitiveField: Int,
protected val protectedFinalObjectField: String,
protected var protectedMutablePrimitiveField: Int,
protected var protectedMutableObjectField: String,
val publicFinalPrimitiveField: Int,
val publicFinalObjectField: String,
var publicMutablePrimitiveField: Int,
var publicMutableObjectField: String
) {
def runMe(): Unit = {
val local = privateFinalPrimitiveField + privateMutablePrimitiveField +
protectedFinalPrimitiveField + protectedMutablePrimitiveField +
publicFinalPrimitiveField + publicMutablePrimitiveField
val local2 = privateFinalObjectField + privateMutableObjectField +
protectedFinalObjectField + protectedMutableObjectField +
publicFinalObjectField + publicMutableObjectField
println(local)
println(local2)
}
}
case class TestCaseClass(primitive: Int, obj: String) {
def runMe(): Unit = {
val local = primitive + obj
val local2 = "another obj"
println(local)
println(local2)
}
}
def main(args: Array[String]): Unit = {
val inheritedClass = new InheritedClass(5)
inheritedClass.runMe()
val testClass = new TestClass(0, "a", 1, "b", 2, "c", 3, "d", 4, "e", 5, "f")
testClass.runMe()
val testCaseClass = TestCaseClass(5, "hello")
testCaseClass.runMe()
println("done")
}
}
|
chipsenkbeil/scala-debugger
|
scala-debugger-test/src/main/scala/org/scaladebugger/test/info/Fields.scala
|
Scala
|
apache-2.0
| 2,347 |
package com.cdgore.test.mlspark
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import com.cdgore.mlspark.SoftmaxLR
import org.jblas.DoubleMatrix
import org.jblas.MatrixFunctions
import scala.util.Random
class SoftMaxLRTest extends SparkTestUtils with ShouldMatchers {
sparkTest("L1 Regularization with Clipping Test") {
val data = sc.parallelize(Array("A", "B", "C").flatMap(x => for(i <- List.range(1, 301)) yield x).map(l => (l, new DoubleMatrix((1 to 500).map(x => (Random.nextGaussian() + l.getBytes.map(x => ( (x.asInstanceOf[Double] % 3)+1 )).sum)/3.0 ).toArray) )))
val miniBatchTrainingPercentage = 0.7
val maxIterations = 100
val regUpdate = SoftmaxLR.l1ClippingUpdate _
val regLambda = 0.05
val learningRateAlpha = 0.04
val lossFileOut = null
val res = SoftmaxLR.trainLR(sc, data, learningRateAlpha, regLambda, regUpdate, miniBatchTrainingPercentage, maxIterations, lossFileOut)
res.first._2.toArray.filter(x => x==0).length should be > 0
res.first._2.toArray.filter(x => x==0).length should be < res.first._2.toArray.length
}
sparkTest("L2 Regularization Test") {
val data = sc.parallelize(Array("A", "B", "C").flatMap(x => for(i <- List.range(1, 301)) yield x).map(l => (l, new DoubleMatrix((1 to 500).map(x => (Random.nextGaussian() + l.getBytes.map(x => ( (x.asInstanceOf[Double] % 3)+1 )).sum)/3.0 ).toArray) )))
val miniBatchTrainingPercentage = 0.7
val maxIterations = 100
val regUpdate = SoftmaxLR.l2Update _
val regLambda = 0.05
val learningRateAlpha = 0.04
val lossFileOut = null
val res = SoftmaxLR.trainLR(sc, data, learningRateAlpha, regLambda, regUpdate, miniBatchTrainingPercentage, maxIterations, lossFileOut)
res.first._2.toArray.filter(x => x==0).length should be (0)
}
sparkTest("Regression without Regularization Test") {
val data = sc.parallelize(Array("A", "B", "C").flatMap(x => for(i <- List.range(1, 301)) yield x).map(l => (l, new DoubleMatrix((1 to 500).map(x => (Random.nextGaussian() + l.getBytes.map(x => ( (x.asInstanceOf[Double] % 3)+1 )).sum)/3.0 ).toArray) )))
val miniBatchTrainingPercentage = 0.7
val maxIterations = 100
val regUpdate = SoftmaxLR.simpleUpdate _
val regLambda = 0.05
val learningRateAlpha = 0.04
val lossFileOut = null
val res = SoftmaxLR.trainLR(sc, data, learningRateAlpha, regLambda, regUpdate, miniBatchTrainingPercentage, maxIterations, lossFileOut)
res.first._2.toArray.filter(x => x==0).length should be (0)
}
// test("non-spark code") {
// val x = 17
// val y = 3
// SoftMaxLRTest.plus(x,y) should be (20)
// }
}
|
cdgore/mlspark
|
src/test/scala/com/cdgore/mlspark/SoftmaxLRTest.scala
|
Scala
|
mit
| 2,764 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.calls
import org.apache.flink.table.planner.codegen.CodeGenUtils.{primitiveDefaultValue, primitiveTypeTermForType}
import org.apache.flink.table.planner.codegen.{CodeGenUtils, CodeGeneratorContext, GeneratedExpression}
import org.apache.flink.table.types.logical.LogicalType
/**
* Generates IF function call.
*/
class IfCallGen() extends CallGenerator {
override def generate(
ctx: CodeGeneratorContext,
operands: Seq[GeneratedExpression],
returnType: LogicalType)
: GeneratedExpression = {
// Inferred return type is ARG1. Must be the same as ARG2.
// This is a temporary solution which introduce type cast in codegen.
// Not elegant, but can allow IF function to handle different numeric type arguments.
val castedResultTerm1 = CodeGenUtils.getNumericCastedResultTerm(operands(1), returnType)
val castedResultTerm2 = CodeGenUtils.getNumericCastedResultTerm(operands(2), returnType)
if (castedResultTerm1 == null || castedResultTerm2 == null) {
throw new Exception(String.format("Unsupported operand types: IF(boolean, %s, %s)",
operands(1).resultType, operands(2).resultType))
}
val resultTypeTerm = primitiveTypeTermForType(returnType)
val resultDefault = primitiveDefaultValue(returnType)
val Seq(resultTerm, nullTerm) = ctx.addReusableLocalVariables(
(resultTypeTerm, "result"),
("boolean", "isNull"))
val resultCode =
s"""
|${operands.head.code}
|$resultTerm = $resultDefault;
|if (${operands.head.resultTerm}) {
| ${operands(1).code}
| if (!${operands(1).nullTerm}) {
| $resultTerm = $castedResultTerm1;
| }
| $nullTerm = ${operands(1).nullTerm};
|} else {
| ${operands(2).code}
| if (!${operands(2).nullTerm}) {
| $resultTerm = $castedResultTerm2;
| }
| $nullTerm = ${operands(2).nullTerm};
|}
""".stripMargin
GeneratedExpression(resultTerm, nullTerm, resultCode, returnType)
}
}
|
tillrohrmann/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/calls/IfCallGen.scala
|
Scala
|
apache-2.0
| 2,915 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.services.queries
import com.normation.rudder.services.queries._
import com.normation.rudder.domain.queries._
import org.junit._
import org.junit.Assert._
import org.junit.runner.RunWith
import org.junit.runners.BlockJUnit4ClassRunner
import net.liftweb.common._
import com.normation.rudder.domain._
import com.normation.rudder.services.queries._
@RunWith(classOf[BlockJUnit4ClassRunner])
class TestStringQueryParser {
/*
* our data store:
* two criteria
*
*
*/
val c1 = Criterion("name", BareComparator(Exists,Greater))
val c2 = Criterion("id", BareComparator(Equals))
val c3 = Criterion("name", BareComparator(Exists,Greater))
val oc1 = ObjectCriterion("node", Seq(c1,c2))
val oc2 = ObjectCriterion("machine", Seq(c3))
val criteria = Map(
"node" -> oc1,
"machine" -> oc2
)
val parser = new DefaultStringQueryParser() {
override val criterionObjects = criteria
}
val valid1_0 = StringQuery(NodeReturnType, Some("and"), Seq(
StringCriterionLine("node","name","exists"),
StringCriterionLine("machine","name","gt",Some("plop")),
StringCriterionLine("node","id","eq",Some("foo"))
))
val valid1_1 = StringQuery(NodeReturnType, Some("and"), Seq())
val valid1_2 = StringQuery(NodeReturnType, Some("or"), Seq())
val valid1_3 = StringQuery(NodeReturnType, None, Seq()) //default to and
val unvalidComp = StringQuery(NodeReturnType, Some("foo"), Seq())
val unknowObjectType = StringQuery(NodeReturnType, None, Seq(
StringCriterionLine("unknown","name","exists")
))
val unknowAttribute = StringQuery(NodeReturnType, None, Seq(
StringCriterionLine("node","unknown","exists")
))
val unknowComparator = StringQuery(NodeReturnType, None, Seq(
StringCriterionLine("node","name","unknown")
))
val missingRequiredValue = StringQuery(NodeReturnType, None, Seq(
StringCriterionLine("node","name","eq")
))
@Test
def basicParsing() {
assertEquals(
Full(Query(NodeReturnType, And, Seq(
CriterionLine(oc1,c1,Exists),
CriterionLine(oc2,c3,Greater,"plop"),
CriterionLine(oc1,c2,Equals,"foo")
))),
parser.parse(valid1_0)
)
assertEquals(
Full(Query(NodeReturnType, And, Seq())),
parser.parse(valid1_1)
)
assertEquals(
Full(Query(NodeReturnType, Or, Seq())),
parser.parse(valid1_2)
)
assertEquals(
Full(Query(NodeReturnType, And, Seq())),
parser.parse(valid1_3)
)
assertFalse(parser.parse(unvalidComp).isDefined)
assertFalse(parser.parse(unknowObjectType).isDefined)
assertFalse(parser.parse(unknowAttribute).isDefined)
assertFalse(parser.parse(unknowComparator).isDefined)
assertFalse(parser.parse(missingRequiredValue).isDefined)
}
}
|
Kegeruneku/rudder
|
rudder-core/src/test/scala/com/normation/rudder/services/queries/TestStringQueryParser.scala
|
Scala
|
agpl-3.0
| 4,490 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.{
ReferenceRegion,
SequenceDictionary,
SequenceRecord
}
import org.bdgenomics.formats.avro.AlignmentRecord
class LeftOuterShuffleRegionJoinSuite extends OuterRegionJoinSuite {
val partitionSize = 3
var seqDict: SequenceDictionary = _
before {
seqDict = SequenceDictionary(
SequenceRecord("chr1", 15, url = "test://chrom1"),
SequenceRecord("chr2", 15, url = "tes=t://chrom2"))
}
def runJoin(leftRdd: RDD[(ReferenceRegion, AlignmentRecord)],
rightRdd: RDD[(ReferenceRegion, AlignmentRecord)]): RDD[(Option[AlignmentRecord], AlignmentRecord)] = {
LeftOuterShuffleRegionJoin[AlignmentRecord, AlignmentRecord](seqDict, partitionSize, sc).partitionAndJoin(
rightRdd,
leftRdd).map(_.swap)
}
}
|
tdanford/adam
|
adam-core/src/test/scala/org/bdgenomics/adam/rdd/LeftOuterShuffleRegionJoinSuite.scala
|
Scala
|
apache-2.0
| 1,647 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher
object CypherOption {
val DEFAULT: String = "default"
def asCanonicalName(name: String) = name.toLowerCase
}
abstract class CypherOption(inputName: String) {
val name = CypherOption.asCanonicalName(inputName)
def toTextOutput = name
}
trait CypherOptionCompanion[O <: CypherOption] {
self: Product =>
def default: O
def all: Set[O]
def apply(name: String) = findByExactName(CypherOption.asCanonicalName(name)).getOrElse {
throw new SyntaxException(s"Supported ${self.productPrefix} values are: ${all.map(_.name).mkString(", ")}")
}
private def findByExactName(name: String) = if (CypherOption.DEFAULT == name)
Some(default)
else
all.find( _.name == name )
}
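// Illustrative sketch (not part of the original file): a hypothetical option type
// wired through CypherOptionCompanion; the names below are invented for the example.
//
//   class MyOption(name: String) extends CypherOption(name)
//   case object MyOption extends CypherOptionCompanion[MyOption] {
//     val default: MyOption = new MyOption("cost")
//     val all: Set[MyOption] = Set(default, new MyOption("rule"))
//   }
//   // MyOption("RULE") resolves case-insensitively, MyOption("default") returns `default`,
//   // and an unknown name raises a SyntaxException listing the supported values.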
|
HuangLS/neo4j
|
community/cypher/cypher/src/main/scala/org/neo4j/cypher/CypherOption.scala
|
Scala
|
apache-2.0
| 1,521 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package registrator
import com.miguno.avro._
import com.esotericsoftware.kryo.Kryo
import org.apache.spark.SparkConf
import org.apache.spark.serializer.{KryoSerializer, KryoRegistrator}
class MyKryoRegistrator extends KryoRegistrator {
override def registerClasses(kryo: Kryo) {
kryo.register(classOf[twitter_schema])
}
}
object MyKryoRegistrator {
def register(conf: SparkConf) {
conf.set("spark.serializer", classOf[KryoSerializer].getName)
conf.set("spark.kryo.registrator", classOf[MyKryoRegistrator].getName)
}
}
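// Illustrative usage sketch (not part of the original file): call register(conf)
// before constructing the SparkContext so twitter_schema records are serialized
// with Kryo instead of Java serialization.
//
//   val conf = new SparkConf().setAppName("avro-kryo-example").setMaster("local[*]")
//   MyKryoRegistrator.register(conf)
//   val sc = new org.apache.spark.SparkContext(conf)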
|
iulianu/avro-scala-macro-annotations
|
examples/spark/src/main/scala/MyKryoRegistrator.scala
|
Scala
|
apache-2.0
| 1,349 |
package hiddenargs
import org.scalatest._
class ExampleSpec extends FlatSpec with Matchers {
private def example(name: String, run: => Unit): Unit =
s"The example '$name'" should "be valid" in (run)
example("count", new count())
example("factorial", new factorial())
example("fibonacci", new fibonacci())
example("genericsum", new genericsum())
example("reverse", new reverse())
example("sum", new sum())
}
|
keddelzz/hidden-args
|
examples/src/test/scala/hiddenargs/ExampleSpec.scala
|
Scala
|
mit
| 447 |
package com.twitter.algebird
import org.scalatest._
class TupleAggregatorsTest extends WordSpec with Matchers {
// This gives you an implicit conversion from tuples of aggregators
// to aggregator of tuples
val data = List(1, 3, 2, 0, 5, 6)
val MinAgg = Aggregator.min[Int]
val longData = data.map{ _.toLong }
val SizeAgg = Aggregator.size
"GeneratedTupleAggregators" should {
import GeneratedTupleAggregator._
"Create an aggregator from a tuple of 2 aggregators" in {
val agg: Aggregator[Int, Tuple2[Int, Int], Tuple2[Int, Int]] = Tuple2(MinAgg, MinAgg)
assert(agg(data) == Tuple2(0, 0))
}
"Create an aggregator from a tuple of 3 aggregators" in {
val agg: Aggregator[Int, Tuple3[Int, Int, Int], Tuple3[Int, Int, Int]] = Tuple3(MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple3(0, 0, 0))
}
"Create an aggregator from a tuple of 4 aggregators" in {
val agg: Aggregator[Int, Tuple4[Int, Int, Int, Int], Tuple4[Int, Int, Int, Int]] = Tuple4(MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple4(0, 0, 0, 0))
}
"Create an aggregator from a tuple of 5 aggregators" in {
val agg: Aggregator[Int, Tuple5[Int, Int, Int, Int, Int], Tuple5[Int, Int, Int, Int, Int]] = Tuple5(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple5(0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 6 aggregators" in {
val agg: Aggregator[Int, Tuple6[Int, Int, Int, Int, Int, Int], Tuple6[Int, Int, Int, Int, Int, Int]] = Tuple6(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple6(0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 7 aggregators" in {
val agg: Aggregator[Int, Tuple7[Int, Int, Int, Int, Int, Int, Int], Tuple7[Int, Int, Int, Int, Int, Int, Int]] = Tuple7(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple7(0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 8 aggregators" in {
val agg: Aggregator[Int, Tuple8[Int, Int, Int, Int, Int, Int, Int, Int], Tuple8[Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple8(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple8(0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 9 aggregators" in {
val agg: Aggregator[Int, Tuple9[Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple9[Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple9(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple9(0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 10 aggregators" in {
val agg: Aggregator[Int, Tuple10[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple10[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple10(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple10(0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 11 aggregators" in {
val agg: Aggregator[Int, Tuple11[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple11[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple11(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple11(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 12 aggregators" in {
val agg: Aggregator[Int, Tuple12[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple12[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple12(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple12(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 13 aggregators" in {
val agg: Aggregator[Int, Tuple13[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple13[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple13(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple13(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 14 aggregators" in {
val agg: Aggregator[Int, Tuple14[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple14[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple14(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple14(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 15 aggregators" in {
val agg: Aggregator[Int, Tuple15[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple15[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple15(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple15(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 16 aggregators" in {
val agg: Aggregator[Int, Tuple16[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple16[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple16(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple16(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 17 aggregators" in {
val agg: Aggregator[Int, Tuple17[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple17[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple17(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple17(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 18 aggregators" in {
val agg: Aggregator[Int, Tuple18[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple18[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple18(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple18(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 19 aggregators" in {
val agg: Aggregator[Int, Tuple19[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple19[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple19(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple19(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 20 aggregators" in {
val agg: Aggregator[Int, Tuple20[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple20[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple20(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple20(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 21 aggregators" in {
val agg: Aggregator[Int, Tuple21[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple21[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple21(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple21(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 22 aggregators" in {
val agg: Aggregator[Int, Tuple22[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple22[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = Tuple22(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple22(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
}
"MultiAggregator" should {
import MultiAggregator._
"Create an aggregator from a tuple of 2 aggregators" in {
val agg: Aggregator[Int, Tuple2[Int, Int], Tuple2[Int, Int]] = MultiAggregator(MinAgg, MinAgg)
assert(agg(data) == Tuple2(0, 0))
}
"Create an aggregator from a tuple of 3 aggregators" in {
val agg: Aggregator[Int, Tuple3[Int, Int, Int], Tuple3[Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple3(0, 0, 0))
}
"Create an aggregator from a tuple of 4 aggregators" in {
val agg: Aggregator[Int, Tuple4[Int, Int, Int, Int], Tuple4[Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple4(0, 0, 0, 0))
}
"Create an aggregator from a tuple of 5 aggregators" in {
val agg: Aggregator[Int, Tuple5[Int, Int, Int, Int, Int], Tuple5[Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple5(0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 6 aggregators" in {
val agg: Aggregator[Int, Tuple6[Int, Int, Int, Int, Int, Int], Tuple6[Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple6(0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 7 aggregators" in {
val agg: Aggregator[Int, Tuple7[Int, Int, Int, Int, Int, Int, Int], Tuple7[Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple7(0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 8 aggregators" in {
val agg: Aggregator[Int, Tuple8[Int, Int, Int, Int, Int, Int, Int, Int], Tuple8[Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple8(0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 9 aggregators" in {
val agg: Aggregator[Int, Tuple9[Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple9[Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple9(0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 10 aggregators" in {
val agg: Aggregator[Int, Tuple10[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple10[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple10(0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 11 aggregators" in {
val agg: Aggregator[Int, Tuple11[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple11[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple11(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 12 aggregators" in {
val agg: Aggregator[Int, Tuple12[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple12[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple12(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 13 aggregators" in {
val agg: Aggregator[Int, Tuple13[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple13[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple13(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 14 aggregators" in {
val agg: Aggregator[Int, Tuple14[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple14[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple14(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 15 aggregators" in {
val agg: Aggregator[Int, Tuple15[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple15[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple15(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 16 aggregators" in {
val agg: Aggregator[Int, Tuple16[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple16[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple16(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 17 aggregators" in {
val agg: Aggregator[Int, Tuple17[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple17[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple17(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 18 aggregators" in {
val agg: Aggregator[Int, Tuple18[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple18[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple18(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 19 aggregators" in {
val agg: Aggregator[Int, Tuple19[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple19[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple19(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 20 aggregators" in {
val agg: Aggregator[Int, Tuple20[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple20[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple20(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 21 aggregators" in {
val agg: Aggregator[Int, Tuple21[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple21[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple21(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create an aggregator from a tuple of 22 aggregators" in {
val agg: Aggregator[Int, Tuple22[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Tuple22[Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int]] = MultiAggregator(MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg, MinAgg)
assert(agg(data) == Tuple22(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
}
"Create a MonoidAggregator from a tuple of 2 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple2[Long, Long], Tuple2[Long, Long]] = MultiAggregator(SizeAgg, SizeAgg)
assert(agg(longData) == Tuple2(6, 6))
}
"Create a MonoidAggregator from a tuple of 3 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple3[Long, Long, Long], Tuple3[Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple3(6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 4 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple4[Long, Long, Long, Long], Tuple4[Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple4(6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 5 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple5[Long, Long, Long, Long, Long], Tuple5[Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple5(6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 6 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple6[Long, Long, Long, Long, Long, Long], Tuple6[Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple6(6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 7 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple7[Long, Long, Long, Long, Long, Long, Long], Tuple7[Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple7(6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 8 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple8[Long, Long, Long, Long, Long, Long, Long, Long], Tuple8[Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple8(6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 9 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple9[Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple9[Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple9(6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 10 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple10[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple10[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple10(6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 11 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple11[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple11[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple11(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 12 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple12[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple12[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple12(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 13 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple13[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple13[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple13(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 14 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple14[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple14[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple14(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 15 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple15[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple15[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple15(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 16 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple16[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple16[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple16(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 17 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple17[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple17[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple17(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 18 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple18[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple18[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple18(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 19 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple19[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple19[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple19(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 20 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple20[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple20[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple20(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 21 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple21[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple21[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple21(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
"Create a MonoidAggregator from a tuple of 22 MonoidAggregators" in {
val agg: MonoidAggregator[Long, Tuple22[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long], Tuple22[Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long, Long]] = MultiAggregator(SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg, SizeAgg)
assert(agg(longData) == Tuple22(6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6))
}
}
"MapAggregator" should {
import MapAggregator._
val MinLongAgg = Aggregator.min[Int].andThenPresent{ _.toLong }
"Create an aggregator from 2 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple2[Int, Long], Map[String, Long]] = MapAggregator(
("key1", MinLongAgg),
("key2", SizeAgg))
val expectedMap = Map(
"key1" -> 0,
"key2" -> 6)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 3 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple3[Int, Long, Int], Map[String, Long]] = MapAggregator(
("key1", MinLongAgg),
("key2", SizeAgg),
("key3", MinLongAgg))
val expectedMap = Map(
"key1" -> 0,
"key2" -> 6,
"key3" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 4 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple4[Int, Long, Int, Long], Map[String, Long]] = MapAggregator(
("key1", MinLongAgg),
("key2", SizeAgg),
("key3", MinLongAgg),
("key4", SizeAgg))
val expectedMap = Map(
"key1" -> 0,
"key2" -> 6,
"key3" -> 0,
"key4" -> 6)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 5 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple5[Long, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 6 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple6[Long, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 7 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple7[Long, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 8 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple8[Long, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 9 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple9[Long, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 10 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple10[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 11 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple11[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 12 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple12[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 13 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple13[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 14 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple14[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg),
("key14", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0,
"key14" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 15 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple15[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg),
("key14", MinLongAgg),
("key15", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0,
"key14" -> 0,
"key15" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 16 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple16[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg),
("key14", MinLongAgg),
("key15", MinLongAgg),
("key16", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0,
"key14" -> 0,
"key15" -> 0,
"key16" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 17 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple17[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg),
("key14", MinLongAgg),
("key15", MinLongAgg),
("key16", MinLongAgg),
("key17", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0,
"key14" -> 0,
"key15" -> 0,
"key16" -> 0,
"key17" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 18 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple18[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg),
("key14", MinLongAgg),
("key15", MinLongAgg),
("key16", MinLongAgg),
("key17", MinLongAgg),
("key18", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0,
"key14" -> 0,
"key15" -> 0,
"key16" -> 0,
"key17" -> 0,
"key18" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 19 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple19[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg),
("key14", MinLongAgg),
("key15", MinLongAgg),
("key16", MinLongAgg),
("key17", MinLongAgg),
("key18", MinLongAgg),
("key19", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0,
"key14" -> 0,
"key15" -> 0,
"key16" -> 0,
"key17" -> 0,
"key18" -> 0,
"key19" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 20 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple20[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg),
("key14", MinLongAgg),
("key15", MinLongAgg),
("key16", MinLongAgg),
("key17", MinLongAgg),
("key18", MinLongAgg),
("key19", MinLongAgg),
("key20", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0,
"key14" -> 0,
"key15" -> 0,
"key16" -> 0,
"key17" -> 0,
"key18" -> 0,
"key19" -> 0,
"key20" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 21 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple21[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg),
("key14", MinLongAgg),
("key15", MinLongAgg),
("key16", MinLongAgg),
("key17", MinLongAgg),
("key18", MinLongAgg),
("key19", MinLongAgg),
("key20", MinLongAgg),
("key21", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0,
"key14" -> 0,
"key15" -> 0,
"key16" -> 0,
"key17" -> 0,
"key18" -> 0,
"key19" -> 0,
"key20" -> 0,
"key21" -> 0)
assert(agg(data) == expectedMap)
}
"Create an aggregator from 22 (key, aggregator) pairs" in {
val agg: Aggregator[Int, Tuple22[Long, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int, Int], Map[String, Long]] = MapAggregator(
("key1", SizeAgg),
("key2", MinLongAgg),
("key3", MinLongAgg),
("key4", MinLongAgg),
("key5", MinLongAgg),
("key6", MinLongAgg),
("key7", MinLongAgg),
("key8", MinLongAgg),
("key9", MinLongAgg),
("key10", MinLongAgg),
("key11", MinLongAgg),
("key12", MinLongAgg),
("key13", MinLongAgg),
("key14", MinLongAgg),
("key15", MinLongAgg),
("key16", MinLongAgg),
("key17", MinLongAgg),
("key18", MinLongAgg),
("key19", MinLongAgg),
("key20", MinLongAgg),
("key21", MinLongAgg),
("key22", MinLongAgg))
val expectedMap = Map(
"key1" -> 6,
"key2" -> 0,
"key3" -> 0,
"key4" -> 0,
"key5" -> 0,
"key6" -> 0,
"key7" -> 0,
"key8" -> 0,
"key9" -> 0,
"key10" -> 0,
"key11" -> 0,
"key12" -> 0,
"key13" -> 0,
"key14" -> 0,
"key15" -> 0,
"key16" -> 0,
"key17" -> 0,
"key18" -> 0,
"key19" -> 0,
"key20" -> 0,
"key21" -> 0,
"key22" -> 0)
assert(agg(data) == expectedMap)
}
}
}
|
nvoron23/algebird
|
algebird-test/src/test/scala/com/twitter/algebird/TupleAggregatorsTest.scala
|
Scala
|
apache-2.0
| 47,038 |
package com.saikocat.meownificent.service
import akka.actor.{ActorRefFactory, Actor}
import spray.routing.{HttpService, Route}
import com.saikocat.meownificent.configuration.Settings
import com.saikocat.meownificent.protocol.ApiMediaType
import com.saikocat.meownificent.directive.AcceptHeaderDirective
import com.saikocat.meownificent.error.ApiRejectionHandler
class RootServiceActor(settings: Settings) extends Actor with RootService {
def actorRefFactory: ActorRefFactory = context
def receive = runRoute(rootRoute)
}
trait RootService extends HttpService
with AcceptHeaderDirective
with ApiRejectionHandler
with RedditService {
lazy val rootRoute: Route = {
accept(ApiMediaType.versionMediaType) {
redditRoute
}
}
}
|
saikocat/meownificent
|
rest/src/main/scala/com/saikocat/meownificent/service/RootService.scala
|
Scala
|
mit
| 754 |
/*
* This file is part of Kiama.
*
* Copyright (C) 2013-2015 Anthony M Sloane, Macquarie University.
*
* Kiama is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* Kiama is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package org.kiama
package output
/**
* A collection of useful string filters. They are particularly intended
* to be filters for pretty-printer output so that the output can be
* tailored for a restricted setting in which it will be put. E.g., a
* program might be pretty-printed to show in a GUI window of a particular
* size, or lines indented greater than a certain amount might be omitted
* to show an overview.
*/
trait Filters {
/**
* A filter that limits the string `s` to at most `n` characters.
*/
def keepMaxChars (n : Int) (s : String) : String =
s.take (n)
/**
* A filter that limits the string `s` to at most `n` completed
* lines. The final end of line is included.
*/
def keepMaxLines (n : Int) (s : String) : String =
s.linesWithSeparators.take (n).mkString
/**
* A filter that limits the string `s` to at most `n` words. A word
* is one or more consecutive non-whitespace characters. The
* whitespace after the last word (if any) is not included.
*/
def keepMaxWords (n : Int) (s : String) : String = {
val wordRE = """^(?:\\s*[^\\s]+){0,%d}""".format (n).r
wordRE.findFirstIn (s).getOrElse ("")
}
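    // For example (an illustrative sketch, not in the original source):
    //   keepMaxWords (2) ("  one two three") == "  one two"
    // i.e. whitespace before a kept word is preserved, while the whitespace
    // after the last kept word is dropped.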
/**
* A replacement function that when given an integer `n` returns the
* string `"..."` preceded by `n` spaces. The string argument `s` is
* ignored.
*/
def indentedEllipsis (n : Int, s : String) : String =
s"${" " * n}...\\n"
/**
* Return the indentation of a line, i.e., the number of spaces that
* appear before the first non-space character.
*/
def indentOf (s : String) : Int =
s.takeWhile (_.isSpaceChar).length
/**
* A filter that replaces runs of lines that have an indentation
* level of at least `n` spaces. A run of replaced lines will be
* replaced by the result of a call `mkrepl (n, l)` where `l` is
* the first line of the run. By default, `mkrepl` is
* `indentedEllipsis`.
*/
def keepMaxIndent (n : Int, s : String,
mkrepl : (Int, String) => String = indentedEllipsis) : String = {
s.linesWithSeparators.foldLeft ((Vector[String] (), true)) {
case ((result, first), l) =>
if (indentOf (l) >= n)
if (first)
(result :+ mkrepl (n, l), false)
else
(result, false)
else
(result :+ l, true)
}._1.mkString
}
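    // For example (an illustrative sketch, not in the original source), using the
    // default replacement function indentedEllipsis:
    //   keepMaxIndent (4, "def f = {\n        body\n}\n") == "def f = {\n    ...\n}\n"
    // i.e. the run of lines indented by at least four spaces collapses into a
    // single ellipsis line indented by four spaces.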
}
/**
* A collection of useful string filters.
*/
object Filters extends Filters
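/**
 * Illustrative usage sketch (not part of the original Kiama source). It shows the
 * character, line and word filters applied to small, made-up pretty-printer outputs.
 */
private object FiltersUsageSketch {

    import Filters._

    // Keep at most ten characters of the rendered document: "val answer"
    val truncated : String = keepMaxChars (10) ("val answer = 42\n")

    // Keep at most two completed lines, final end of line included: "a\nb\n"
    val twoLines : String = keepMaxLines (2) ("a\nb\nc\n")

    // Keep at most three words, dropping the whitespace after the last one: "one two three"
    val threeWords : String = keepMaxWords (3) ("one two three four")

}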
|
solomono/kiama
|
library/src/org/kiama/output/Filters.scala
|
Scala
|
gpl-3.0
| 3,437 |
package asia
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
import org.scaia.util.asia.example.DilemmaPref._
import org.scaia.asia.{Activity, Coalition, Group, Matching}
import org.scaia.solver.asia._
import org.scaia.util.MathUtils._
import org.scalatest.FlatSpec
class TestDilemmaPref extends FlatSpec {
val debug=false
//val config = ConfigFactory.load()
val system = ActorSystem("TestDilemmaPref")// config.getConfig("myDebugMode").withFallback(config)) //The Actor system with myDebugMode / myRunningMode
val allSoundMatchings= pb.allSoundMatchings()
//Result for the SelectiveSolver with the utilitarian rule (no approximation)
val m1= new Matching(pb)
m1.a= Map(blue -> club, cyan -> club, magenta -> Activity.VOID, red -> ball )
m1.g= Map(blue -> Group(blue, cyan), cyan -> Group(blue, cyan), magenta-> Group(magenta), red -> Group(red))
val m1bis= new Matching(pb)
m1bis.a= Map(blue -> club, cyan -> club, red -> Activity.VOID, magenta -> ball )
m1bis.g= Map(blue -> Group(blue, cyan), cyan -> Group(blue, cyan), magenta-> Group(magenta), red -> Group(red))
  //Result for the InclusiveSolver with the egalitarian rule
val m2= new Matching(pb)
m2.a= Map(blue -> club, cyan -> club, magenta -> ball, red -> ball )
m2.g= Map(blue -> Group(blue, cyan), cyan -> Group(blue, cyan), magenta-> Group(magenta, red), red -> Group(magenta, red))
val cMagentaVoid = new Coalition(Activity.VOID, Group(magenta))
val cRedVoid = new Coalition(Activity.VOID, Group(red))
val cMagentaRedBall = new Coalition(ball, Group(magenta,red))
val cMagentaRedClub = new Coalition(club, Group(magenta,red))
"M1 with club={blue,cyan} ball{magenta} void{red}" should "be IR" in {
assert(m1.isIndividuallyRational())
}
"M2 with club={blue,cyan} ball{magenta, red}" should "be not IR" in {
assert(!m2.isIndividuallyRational())
}
"M1 with club={blue,cyan} ball{magenta} void{red}" should "be not SC" in {
assert(!m1.isCohesive())
}
"M2 with club={blue,cyan} ball{magenta, red}" should "be SC" in {
assert(m2.isCohesive())
}
"magenta" should "strictly prefers stay alone doing nothing rather than being with red and ball" in {
if (debug) println("uMagenta(MagentaVoid)= "+magenta.u(cMagentaVoid.group.names, cMagentaVoid.activity.name))
if (debug) println("uMagenta(MagentaRedBall)= "+magenta.u(cMagentaRedBall.group.names, cMagentaRedBall.activity.name))
assert(magenta.sprefC(cMagentaVoid,cMagentaRedBall))
}
"magenta" should "strictly strictly prefers club with red rather staying alone doing nothing" in {
if (debug) println("uMagenta(MagentaVoid)= "+magenta.u(cMagentaVoid.group.names, cMagentaVoid.activity.name))
if (debug) println("uMagenta(MagentaRedClub= "+magenta.u(cMagentaRedClub.group.names, cMagentaRedClub.activity.name))
assert(magenta.sprefC(cMagentaRedClub, cMagentaVoid))
}
"red" should "strictly prefers stay alone doing nothing rather than being with magenta and ball" in {
if (debug) println("uRed(RedVoid)= "+red.u(cRedVoid.group.names, cRedVoid.activity.name))
if (debug) println("uRed(MagentaRedBall)= "+red.u(cMagentaRedBall.group.names, cMagentaRedBall.activity.name))
assert(red.sprefC(cRedVoid, cMagentaRedBall))
}
"red" should "strictly strictly prefers club with magenta rather than staying alone doing nothing" in {
if (debug) println("uRed(RedVoid)= "+red.u(cRedVoid.group.names, cRedVoid.activity.name))
if (debug) println("uRed(MagentaRedClub)= "+red.u(cMagentaRedClub.group.names, cMagentaRedClub.activity.name))
assert(red.sprefC(cMagentaRedClub,cRedVoid))
}
"SelectiveSolver utilitarian" should "be club={blue,cyan} ball={magenta} void={red}" in {
val solver = new SelectiveSolver(pb, false, Utilitarian)
//solver.debug=true
val result =solver.solve()
assert(result.equals(m1))
}
"DistributedSelectiveSolver utilitarian" should "be club={blue,cyan} ball={magenta} void={red} or ball={red} void={maganta}" in {// Test distributed solver
val solver = new DistributedSelectiveSolver(pb, system, false, Utilitarian)
solver.debug=true
val result =solver.solve()
assert(result.equals(m1) || result.equals(m1bis))
}
"SelectiveSolver egalitarian" should "be club={blue,cyan} ball={magenta} void={red}" in {
val solver = new SelectiveSolver(pb, false, Egalitarian)
//solver.debug=true
val result =solver.solve()
assert(result.equals(m1))
}
"Inclusive solver egalitarian" should "be club={blue,cyan} ball={magenta, red}" in {
val solver = new InclusiveSolver(pb, Egalitarian)
//solver.debug=true
val result =solver.solve()
assert(result.equals(m2))
}
"DistributedInclusive solver egalitarian" should "be club={blue,cyan} ball={magenta, red}" in {// Test distributed solver
val solver = new DistributedInclusiveSolver(pb, system, Egalitarian)
//solver.debug=true
val result =solver.solve()
assert(result.equals(m2))
}
"Inclusive solver utilitarian" should "be club={blue,cyan} ball{magenta, red}" in {
val solver = new InclusiveSolver(pb, Utilitarian)
//solver.debug=true
val result =solver.solve()
assert(result.equals(m2))
}
"M1" should "not be Perfect" in {
assert(!m1.isPerfect())
}
"M1" should "be MaxUtilitarian" in {
val maxUtilitarianMatchings=pb.allMaxUtilitarian()
//if (debug) println(s"Utilitarian matchings: $maxUtilitarianMatchings ")
assert(maxUtilitarianMatchings.contains(m1))
}
"M2" should "be not MaxEgaliarian" in {
val maxEgalitarianMatchings=pb.allMaxEgalitarian()
//if (debug) println(s"Egalitarian matchings: $maxEgalitarianMatchings")
assert(! maxEgalitarianMatchings.contains(m2))
}
"M1" should "be not core stable" in {
val matchings= allSoundMatchings.filter(m => m.isCoreStable())
if (debug) println(s"Number of core stable matchings: ${matchings.size} ")
assert(! m1.isCoreStable())// since club(2): magenta strongly blocks this matching
}
"The dilemma problem" should "have no Perfect sound matching" in {
val matchings= allSoundMatchings.filter(m => m.isPerfect())
if (debug) println(s"Number of Perfect sound matchings: ${matchings.size} ")
assert(matchings.isEmpty)
}
"The dilemma problem" should "have no sound CS matching" in {
val matchings= allSoundMatchings.filter(m => m.isCoreStable())
if (debug) println(s"Number of CS sound matchings: ${matchings.size} ")
assert(matchings.isEmpty)
}
"The dilemma problem" should "have no sound SCS matching" in {
val matchings= allSoundMatchings.filter(m => m.isStrictCoreStable())
if (debug) println(s"Number of SCS sound matchings: ${matchings.size} ")
assert(matchings.isEmpty)
}
"The dilemma problem" should "have (7) NS sound matchings" in {
val matchings= allSoundMatchings.filter(m => m.isNashStable())
if (debug) println(s"Number of NS sound matchings: ${matchings.size} ")
assert(matchings.size==7)
}
"The dilemma problem" should "have (9) IS sound matchings" in {
val matchings=allSoundMatchings.filter(m => m.isIndividuallyStable())
if (debug) println(s"Number of IS sound matchings: ${matchings.size} ")
assert(matchings.size==9)
}
"The dilemma problem" should "have (16) CIS sound matchings" in {
val matchings=allSoundMatchings.filter(m => m.isContractuallyIndividuallyStable())
if (debug) println(s"Number of CIS sound matchings: ${matchings.size} ")
assert(matchings.size==16)
}
"The dilemma problem" should "have (15) PO sound matchings" in {
val matchings=allSoundMatchings.filter(m => m.isParetoOptimal())
if (debug) println(s"Number of PO sound matchings: ${matchings.size} ")
assert(matchings.size==15)
}
"The dilemma problem" should "have (51) IR sound matchings" in {
val matchings=allSoundMatchings.filter(m => m.isIndividuallyRational())
if (debug) println(s"Number of IR sound matchings: ${matchings.size} ")
assert(matchings.size==51)
}
"The dilemma problem" should "have (63) sound matchings" in {
val matchings=allSoundMatchings
if (debug) println(s"Number of sound matchings: ${matchings.size} ")
assert(matchings.size==63)
}
"The dilemma problem" should "have (2) MaxUtil sound matchings" in {
val matchings=pb.allMaxUtilitarian()
if (debug) println(s"Number of MaxUtil sound matchings: ${matchings.size} ")
assert(matchings.size==2)
}
"The dilemma problem" should "have (2) MaxEgal sound matchings" in {
val matchings=pb.allMaxEgalitarian()
//if (debug) println(s"Egalitarian sound matchings: $matchings ")
if (debug) println(s"Number of MaxEgal sound matchings: ${matchings.size} ")
assert(matchings.size==2)
}
"The dilemma problem" should "have (6) social cohesive matchings" in {
val matchings=allSoundMatchings.filter(m => m.isCohesive())
if (debug) println(s"Number of SC sound matchings: ${matchings.size} ")
assert(matchings.size==6)
}
}
|
maximemorge/ScaIA
|
src/test/scala/asia/TestDilemmaPref.scala
|
Scala
|
gpl-3.0
| 9,035 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Mon Sep 7 15:05:06 EDT 2009
* @see LICENSE (MIT style license file).
*/
package scalation.util
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Monitor` object is used to trace the actions/events in the models.
*/
object Monitor
{
/** Flag indicating whether tracing is on (initially on)
*/
private var tracing = true
/** Use `EasyWriter` to make it easy to switch from standard out to a (log) file
*/
private val ew = new EasyWriter ("util", "monitor")
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Toggle the output destination between the default (log) file and standard output.
*/
def toggle () { ew.toggle () }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Turn tracing off.
*/
def traceOff () { tracing = false }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Turn tracing back on.
*/
def traceOn () { tracing = true }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Trace an action/event.
* @param who who caused the action
* @param what what was the action
     * @param whom whom did the action affect
* @param when when was the action taken
*/
def trace (who: Identifiable, what: String, whom: Identifiable, when: Double)
{
if (tracing) {
if (whom == null) {
ew.println ("+ " + who.me + " " + what + " at time " + when + ".")
} else {
ew.println ("+ " + who.me + " " + what + " " + whom.me + " at time " + when + ".")
} // if
} // if
} // trace
} // Monitor object
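// Illustrative call pattern (a sketch, not part of the original source; 'server'
// and 'customer' stand for any Identifiable model components):
//
//   import scalation.util.Monitor._
//   trace (server, "serves", customer, 5.0)   // logs "+ <server.me> serves <customer.me> at time 5.0."
//   trace (server, "is idle", null, 6.0)      // the 'whom' argument may be null
//   toggle ()                                 // switch between the log file and standard output
//   traceOff (); traceOn ()                   // disable / re-enable all tracing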
|
NBKlepp/fda
|
scalation_1.2/src/main/scala/scalation/util/Monitor.scala
|
Scala
|
mit
| 1,915 |
import sbt._
import Keys._
import scala.language.implicitConversions
import scala.language.postfixOps
object Common {
def newProject(projectName: String, base: File): Project =
Project(projectName, base).settings(
name := projectName,
organization := "JetBrains",
scalaVersion := Versions.scalaVersion,
unmanagedSourceDirectories in Compile += baseDirectory.value / "src",
unmanagedSourceDirectories in Test += baseDirectory.value / "test",
unmanagedResourceDirectories in Compile += baseDirectory.value / "resources"
)
def newProject(projectName: String): Project =
newProject(projectName, file(projectName))
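  // Collects every *.jar found directly under the given subdirectories of the SDK
  // directory and exposes them as an unmanaged classpath entry.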
def unmanagedJarsFrom(sdkDirectory: File, subdirectories: String*): Classpath = {
val sdkPathFinder = subdirectories.foldLeft(PathFinder.empty) { (finder, dir) =>
finder +++ (sdkDirectory / dir)
}
(sdkPathFinder * globFilter("*.jar")).classpath
}
}
|
triggerNZ/intellij-scala
|
project/Common.scala
|
Scala
|
apache-2.0
| 936 |
package com.twitter.scalding.bdd
import cascading.flow.FlowException
import com.twitter.scalding.typed.TDsl
import com.twitter.scalding._
import org.scalatest.{ Matchers, WordSpec }
import scala.math._
import scala.collection.mutable
case class UserWithGender(name: String, gender: String)
case class UserWithAge(name: String, age: Int)
case class UserInfo(name: String, gender: String, age: Int)
case class EstimatedContribution(name: String, suggestedPensionContributionPerMonth: Double)
class TypedApiTest extends WordSpec with Matchers with TBddDsl {
"A test with a single source" should {
"accept an operation from working with a single tuple-typed pipe" in {
Given {
List(("Joe", "M", 40), ("Sarah", "F", 22))
} When {
in: TypedPipe[(String, String, Int)] =>
in.map[(String, Double)] { person =>
person match {
case (name, "M", age) => (name, (1000.0 / (72 - age)).toDouble)
case (name, _, age) => (name, (1000.0 / (80 - age)).toDouble)
}
}
} Then {
buffer: mutable.Buffer[(String, Double)] =>
buffer.toList shouldBe List(("Joe", 1000.0 / 32), ("Sarah", 1000.0 / 58))
}
}
"accept an operation from single case class-typed pipe" in {
Given {
List(UserInfo("Joe", "M", 40), UserInfo("Sarah", "F", 22))
} When {
in: TypedPipe[UserInfo] =>
in.map { person =>
person match {
case UserInfo(name, "M", age) => EstimatedContribution(name, (1000.0 / (72 - age)))
case UserInfo(name, _, age) => EstimatedContribution(name, (1000.0 / (80 - age)))
}
}
} Then {
buffer: mutable.Buffer[EstimatedContribution] =>
buffer.toList shouldBe List(EstimatedContribution("Joe", 1000.0 / 32), EstimatedContribution("Sarah", 1000.0 / 58))
}
}
}
"A test with a two sources" should {
"accept an operation from two tuple-typed pipes" in {
Given {
List(("Joe", "M"), ("Sarah", "F"))
} And {
List(("Joe", 40), ("Sarah", 22))
} When {
(gender: TypedPipe[(String, String)], age: TypedPipe[(String, Int)]) =>
gender
.group
.join(age.group)
.toTypedPipe
.map { value: (String, (String, Int)) =>
val (name, (gender, age)) = value
(name, gender, age)
}
} Then {
buffer: mutable.Buffer[(String, String, Int)] =>
buffer.toList shouldBe List(("Joe", "M", 40), ("Sarah", "F", 22))
}
}
"accept an operation from two case classes-typed pipes" in {
Given {
List(UserWithGender("Joe", "M"), UserWithGender("Sarah", "F"))
} And {
List(UserWithAge("Joe", 40), UserWithAge("Sarah", 22))
} When {
(gender: TypedPipe[UserWithGender], age: TypedPipe[UserWithAge]) =>
gender
.groupBy(_.name)
.join(age.groupBy(_.name))
.mapValues { value: (UserWithGender, UserWithAge) =>
val (withGender, withAge) = value
UserInfo(withGender.name, withGender.gender, withAge.age)
}
.values
} Then {
buffer: mutable.Buffer[UserInfo] =>
buffer.toList shouldBe List(UserInfo("Joe", "M", 40), UserInfo("Sarah", "F", 22))
}
}
}
"A test with a list of sources" should {
"Work as if combining the sources with the And operator but requires explicit cast of the input pipes" in {
GivenSources {
List(
List(UserWithGender("Joe", "M"), UserWithGender("Sarah", "F")),
List(UserWithAge("Joe", 40), UserWithAge("Sarah", 22)))
} When {
pipes: List[TypedPipe[_]] =>
val gender = pipes(0).asInstanceOf[TypedPipe[UserWithGender]]
val age = pipes(1).asInstanceOf[TypedPipe[UserWithAge]]
gender
.groupBy(_.name)
.join(age.groupBy(_.name))
.mapValues { value: (UserWithGender, UserWithAge) =>
val (withGender, withAge) = value
UserInfo(withGender.name, withGender.gender, withAge.age)
}
.values
} Then {
buffer: mutable.Buffer[UserInfo] =>
buffer.toList shouldBe List(UserInfo("Joe", "M", 40), UserInfo("Sarah", "F", 22))
}
}
"not checking the types of the sources and fail if any error occurs" in {
an[FlowException] should be thrownBy {
GivenSources {
List(
List(UserWithGender("Joe", "M"), UserWithGender("Sarah", "F")),
List(("Joe", 40), ("Sarah", 22)))
} When {
pipes: List[TypedPipe[_]] =>
val gender = pipes(0).asInstanceOf[TypedPipe[UserWithGender]]
val age = pipes(1).asInstanceOf[TypedPipe[UserWithAge]]
gender
.groupBy(_.name)
.join(age.groupBy(_.name))
.mapValues { value: (UserWithGender, UserWithAge) =>
val (withGender, withAge) = value
UserInfo(withGender.name, withGender.gender, withAge.age)
}
.values
} Then {
buffer: mutable.Buffer[UserInfo] =>
buffer.toList shouldBe List(UserInfo("Joe", "M", 40), UserInfo("Sarah", "F", 22))
}
}
}
"be created when adding a source to four sources" in {
Given {
List(("Joe", "user1"), ("Sarah", "user2"))
} And {
List(("user1", "M"), ("user2", "F"))
} And {
List(("user1", 40), ("user2", 22))
} And {
List(("user1", 1000l), ("user2", 800l))
} And {
List(("user1", true), ("user2", false))
} When {
pipes: List[TypedPipe[_]] =>
val withUserID = pipes(0).asInstanceOf[TypedPipe[(String, String)]]
val withGender = pipes(1).asInstanceOf[TypedPipe[(String, String)]]
val withAge = pipes(2).asInstanceOf[TypedPipe[(String, Int)]]
val withIncome = pipes(3).asInstanceOf[TypedPipe[(String, Long)]]
val withSmoker = pipes(4).asInstanceOf[TypedPipe[(String, Boolean)]]
withUserID
.swap.group
.join(withGender.group)
.join(withAge.group)
.join(withIncome.group)
.join(withSmoker.group)
.flatMapValues {
case ((((name: String, gender: String), age: Int), income: Long), smoker) =>
val lifeExpectancy = (gender, smoker) match {
case ("M", true) => 68
case ("M", false) => 72
case (_, true) => 76
case (_, false) => 80
}
Some(EstimatedContribution(name, floor(income / (lifeExpectancy - age))))
case _ => None
}
.values
} Then {
buffer: mutable.Buffer[EstimatedContribution] =>
buffer.toList shouldBe List(EstimatedContribution("Joe", 35.0), EstimatedContribution("Sarah", 13.0))
}
}
}
}
|
sriramkrishnan/scalding
|
scalding-core/src/test/scala/com/twitter/scalding/bdd/TypedApiTest.scala
|
Scala
|
apache-2.0
| 7,093 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle
import org.scalatest.junit.AssertionsForJUnit
import org.junit.Assert.assertEquals
import org.junit.Test
import java.io.File
class SourceFileParserTest extends AssertionsForJUnit {
@Test
def parseEmptyFile(): Unit = {
val configPath = "src/test/resources/config/scalastyle_config.xml"
val config = ScalastyleConfiguration.readFromXml(configPath)
val checks = config.checks.filter(_.enabled)
val sourcePath = new File("src/test/resources/testfiles/EmptyClass.scala")
val sourceFile = new DirectoryFileSpec(sourcePath.getAbsolutePath(), encoding = None, sourcePath.getAbsoluteFile())
val msgs = Checker.verifyFile(config, checks, sourceFile)
assertEquals(Nil, msgs)
}
}
|
firebase/scalastyle
|
src/test/scala/org/scalastyle/SourceFileParserTest.scala
|
Scala
|
apache-2.0
| 1,458 |
package st
import org.scalatest.FreeSpec
class FreeSpecTest extends FreeSpec {
"A Set" - {
"when empty" - {
"should have size 0" in {
assert(Set.empty.isEmpty)
}
"should produce NoSuchElementException when head is invoked" in {
intercept[NoSuchElementException] {
Set.empty.head
}
}
}
}
}
|
objektwerks/scala.test
|
src/test/scala/st/FreeSpecTest.scala
|
Scala
|
mit
| 361 |
package com.sksamuel.scrimage.filter
import org.scalatest.{ OneInstancePerTest, BeforeAndAfter, FunSuite }
import com.sksamuel.scrimage.Image
/** @author Stephen Samuel */
class EdgeFilterTest extends FunSuite with BeforeAndAfter with OneInstancePerTest {
val original = getClass.getResourceAsStream("/bird_small.png")
val expected = getClass.getResourceAsStream("/com/sksamuel/scrimage/filters/bird_small_edge.png")
test("filter output matches expected") {
assert(Image(original).filter(EdgeFilter) === Image(expected))
}
}
|
carlosFattor/scrimage
|
scrimage-filters/src/test/scala/com/sksamuel/scrimage/filter/EdgeFilterTest.scala
|
Scala
|
apache-2.0
| 541 |
package db.scalikejdbc
import org.intracer.wmua.cmd.SetCurrentRound
import org.specs2.mutable.Specification
class RoundSpec extends Specification with TestDb {
sequential
stopOnFail
"rounds" should {
"be empty" in {
withDb {
val rounds = roundDao.findAll()
rounds.size === 0
}
}
"insert round" in {
withDb {
val round = Round(None, 1, Some("Round 1"), 10, Set("jury"), 3, Round.ratesById(10), active = true, createdAt = now)
val created = roundDao.create(round)
val id = created.id
created === round.copy(id = id)
val found = roundDao.findById(id.get)
found === Some(created)
val all = roundDao.findAll()
all === Seq(created)
}
}
"available jurors" in {
withDb {
implicit val contest = createContests(10).head
val round = Round(None, 1, Some("Round 1"), 10, Set("jury"), 3, Round.ratesById(10), active = true)
roundDao.create(round)
val jurors = createUsers(1 to 3).map(u => u.copy(roles = u.roles + s"USER_ID_${u.getId}"))
val preJurors = createUsers("prejury", 11 to 13)
val orgCom = createUsers("organizer", 20)
val otherContestJurors = createUsers(31 to 33)(contest.copy(id = Some(20)), implicitly)
round.availableJurors === jurors
}
}
"set new current round" in {
withDb {
val contestDao = ContestJuryJdbc
val contest = contestDao.create(None, "WLE", 2015, "Ukraine", None, None, None)
val contestId = contest.getId
val createdAt = now
val round = roundDao.create(Round(None, 1, Some("Round 1"), contest.getId, Set("jury"), 0, Round.ratesById(10), createdAt = createdAt))
roundDao.findById(round.getId).map(_.active) === Some(false)
roundDao.activeRounds(contestId) === Seq.empty
contestDao.findById(contestId).get.currentRound === None
val activeRound = round.copy(active = true)
SetCurrentRound(contestId, None, activeRound).apply()
// TODO fix time issues
roundDao.findById(round.getId).map(_.copy(createdAt = createdAt)) === Some(activeRound.copy(createdAt = createdAt))
roundDao.activeRounds(contestId).map(_.copy(createdAt = createdAt)) === Seq(activeRound.copy(createdAt = createdAt))
contestDao.findById(contestId).get.currentRound === None
}
}
}
}
|
intracer/wlxjury
|
test/db/scalikejdbc/RoundSpec.scala
|
Scala
|
apache-2.0
| 2,424 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.flow.util
import org.argus.jawa.core.util._
import org.scalatest.{FlatSpec, Matchers}
/**
* Created by fgwei on 5/18/17.
*/
class UtilTest extends FlatSpec with Matchers {
"TopologicalSort" should "work for Acyclic Graph" in {
var map: IMap[Int, ISet[Int]] = imapEmpty
map += 0 -> Set(1, 2)
map += 1 -> Set(2, 3)
map += 2 -> Set(3)
assert(TopologicalSortUtil.sort(map) == List(0, 1, 2, 3))
}
}
|
arguslab/Argus-SAF
|
jawa/src/test/scala/org/argus/jawa/flow/util/UtilTest.scala
|
Scala
|
apache-2.0
| 805 |
package org.jetbrains.plugins.scala.worksheet.settings.ui
import com.intellij.openapi.module.Module
import org.jetbrains.plugins.scala.worksheet.settings.WorksheetExternalRunType
private case class WorksheetSettingsData(
isInteractive: Boolean,
isMakeBeforeRun: Boolean,
runType: WorksheetExternalRunType,
cpModule: Module,
compilerProfile: String
)
|
JetBrains/intellij-scala
|
scala/worksheet/src/org/jetbrains/plugins/scala/worksheet/settings/ui/WorksheetSettingsData.scala
|
Scala
|
apache-2.0
| 362 |
package ch5
import scala.annotation.tailrec
import Stream._
object Exercise2 {
implicit class StreamExt[+A](val self: Stream[A]) extends AnyVal {
def take(n: Int): Stream[A] = self match {
case Cons(h, t) if n > 1 => cons(h(), t().take(n - 1))
case Cons(h, _) if n == 1 => cons(h(), empty)
case _ => empty
}
@tailrec def drop(n: Int): Stream[A] = self match {
case Cons(h, t) if n > 0 => t().drop(n - 1)
case _ => self
}
def toList: List[A] = self match {
case Empty => List.empty
case Cons(h, t) => h() :: t().toList
}
}
}
import Exercise2._
/*
 From the REPL you can test by typing:
:load src/main/scala/fpinscala/ch5/Stream.scala
:load src/main/scala/fpinscala/ch5/Exercise2.scala
cons(1, cons(2, cons(3, empty))).take(2)
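 expected results (illustrative, added for clarity):
 cons(1, cons(2, cons(3, empty))).take(2).toList   // List(1, 2)
 cons(1, cons(2, cons(3, empty))).drop(1).toList   // List(2, 3)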
*/
|
rucka/fpinscala
|
src/main/scala/fpinscala/ch5/Exercise2.scala
|
Scala
|
gpl-2.0
| 805 |
package com.twitter.finagle.filter
import com.twitter.concurrent.AsyncSemaphore
import com.twitter.finagle._
import com.twitter.finagle.tracing.Annotation.BinaryAnnotation
import com.twitter.finagle.tracing.{BufferingTracer, Record, Trace}
import com.twitter.util.{Await, Future}
import org.scalatest.funsuite.AnyFunSuite
class RequestSemaphoreFilterTest extends AnyFunSuite {
def tracingAnnotations(tracer: BufferingTracer): Seq[(String, Any)] = {
tracer.iterator.toList collect {
case Record(_, _, BinaryAnnotation(k, v), _) => k -> v
}
}
test("mark dropped requests as rejected") {
val neverSvc = new Service[Int, Int] {
def apply(req: Int) = Future.never
}
val q = new AsyncSemaphore(1, 0)
val svc = new RequestSemaphoreFilter(q) andThen neverSvc
svc(1)
val f = intercept[Failure] { Await.result(svc(1)) }
assert(f.isFlagged(FailureFlags.Retryable))
}
test("service failures are not wrapped as rejected") {
val exc = new Exception("app exc")
val neverSvc = new Service[Int, Int] {
def apply(req: Int) = Future.exception(exc)
}
val q = new AsyncSemaphore(1, 0)
val svc = new RequestSemaphoreFilter(q) andThen neverSvc
svc(1)
val e = intercept[Exception] { Await.result(svc(1)) }
assert(e == exc)
}
test("annotates dropped requests") {
val tracer = new BufferingTracer()
Trace.letTracer(tracer) {
val neverSvc = new Service[Int, Int] {
def apply(req: Int) = Future.never
}
val q = new AsyncSemaphore(1, 0)
val svc = new RequestSemaphoreFilter(q) andThen neverSvc
svc(1)
val f = intercept[Failure] { Await.result(svc(1)) }
}
val expected = Seq(("clnt/RequestSemaphoreFilter_rejected", "Max waiters exceeded"))
assert(tracingAnnotations(tracer) == expected)
}
}
|
twitter/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/filter/RequestSemaphoreFilterTest.scala
|
Scala
|
apache-2.0
| 1,834 |
package models.quiz
import models.{QuestionId, SkillId}
case class Skill2Question(skillId: SkillId, questionId: QuestionId)
|
kristiankime/calc-tutor
|
app/models/quiz/Skill2Question.scala
|
Scala
|
mit
| 125 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.actorregistry
import akka.actor._
import akka.pattern.AskSupport
import akka.util.Timeout
import scala.concurrent.{ExecutionContext, Promise, Future}
import scala.concurrent.duration.FiniteDuration
import scala.language.existentials
import scala.reflect.ClassTag
import scala.util.Success
object ActorLookup {
/**
   * Squbs API: Returns an ActorLookup instance that has default values except for responseClass,
   * which will be the supplied type.
*/
def apply[T: ClassTag] = {
val responseClass = implicitly[ClassTag[T]].runtimeClass
new ActorLookup(responseClass = responseClass, explicitType = responseClass != classOf[Any])
}
/**
   * Squbs API: Returns an ActorLookup instance that has default values except for:
   * responseClass, which will be the supplied type, and
   * actorName, which will be the supplied actorName.
*/
def apply[T: ClassTag](actorName: String) = {
val responseClass = implicitly[ClassTag[T]].runtimeClass
new ActorLookup(responseClass = responseClass, actorName=Some(actorName),
explicitType = responseClass != classOf[Any])
}
def apply() = new ActorLookup(classOf[Any], None, None, explicitType = false)
def apply(requestClass: Class[_]) = new ActorLookup(classOf[Any], Option(requestClass), None, explicitType = false)
def apply(requestClass: Option[Class[_]], actorName: Option[String]) =
new ActorLookup(classOf[Any], requestClass, actorName, explicitType = false)
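  // Illustrative usage sketch (added for clarity; not part of the original squbs source —
  // the actor system, timeout and message values below are assumptions):
  //   implicit val system: ActorSystem = ActorSystem("demo")   // supplies the implicit ActorRefFactory
  //   implicit val timeout: Timeout = Timeout(1.second)        // requires scala.concurrent.duration._
  //   val reply: Future[String] = ActorLookup[String] ? "ping" // asks whichever registered actor matches the request/response types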
/**
   * Squbs API: Sends msg with the tell pattern to the corresponding actor if the msg class type matches an
   * entry in the Actor registry.
*/
def !(msg: Any)(implicit sender: ActorRef = Actor.noSender, refFactory: ActorRefFactory) = tell(msg, sender)
/**
   * Squbs API: Sends msg with the ask pattern to the corresponding actor if the msg class type
   * matches an entry in the Actor registry.
*/
def ?(message: Any)(implicit timeout: Timeout, refFactory: ActorRefFactory): Future[Any] =
ask(message)(timeout, refFactory)
/**
   * Squbs API: Sends msg with the tell pattern to the corresponding actor if the msg class type matches an
   * entry in the Actor registry.
*/
def tell(msg: Any, sender: ActorRef)(implicit refFactory: ActorRefFactory) = ActorLookup().tell(msg, sender)
/**
   * Squbs API: Sends msg with the ask pattern to the corresponding actor if the msg class type matches an
   * entry in the Actor registry.
*/
def ask(msg: Any)(implicit timeout: Timeout, refFactory: ActorRefFactory): Future[Any] =
ActorLookup().ask(msg)(timeout, refFactory)
}
case class ActorNotFound(actorLookup: ActorLookup[_]) extends RuntimeException("Actor not found for: " + actorLookup)
/**
* Construct an [[org.squbs.actorregistry.ActorLookup]] from the requestClass, responseClass, actor name
*/
private[actorregistry] case class ActorLookup[T](responseClass: Class[T],
requestClass: Option[Class[_]] = None,
actorName: Option[String] = None,
explicitType: Boolean = false) extends AskSupport{
/**
   * Squbs API: Sends msg with the tell pattern to the corresponding actor if requestClass (msg's class type),
   * responseClass and actorName match an entry in the Actor registry.
*/
def !(msg: Any)(implicit sender: ActorRef = Actor.noSender, refFactory: ActorRefFactory) = tell(msg, sender)
/**
   * Squbs API: Sends msg with the ask pattern to the corresponding actor if requestClass (msg's class type),
   * responseClass and actorName match an entry in the Actor registry.
*/
def ?(message: Any)(implicit timeout: Timeout, refFactory: ActorRefFactory): Future[T] =
ask(message)(timeout, refFactory)
/**
   * Squbs API: Sends msg with the tell pattern to the corresponding actor if requestClass (msg's class type),
   * responseClass and actorName match an entry in the Actor registry.
*/
def tell(msg: Any, sender: ActorRef)(implicit refFactory: ActorRefFactory) =
refFactory.actorSelection(ActorRegistry.path)
.tell(ActorLookupMessage(copy(requestClass= Some(msg.getClass)), msg), sender)
/**
   * Squbs API: Sends msg with the ask pattern to the corresponding actor if requestClass (msg's class type),
   * responseClass and actorName match an entry in the Actor registry.
*/
def ask(msg: Any)(implicit timeout: Timeout, refFactory: ActorRefFactory ): Future[T] = {
val f = refFactory.actorSelection(ActorRegistry.path)
.ask(ActorLookupMessage(copy(requestClass = Some(msg.getClass)), msg))
import refFactory.dispatcher
mapFuture(f, responseClass)
}
private def mapFuture[U](f: Future[Any], responseType: Class[U])(implicit ec: ExecutionContext): Future[U] = {
val boxedClass = {
if (responseType.isPrimitive) toBoxed(responseType) else responseType
}
require(boxedClass ne null)
f.map(v => boxedClass.cast(v).asInstanceOf[U])
}
private val toBoxed = Map[Class[_], Class[_]](
classOf[Boolean] -> classOf[java.lang.Boolean],
classOf[Byte] -> classOf[java.lang.Byte],
classOf[Char] -> classOf[java.lang.Character],
classOf[Short] -> classOf[java.lang.Short],
classOf[Int] -> classOf[java.lang.Integer],
classOf[Long] -> classOf[java.lang.Long],
classOf[Float] -> classOf[java.lang.Float],
classOf[Double] -> classOf[java.lang.Double],
classOf[Unit] -> classOf[scala.runtime.BoxedUnit]
)
/**
   * Squbs API: Returns an ActorRef if requestClass (msg's class type), responseClass and actorName match
   * an entry in the Actor registry.
*/
def resolveOne(timeout: FiniteDuration)(implicit refFactory: ActorRefFactory): Future[ActorRef] =
resolveOne()(timeout, refFactory)
def resolveOne()(implicit timeout: Timeout, refFactory: ActorRefFactory) : Future[ActorRef] = {
val p = Promise[ActorRef]()
this match {
case ActorLookup(_, None, None, false) =>
p.failure(org.squbs.actorregistry.ActorNotFound(this))
case _ =>
import refFactory.dispatcher
refFactory.actorSelection(ActorRegistry.path) ? ActorLookupMessage(this, Identify("ActorLookup")) onComplete {
case Success(ActorIdentity(_, Some(ref))) =>
p.success(ref)
case _ =>
p.failure(org.squbs.actorregistry.ActorNotFound(this))
}
}
p.future
}
}
|
Harikiranvuyyuru/squbs
|
squbs-actorregistry/src/main/scala/org/squbs/actorregistry/ActorLookup.scala
|
Scala
|
apache-2.0
| 7,040 |
/*
* Licensed to Cloudera, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Cloudera, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.hue.livy.server.interactive
import java.util.concurrent.TimeUnit
import com.cloudera.hue.livy.msgs.ExecuteRequest
import com.cloudera.hue.livy.sessions.{Idle, Starting}
import org.json4s.{DefaultFormats, Extraction}
import org.scalatest.{BeforeAndAfter, FunSpec, Matchers}
import scala.concurrent.Await
import scala.concurrent.duration.Duration
abstract class BaseSessionSpec extends FunSpec with Matchers with BeforeAndAfter {
implicit val formats = DefaultFormats
var session: InteractiveSession = null
def createSession(): InteractiveSession
before {
session = createSession()
}
after {
session.stop()
}
describe("A spark session") {
it("should start in the starting or idle state") {
session.state should (equal (Starting()) or equal (Idle()))
}
it("should eventually become the idle state") {
session.waitForStateChange(Starting(), Duration(30, TimeUnit.SECONDS))
session.state should equal (Idle())
}
it("should execute `1 + 2` == 3") {
session.waitForStateChange(Starting(), Duration(30, TimeUnit.SECONDS))
val stmt = session.executeStatement(ExecuteRequest("1 + 2"))
val result = Await.result(stmt.output(), Duration.Inf)
val expectedResult = Extraction.decompose(Map(
"status" -> "ok",
"execution_count" -> 0,
"data" -> Map(
"text/plain" -> "res0: Int = 3"
)
))
result should equal (expectedResult)
}
it("should report an error if accessing an unknown variable") {
session.waitForStateChange(Starting(), Duration(30, TimeUnit.SECONDS))
val stmt = session.executeStatement(ExecuteRequest("x"))
val result = Await.result(stmt.output(), Duration.Inf)
val expectedResult = Extraction.decompose(Map(
"status" -> "error",
"execution_count" -> 0,
"ename" -> "Error",
"evalue" ->
"""<console>:8: error: not found: value x
| x
| ^""".stripMargin
))
result should equal (expectedResult)
}
}
}
|
vmanoria/bluemix-hue-filebrowser
|
hue-3.8.1-bluemix/apps/spark/java/livy-server/src/test/scala/com/cloudera/hue/livy/server/interactive/BaseSessionSpec.scala
|
Scala
|
gpl-2.0
| 2,903 |
/*******************************************************************************
Copyright (c) 2013-2014, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMHtml5
import scala.collection.mutable.{Map=>MMap, HashMap=>MHashMap}
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr}
import kr.ac.kaist.jsaf.analysis.typing.{ControlPoint, Helper, PreHelper, Semantics}
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import scala.Some
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
// Modeled based on WHATWG HTML Living Standard
// Section 11.2.1 The Storage Interface.
object Storage extends DOM {
private val name = "Storage"
/* predefined locations */
val loc_cons = newSystemRecentLoc(name + "Cons")
val loc_ins = newSystemRecentLoc(name + "Ins")
// for window.sessionStorage
val loc_ins2 = newSystemRecentLoc(name + "Ins2")
val loc_proto = newSystemRecentLoc(name + "Proto")
/* constructor */
private val prop_cons: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Function")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("@hasinstance", AbsConstValue(PropValueNullTop)),
("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
)
/* instant object */
private val prop_ins: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
// property
("length", AbsConstValue(PropValue(ObjectValue(Value(UInt), F, F, F)))),
(Str_default_number, AbsConstValue(PropValue(ObjectValue(Value(StrTop), F, F, F)))),
(Str_default_other, AbsConstValue(PropValue(ObjectValue(Value(StrTop), F, F, F))))
)
  /* prototype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(ObjProtoLoc, F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
// API
("key", AbsBuiltinFunc("Storage.key", 1)),
("getItem", AbsBuiltinFunc("Storage.getItem", 1)),
("setItem", AbsBuiltinFunc("Storage.setItem", 2)),
("removeItem", AbsBuiltinFunc("Storage.removeItem", 1)),
("clear", AbsBuiltinFunc("Storage.clear", 0))
)
/* global */
private val prop_global: List[(String, AbsProperty)] = List(
(name, AbsConstValue(PropValue(ObjectValue(loc_cons, T, F, T))))
)
/* initial property list */
def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
(loc_ins, prop_ins), (loc_ins2, prop_ins), (loc_proto, prop_proto), (loc_cons, prop_cons), (GlobalLoc, prop_global)
)
def getSemanticMap(): Map[String, SemanticFun] = {
Map(
("Storage.key" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val n_index = Helper.toNumber(Helper.toPrimitive_better(h, getArgValue(h, ctx, args, "0")))
if (n_index </ NumBot) {
val lset_this = h(SinglePureLocalLoc)("@this")._2._2
val n_length = lset_this.foldLeft[AbsNumber](NumBot)((n, l) =>
n + Helper.toNumber(Helper.toPrimitive_better(h, Helper.Proto(h, l, AbsString.alpha("length")))))
val s_index = Helper.toString(PValue(n_index))
// Returns the indexth item in the collection.
val v_return = lset_this.foldLeft(ValueBot)((v, l) => v + Helper.Proto(h, l, s_index))
// If index is greater than or equal to the number of nodes in the list, this returns null.
val v_null = AbsDomUtils.checkIndex(n_index, n_length)
((Helper.ReturnStore(h, v_return + v_null), ctx), (he, ctxe))
}
else
((HeapBot, ContextBot), (he, ctxe))
})),
("Storage.getItem" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val key = Helper.toString(Helper.toPrimitive_better(h, getArgValue(h, ctx, args, "0")))
if (key </ StrBot) {
val lset_this = h(SinglePureLocalLoc)("@this")._2._2
val value = lset_this.foldLeft(ValueBot)((v, l) =>{
val v_t = if (BoolTrue <= Helper.HasOwnProperty(h, l, key))
Helper.Proto(h, l, key)
else ValueBot
val v_f = if (BoolFalse <= Helper.HasOwnProperty(h, l, key))
Value(NullTop)
else ValueBot
v+v_t+v_f
})
((Helper.ReturnStore(h, value), ctx), (he, ctxe))
}
else
((HeapBot, ContextBot), (he, ctxe))
})),
("Storage.setItem" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val key = Helper.toString(Helper.toPrimitive_better(h, getArgValue(h, ctx, args, "0")))
val value = Helper.toString(Helper.toPrimitive_better(h, getArgValue(h, ctx, args, "1")))
if (key </ StrBot && value </ StrBot) {
val lset_this = h(SinglePureLocalLoc)("@this")._2._2
val h_1 = lset_this.foldLeft(h)((_h, l) =>
Helper.PropStore(_h, l, key, Value(value)))
((Helper.ReturnStore(h_1, Value(UndefTop)), ctx), (he, ctxe))
}
else
((HeapBot, ContextBot), (he, ctxe))
})),
("Storage.removeItem" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
/* arguments */
val key = Helper.toString(Helper.toPrimitive_better(h, getArgValue(h, ctx, args, "0")))
if (key </ StrBot) {
val lset_this = h(SinglePureLocalLoc)("@this")._2._2
val h_1 = lset_this.foldLeft(h)((_h, l) =>
Helper.Delete(_h, l, key)._1)
((Helper.ReturnStore(h_1, Value(UndefTop)), ctx), (he, ctxe))
}
else
((HeapBot, ContextBot), (he, ctxe))
}))
)
}
def getPreSemanticMap(): Map[String, SemanticFun] = {
Map(
)
}
def getDefMap(): Map[String, AccessFun] = {
Map(
)
}
def getUseMap(): Map[String, AccessFun] = {
Map(
)
}
/* instance */
def getInstance(): Option[Loc] = Some (loc_ins)
}
|
darkrsw/safe
|
src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMHtml5/Storage.scala
|
Scala
|
bsd-3-clause
| 7,368 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
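/**
 * Iterator adapter that lets a single element be pushed back so that the next call to
 * `next()` returns it again. (Descriptive comment added for clarity; the example below
 * is illustrative and not part of the original Gatling source.)
 *
 * {{{
 * val it = new PushbackIterator(Iterator(1, 2, 3))
 * val first = it.next() // 1
 * it.pushback(first)
 * it.next()             // 1 again
 * it.next()             // 2
 * }}}
 */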
class PushbackIterator[T](it: Iterator[T]) extends Iterator[T] {
private var pushedbackValue: Option[T] = None
override def hasNext: Boolean = pushedbackValue.isDefined || it.hasNext
override def next(): T = pushedbackValue match {
case None => it.next()
case Some(value) =>
pushedbackValue = None
value
}
def pushback(value: T): Unit = pushedbackValue = Some(value)
}
|
ryez/gatling
|
gatling-commons/src/main/scala/io/gatling/commons/util/PushbackIterator.scala
|
Scala
|
apache-2.0
| 1,054 |
/*
* Copyright 2018 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
package scalatags
import _root_.scalatags.generic.Frag
import org.http4s.Charset.`UTF-8`
import org.http4s.headers.`Content-Type`
trait ScalatagsInstances {
implicit def scalatagsEncoder[C <: Frag[_, String]](implicit
charset: Charset = `UTF-8`
): EntityEncoder.Pure[C] =
contentEncoder(MediaType.text.html)
private def contentEncoder[C <: Frag[_, String]](
mediaType: MediaType
)(implicit charset: Charset): EntityEncoder.Pure[C] =
EntityEncoder.stringEncoder
.contramap[C](content => content.render)
.withContentType(`Content-Type`(mediaType, charset))
}
|
http4s/http4s
|
scalatags/src/main/scala/org/http4s/scalatags/ScalatagsInstances.scala
|
Scala
|
apache-2.0
| 1,214 |
/*
* Copyright 2014 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.redis
import com.twitter.app.App
import com.twitter.util.Await.result
import com.twitter.zipkin.redis.RedisSpanStoreFactory
import org.scalatest.FunSuite
class RedisSpanStoreSpec extends FunSuite {
object RedisStore extends App with RedisSpanStoreFactory
RedisStore.main(Array(
"-zipkin.storage.redis.host", "127.0.0.1",
"-zipkin.storage.redis.port", "6379"))
test("validate") {
val spanStore = RedisStore.newRedisSpanStore()
result(spanStore.clear())
}
}
|
zhoffice/zipkin
|
zipkin-redis/src/test/scala/com/twitter/zipkin/storage/redis/RedisSpanStoreSpec.scala
|
Scala
|
apache-2.0
| 1,117 |
package scala.collection.immutable
/**
 * This class allows creating custom integer values that have a
* hash code that differs from the value itself.
*
* The main use-case for this class is to simulate hash-collisions
* when testing collection data structures.
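 *
 * Example (illustrative, added for clarity; not part of the original file): two
 * instances that collide in a hash-based collection because they share a hash
 * code while still being unequal:
 * {{{
 * val a = CustomHashInt(1, 42)
 * val b = CustomHashInt(2, 42)
 * assert(a.hashCode == b.hashCode && a != b)
 * }}}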
*/
class CustomHashInt(val value: Int, val hash: Int) {
override def hashCode: Int = hash
override def equals(other: Any): Boolean = other match {
case that: CustomHashInt =>
(this eq that) || hash == that.hash && value == that.value
case _ => false
}
override def toString: String = s"$value, [hash = $hash]"
}
object CustomHashInt {
def apply(value: Int, hash: Int) = new CustomHashInt(value, hash)
}
|
scala/scala
|
test/junit/scala/collection/immutable/CustomHashInt.scala
|
Scala
|
apache-2.0
| 709 |
package com.nikolastojiljkovic.quilltrait
import io.getquill.{ MysqlAsyncContext, MysqlEscape }
package object mysql {
object testContext extends MysqlAsyncContext[MysqlEscape]("testMysqlDB") with AnnotatedTraitSupport
}
|
nstojiljkovic/quill-trait
|
quill-trait-core/jvm/src/test/scala/com/nikolastojiljkovic/quilltrait/mysql/package.scala
|
Scala
|
apache-2.0
| 225 |
/*
* Copyright (c) 2015. Paweł Cesar Sanjuan Szklarz.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.pmsoft.sam.see
import io.netty.util.internal.logging.{Slf4JLoggerFactory, InternalLoggerFactory}
import scala.concurrent._
import ExecutionContext.Implicits.global
import scala.concurrent.Future
import java.net.InetSocketAddress
import org.slf4j.LoggerFactory
import eu.pmsoft.sam.model._
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap, LinkedBlockingQueue}
import eu.pmsoft.sam.transport._
import eu.pmsoft.sam.idgenerator.LongLongID
import eu.pmsoft.sam.model.CanonicalProtocolMethodCallProto.CallType
import eu.pmsoft.sam.model.CanonicalProtocolInstanceProto.InstanceType
import com.google.inject.Key
import java.io._
import com.google.protobuf.ByteString
import java.lang.annotation.Annotation
import eu.pmsoft.sam.model.ClientFilledDataInstance
import eu.pmsoft.sam.model.ClientDataInstance
import eu.pmsoft.sam.model.InterfaceMethodCall
import scala.Some
import eu.pmsoft.sam.model.ExposedServiceTransaction
import eu.pmsoft.sam.model.ServerReturnBindingKeyInstance
import eu.pmsoft.sam.model.ServiceInstanceURL
import eu.pmsoft.sam.model.ClientPendingDataInstance
import eu.pmsoft.sam.model.ServerExternalInstanceBinding
import eu.pmsoft.sam.model.VoidMethodCall
import eu.pmsoft.sam.model.ServerBindingKeyInstance
import eu.pmsoft.sam.model.ServerDataInstance
import eu.pmsoft.sam.model.ClientExternalInstanceBinding
import eu.pmsoft.sam.model.ServerPendingDataInstance
import eu.pmsoft.sam.model.ServerFilledDataInstance
import eu.pmsoft.sam.model.ReturnMethodCall
import eu.pmsoft.sam.model.ClientBindingKeyInstance
import eu.pmsoft.sam.model.ClientReturnBindingKeyInstance
import java.util.concurrent.atomic.AtomicBoolean
object CanonicalTransportLayer {
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
private val EnvProtoCodec = TransportCodecBuilder.clientProtobuff[SamEnvironmentAction, SamEnvironmentResult](SamEnvironmentResult.getDefaultInstance)
def createClientDispatcher(address: InetSocketAddress): Future[RPCDispatcher[SamEnvironmentAction, SamEnvironmentResult]] = {
val client = ClientConnector[SamEnvironmentAction, SamEnvironmentResult](EnvProtoCodec)
client.connect(address) map {
transport => RPCDispatcher.clientDispatcher[SamEnvironmentAction, SamEnvironmentResult](transport)
}
}
}
case class CanonicalProtocolMessage(instances: Seq[CanonicalProtocolInstance], calls: Seq[CanonicalProtocolMethodCall], closeHomotopy: Boolean)
private[see] class CanonicalTransportServer(val logic: EnvironmentExternalApiLogic, val port: Int) extends SamEnvironmentExternalConnector {
val logger = LoggerFactory.getLogger(this.getClass)
val serverAddress = new InetSocketAddress(port)
private val server = ServerConnector[SamEnvironmentAction, SamEnvironmentResult](
serverAddress,
TransportCodecBuilder.serverProtobuff[SamEnvironmentAction, SamEnvironmentResult](SamEnvironmentAction.getDefaultInstance),
onConnection
)
val channel = server.bind
def onConnection(transport: Transport[SamEnvironmentResult, SamEnvironmentAction]) {
RPCDispatcher.serverDispatcher[SamEnvironmentAction, SamEnvironmentResult](transport, logic.handle _)
}
def createUrl(configurationId: ServiceConfigurationID): ServiceInstanceURL = {
logger.trace("createUrl {}", configurationId)
ServiceInstanceURL(new java.net.URL("http", serverAddress.getHostName, serverAddress.getPort, s"/service/${configurationId.id}"))
}
val dispatchers: ConcurrentMap[InetSocketAddress, Future[RPCDispatcher[SamEnvironmentAction, SamEnvironmentResult]]] = new ConcurrentHashMap()
private def getClientDispatcher(address: InetSocketAddress) = {
dispatchers.putIfAbsent(address, CanonicalTransportLayer.createClientDispatcher(address))
dispatchers.get(address)
}
def onEnvironment(address: InetSocketAddress): SamEnvironmentExternalApi = new EnvironmentConnection(logic, getClientDispatcher(address))
def getConnectionFromServiceURL(surl: ServiceInstanceURL): SamEnvironmentExternalApi = {
onEnvironment(surl)
}
private implicit def addressFromURL(surl: ServiceInstanceURL): InetSocketAddress = new InetSocketAddress(surl.url.getHost, surl.url.getPort)
def openExecutionPipe(pipeID: PipeIdentifier, remoteEndpointRef: PipeReference): TransportPipe[ThreadMessage] = new RPCBasedTransportPipe(pipeID, remoteEndpointRef, getClientDispatcher(remoteEndpointRef.address))
def openLoopbackPipe(pipeID: PipeIdentifier, remoteEndpointRef: PipeReference): LoopTransportPipe[ThreadMessage] = new RPCBasedLoopTransportPipe(pipeID, remoteEndpointRef, getClientDispatcher(remoteEndpointRef.address))
}
private class EnvironmentConnection(val logic: EnvironmentExternalApiLogic, val dispatcher: Future[RPCDispatcher[SamEnvironmentAction, SamEnvironmentResult]]) extends SamEnvironmentExternalApi {
import ProtocolTransformations._
def getArchitectureSignature(): Future[String] = dispatcher flatMap {
disp =>
val action = SamEnvironmentAction.getDefaultInstance.copy(SamEnvironmentCommandType.ARCHITECTURE_INFO)
disp.dispatch(action)
} map {
_.`architectureInfoSignature`.get
}
def ping(): Future[Boolean] = dispatcher flatMap {
disp =>
val action = SamEnvironmentAction.getDefaultInstance.copy(SamEnvironmentCommandType.PING)
disp.dispatch(action)
} map {
res => true
}
def getExposedServices: Future[Seq[ExposedServiceTransaction]] = dispatcher flatMap {
disp =>
val action = SamEnvironmentAction.getDefaultInstance.copy(SamEnvironmentCommandType.SERVICE_LIST)
disp.dispatch(action)
} map {
res =>
res.`serviceList` map {
ref =>
ExposedServiceTransaction(logic.createUrl(ref.`url`), logic.findServiceContract(ref.`contract`))
}
}
def registerTransactionRemote(globalTransactionID: GlobalTransactionIdentifier, headConnectionPipe: PipeReference, service: ExposedServiceTransaction): Future[TransactionBindRegistration] = dispatcher flatMap {
disp =>
val registerData = SamTransactionRegistration.getDefaultInstance.copy(globalTransactionID.globalID: LongLongIDProto, mapPipeRef(headConnectionPipe), ProtocolTransformations.serviceReference(service))
val action = SamEnvironmentAction.getDefaultInstance.copy(SamEnvironmentCommandType.REGISTER_TRANSACTION, Some(registerData))
disp.dispatch(action)
} map {
res => {
val data = res.`registrationConfirmation`.get
val p = data.`headPipeRef`
val pipeRef = PipeReference(ThreadExecutionIdentifier(data.`transactionBindId`), PipeIdentifier(p.`pipeRefId`), logic.createUrl(p.`address`))
TransactionBindRegistration(globalTransactionID, service.url, ThreadExecutionIdentifier(data.`transactionBindId`), pipeRef)
}
}
def unRegisterTransactionRemote(tid: GlobalTransactionIdentifier): Future[Boolean] = dispatcher flatMap {
disp =>
val action = SamEnvironmentAction.getDefaultInstance.copy(SamEnvironmentCommandType.UNREGISTER_TRANSACTION, None, Some(tid.globalID))
disp.dispatch(action)
} map {
res => true
}
}
private abstract class QueuedTransportPipe[T] extends TransportPipe[T] {
private val queue = new LinkedBlockingQueue[T]()
def waitResponse: T = queue.take()
def pollResponse: Option[T] = Option(queue.poll())
def receiveMessage(message: T) {
queue.add(message)
}
}
private class RPCBasedLoopTransportPipe(val pipeID: PipeIdentifier, val remoteEndpointRef: PipeReference, val dispatcher: Future[RPCDispatcher[SamEnvironmentAction, SamEnvironmentResult]]) extends LoopTransportPipe[ThreadMessage] {
private val inputPipe = new RPCBasedTransportPipe(pipeID, remoteEndpointRef, dispatcher)
private val loopPipe = new RPCBasedTransportPipe(pipeID, remoteEndpointRef, dispatcher)
def getInputPipe: TransportPipe[ThreadMessage] = inputPipe
def getLoopBackPipe: TransportPipe[ThreadMessage] = loopPipe
}
private class RPCBasedTransportPipe(val pipeID: PipeIdentifier, val remoteEndpointRef: PipeReference, val dispatcher: Future[RPCDispatcher[SamEnvironmentAction, SamEnvironmentResult]]) extends QueuedTransportPipe[ThreadMessage] {
val logger = LoggerFactory.getLogger(this.getClass)
private val waitingForResponse = new AtomicBoolean(false)
override def receiveMessage(message: ThreadMessage) {
waitingForResponse.set(false)
super.receiveMessage(message)
}
def sendMessage(message: ThreadMessage): Unit = {
waitingForResponse.set(true)
transport(message)
}
import ThreadMessageToProtoMapper._
def transport(message: ThreadMessage): Future[Unit] = {
val sended = dispatcher flatMap {
disp =>
logger.debug("sending message to direction {}", remoteEndpointRef.address)
val action = SamEnvironmentAction.getDefaultInstance.copy(SamEnvironmentCommandType.PROTOCOL_EXECUTION_MESSAGE).setMessage(toProto(pipeID, remoteEndpointRef, message))
disp.dispatch(action)
} map {
res => ()
}
sended onFailure {
case t => logger.error("error on send: {}", t)
}
sended
}
def isWaitingForMessage: Boolean = waitingForResponse.get()
def getPipeID: PipeIdentifier = pipeID
}
object ThreadMessageToProtoMapper {
import ProtocolTransformations._
def toProto(sourcePipeID: PipeIdentifier, remotePipeRef: PipeReference, message: ThreadMessage): CanonicalRequestProto = {
val data = message.data.get
val calls = data.calls.map {
mapCall _
}
val instances = data.instances.map {
mapInstance
}
CanonicalRequestProto.getDefaultInstance.copy(sourcePipeID.pid, remotePipeRef, calls.toVector, instances.toVector, data.closeThread)
}
implicit def mapCall(call: CanonicalProtocolMethodCall): CanonicalProtocolMethodCallProto = {
val ctype = call match {
case VoidMethodCall(instanceNr, methodSignature, arguments) => CallType.VOID_CALL
case InterfaceMethodCall(instanceNr, methodSignature, arguments, returnNr) => CallType.INTERFACE_CALL
case ReturnMethodCall(instanceNr, methodSignature, arguments, returnNr) => CallType.RETURN_CALL
}
CanonicalProtocolMethodCallProto.getDefaultInstance.copy(call.instanceNr, call.methodSignature, call.returnInstance, call.arguments.toVector, ctype)
}
implicit def mapCallProto(callp: CanonicalProtocolMethodCallProto): CanonicalProtocolMethodCall = {
callp.`callType` match {
case CallType.VOID_CALL => VoidMethodCall(callp.`instanceNr`, callp.`methodSignature`, callp.`argumentsReference`.toArray)
case CallType.INTERFACE_CALL => InterfaceMethodCall(callp.`instanceNr`, callp.`methodSignature`, callp.`argumentsReference`.toArray, callp.`returnInstanceId`.get)
case CallType.RETURN_CALL => ReturnMethodCall(callp.`instanceNr`, callp.`methodSignature`, callp.`argumentsReference`.toArray, callp.`returnInstanceId`.get)
case _ => ??? // this should not happen
}
}
implicit def mapInstance(ins: CanonicalProtocolInstance): CanonicalProtocolInstanceProto = {
val data = ins match {
case ClientBindingKeyInstance(instanceNr, key) => (ins, InstanceType.CLIENT_BINDING_KEY, None)
case ClientReturnBindingKeyInstance(instanceNr, key) => (ins, InstanceType.CLIENT_RETURN_BINDING_KEY, None)
case ClientExternalInstanceBinding(instanceNr, key) => (ins, InstanceType.CLIENT_EXTERNAL_INSTANCE_BINDING, None)
case ClientPendingDataInstance(instanceNr, key) => (ins, InstanceType.CLIENT_PENDING_DATA_INSTANCE, None)
case ClientFilledDataInstance(instanceNr, key, data) => (ins, InstanceType.CLIENT_FILLED_DATA_INSTANCE, Some(data))
case ClientDataInstance(instanceNr, key, data) => (ins, InstanceType.CLIENT_DATA_INSTANCE, Some(data))
case ServerBindingKeyInstance(instanceNr, key) => (ins, InstanceType.SERVER_BINDING_KEY, None)
case ServerReturnBindingKeyInstance(instanceNr, key) => (ins, InstanceType.SERVER_RETURN_BINDING_KEY, None)
case ServerExternalInstanceBinding(instanceNr, key) => (ins, InstanceType.SERVER_EXTERNAL_INSTANCE_BINDING, None)
case ServerPendingDataInstance(instanceNr, key) => (ins, InstanceType.SERVER_PENDING_DATA_INSTANCE, None)
case ServerFilledDataInstance(instanceNr, key, data) => (ins, InstanceType.SERVER_FILLED_DATA_INSTANCE, Some(data))
case ServerDataInstance(instanceNr, key, data) => (ins, InstanceType.SERVER_DATA_INSTANCE, Some(data))
}
CanonicalProtocolInstanceProto.getDefaultInstance.copy(ins.instanceNr, data._2, ins.key, data._3 map dataMapper _)
}
implicit def mapInstanceProto(insp: CanonicalProtocolInstanceProto): CanonicalProtocolInstance = {
insp.`instanceType` match {
case InstanceType.CLIENT_BINDING_KEY => ClientBindingKeyInstance(insp.`instanceNr`, insp.`key`)
case InstanceType.CLIENT_RETURN_BINDING_KEY => ClientReturnBindingKeyInstance(insp.`instanceNr`, insp.`key`)
case InstanceType.CLIENT_EXTERNAL_INSTANCE_BINDING => ClientExternalInstanceBinding(insp.`instanceNr`, insp.`key`)
case InstanceType.CLIENT_PENDING_DATA_INSTANCE => ClientPendingDataInstance(insp.`instanceNr`, insp.`key`)
case InstanceType.CLIENT_FILLED_DATA_INSTANCE => ClientFilledDataInstance(insp.`instanceNr`, insp.`key`, insp.`dataObjectSerialization`.get)
case InstanceType.CLIENT_DATA_INSTANCE => ClientDataInstance(insp.`instanceNr`, insp.`key`, insp.`dataObjectSerialization`.get)
case InstanceType.SERVER_BINDING_KEY => ServerBindingKeyInstance(insp.`instanceNr`, insp.`key`)
case InstanceType.SERVER_RETURN_BINDING_KEY => ServerReturnBindingKeyInstance(insp.`instanceNr`, insp.`key`)
case InstanceType.SERVER_EXTERNAL_INSTANCE_BINDING => ServerExternalInstanceBinding(insp.`instanceNr`, insp.`key`)
case InstanceType.SERVER_PENDING_DATA_INSTANCE => ServerPendingDataInstance(insp.`instanceNr`, insp.`key`)
case InstanceType.SERVER_FILLED_DATA_INSTANCE => ServerFilledDataInstance(insp.`instanceNr`, insp.`key`, insp.`dataObjectSerialization`.get)
case InstanceType.SERVER_DATA_INSTANCE => ServerDataInstance(insp.`instanceNr`, insp.`key`, insp.`dataObjectSerialization`.get)
case _ => ??? // This should not happen
}
}
implicit def keyMapper(key: Key[_]): BindingKey = {
BindingKey(
key.getTypeLiteral.toString,
Option(key.getAnnotationType()).map(_.toString),
Option(key.getAnnotation).map(ano => dataMapper(ano.asInstanceOf[java.io.Serializable]))
)
}
implicit def keyMapperProto(keyp: BindingKey): Key[_] = {
val ctype = Class.forName(keyp.`type`)
val annotationType = keyp.`annotationType` map {
Class.forName(_).asSubclass(classOf[Annotation])
}
val annotation = keyp.`annotationInstance` map {
data => dataMapperProto(data).asInstanceOf[Annotation]
}
annotation match {
case Some(a) => Key.get(ctype, a)
case None => annotationType match {
case Some(at) => Key.get(ctype, at)
case None => Key.get(ctype)
}
}
}
implicit def dataMapperProto(data: com.google.protobuf.ByteString): java.io.Serializable = {
val s: InputStream = new ByteArrayInputStream(data.toByteArray)
val instr = new ObjectInputStream(s)
try {
val res = instr.readObject()
res.asInstanceOf[java.io.Serializable]
} finally {
instr.close()
}
}
implicit def dataMapper(serializable: java.io.Serializable): com.google.protobuf.ByteString = {
val bos = new ByteArrayOutputStream()
val out = new ObjectOutputStream(bos)
try {
out.writeObject(serializable)
ByteString.copyFrom(bos.toByteArray)
} catch {
case e: Throwable => ???
} finally {
out.close()
bos.close()
}
}
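  // Illustrative round-trip (added for clarity; not part of the original source —
  // the "payload" value is an assumption):
  //   val bytes: com.google.protobuf.ByteString = dataMapper("payload")
  //   val back: java.io.Serializable = dataMapperProto(bytes)   // deserializes back to "payload"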
implicit def fromProto(proto: CanonicalRequestProto, urlMap: String => ServiceInstanceURL): MessageRoutingInformation = {
val cs = proto.`calls` map {
mapCallProto _
}
val ins = proto.`instances` map {
mapInstanceProto _
}
val data = Some(CanonicalRequest(ins, cs, proto.`closeThread`))
val p = proto.`remotePipeRef`
val pipeRef = PipeReference(ThreadExecutionIdentifier(p.`transactionBindId`), PipeIdentifier(p.`pipeRefId`), urlMap(p.`address`))
MessageRoutingInformation(PipeIdentifier(proto.`sourcePipeId`), pipeRef, ThreadMessage(data, None))
}
}
// TODO make this class private
object ProtocolTransformations {
implicit def mapPipeRef(p: PipeReference): PipeReferenceProto = PipeReferenceProto.getDefaultInstance.copy(p.transactionBindID.tid, p.pipeID.pid, p.address.url.toExternalForm)
implicit def mapExposedTo(ref: ExposedServiceTransaction): SamExposedServiceReference = SamExposedServiceReference(ref.url.url.toExternalForm, ref.contract.signature.getCanonicalName)
implicit def longLongIdProto(llid: LongLongID): LongLongIDProto = LongLongIDProto.getDefaultInstance.copy(llid.getMark, llid.getLinear)
implicit def longLongId(pllid: LongLongIDProto): LongLongID = new LongLongID(pllid.`mask`, pllid.`linear`)
  // FIXME security, contract mapping is using string<->class, change to generated ids from architecture
implicit def serviceReference(ref: ExposedServiceTransaction): SamExposedServiceReference = SamExposedServiceReference.getDefaultInstance.copy(ref.url.url.toExternalForm, ref.contract.signature.getCanonicalName)
}
|
paweld2/Service-Architecture-Model
|
infrastructure/src/main/scala/eu/pmsoft/sam/see/CanonicalTransportLayer.scala
|
Scala
|
apache-2.0
| 17,902 |
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.agent.notifications
import net.lshift.diffa.kernel.config.User
import net.lshift.diffa.kernel.notifications.{NotificationEvent, NotificationProvider}
import org.slf4j.{Logger, LoggerFactory}
import javax.mail.internet.{MimeMessage, InternetAddress}
import javax.mail.{MessagingException, Message, Session}
import org.antlr.stringtemplate.StringTemplateGroup
import java.net.{URISyntaxException, URI}
class SmtpNotifier(val session:Session,
val ctx:String,
val host:String) extends NotificationProvider {
val log:Logger = LoggerFactory.getLogger(getClass)
val templateGroup = new StringTemplateGroup("mail")
val bodyMaster = templateGroup.getInstanceOf("notifications/mail_body")
val subjectMaster = templateGroup.getInstanceOf("notifications/mail_subject")
var url:String = null
try {
url = new URI("http://" + host + "/" + ctx).toString
}
catch {
case e:URISyntaxException => log.error("Could not setup link, host = " + host + "; ctx = " + ctx)
}
log.debug("Setting the notification URL to: " + url)
def notify(event:NotificationEvent, user:User) = {
log.debug("Sending notification about " + event + " to " + user)
try {
val subject = subjectMaster.getInstanceOf
subject.setAttribute("pairKey", event.id.pair.key)
val body = bodyMaster.getInstanceOf
body.setAttribute("pairKey", event.id.pair.key)
body.setAttribute("entityId", event.id.id)
body.setAttribute("timestamp", event.lastUpdated.toString())
body.setAttribute("upstream", event.upstreamVsn)
body.setAttribute("downstream", event.downstreamVsn)
body.setAttribute("url", url)
var to = new InternetAddress(user.email)
val message = new MimeMessage(session)
message.addRecipient(Message.RecipientType.TO, to)
message.setSubject(subject.toString)
message.setText(body.toString)
session.getTransport.sendMessage(message, message.getAllRecipients)
}
catch {
case m:MessagingException => {
log.error("SMTP error: " + m.getMessage)
}
case n:NoSuchFieldException => {
log.error("ST error: " + n.getMessage)
val clazz = classOf[NotificationEvent]
try {
val field = clazz.getField("id")
}
catch {
case e:Exception => {
log.error("What is going on here?", e)
}
}
}
case e:Exception => {
log.error("Unknown error", e)
}
}
}
}
|
aprescott/diffa
|
agent/src/main/scala/net/lshift/diffa/agent/notifications/SmtpNotifier.scala
|
Scala
|
apache-2.0
| 3,259 |
package tastytest
import scala.collection.mutable
import tastytest.Suite.Context
import scala.util.control.NonFatal
import scala.reflect.ClassTag
class Suite(val name: String) {
private[this] val counts = mutable.Map.empty[String, Int]
private[this] val tests = mutable.ArrayBuffer.empty[(Context, () => Unit)]
val reps: Int = 1
def test(name: String)(code: => Unit): Unit = {
val count = counts.getOrElse(name, 0)
val name1 = if (count == 0) name else s"$name($count)"
tests += Suite.context(name1) -> (() => code)
counts.update(name, count + 1)
}
def test(code: => Unit): Unit = test("test")(code)
def testExpect[E <: Throwable: reflect.ClassTag](msg: => String)(code: => Unit): Unit = {
test(s"test-expect-${implicitly[ClassTag[E]]}") {
try {
code
throw new IllegalStateException(s"expected ${implicitly[ClassTag[E]]}")
} catch {
case err: E =>
assert(err.getMessage() == msg)
}
}
}
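  // Illustrative example (added for clarity; not part of the original file): tests are
  // registered in the constructor and executed via main:
  //   object MySuite extends Suite("MySuite") {
  //     test("addition") { assert(1 + 1 == 2) }
  //     testExpect[IllegalArgumentException]("boom") { throw new IllegalArgumentException("boom") }
  //   }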
def main(args: Array[String]): Unit = {
require(reps > 0, s"reps <= 0")
val errors = mutable.ArrayBuffer.empty[(Context, Throwable)]
if (reps == 1) {
runImpl(errors, 0)
}
else for (i <- 0 to reps) {
runImpl(errors, i)
}
}
private def runImpl(errors: mutable.ArrayBuffer[(Context, Throwable)], iteration: Int): Unit = {
for ((ctx, test) <- tests) {
try test()
catch {
case NonFatal(err) => errors += (ctx -> err)
}
}
if (errors.nonEmpty) {
val msg = if (errors.size == 1) "error" else "errors"
val msgs = errors.map {
case (ctx, err) => s"${err.getClass.getSimpleName} in $name.`${ctx.name}`: ${err.getMessage}"
}
throw new AssertionError(msgs.mkString(s"${errors.size} $msg at iteration $iteration:\n", "\n", ""))
}
}
}
object Suite {
class Context private[Suite] (val name: String) extends AnyVal
def context(name: String): Context = new Context(name)
}
|
lrytz/scala
|
test/tasty/run/pre/tastytest/Suite.scala
|
Scala
|
apache-2.0
| 1,966 |
/*
* The Reactive Summit Austin talk
* Copyright (C) 2016 Jan Machacek
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package org.eigengo.rsa.scene.v100
import java.io._
import org.datavec.image.loader.ImageLoader
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork
import org.eigengo.rsa.deeplearning4j.NetworkLoader
import scala.io.Source
import scala.util.{Failure, Success, Try}
/**
* Performs classification using the loaded network and matching labels. The number
* of elements in ``labels`` has to match the number of outputs in the ``network``.
*
* @param network the (trained and initialized) network
* @param labels the human-readable names in order of network outputs
*/
class SceneClassifier private(network: MultiLayerNetwork, labels: List[String]) {
private val loader = new ImageLoader(100, 100, 3)
private val threshold = 0.7
/**
* Classifies the content of the image in the ``imageStream``.
*
   * @param imageStream the stream containing a loadable image (e.g. png, jpeg, ...)
* @return error or scene with labels
*/
def classify(imageStream: InputStream): Try[Scene] = {
Try(loader.asRowVector(imageStream)).flatMap { imageRowVector ⇒
val predictions = network.output(imageRowVector)
if (predictions.isRowVector) {
val predictedLabels = (0 until predictions.columns()).flatMap { column ⇒
val prediction = predictions.getDouble(0, column)
if (prediction > threshold) {
Some(Scene.Label(labels(column), prediction))
} else None
}
Success(Scene(predictedLabels))
} else Failure(SceneClassifier.BadPredictionsShape)
}
}
}
/**
* Contains function to construct the ``SceneClassifier`` instance from a base path and
* common error types.
*/
object SceneClassifier {
/**
* The network's prediction for a single row vector is not a row vector
* (This is never expected to happen)
*/
case object BadPredictionsShape extends Exception("Predictions are not row vector.")
/**
* Constructs the SceneClassifier by loading the ``MultiLayerNetwork`` from three files
* at the given ``basePath``. The three files are
*
* - the network configuration in ``basePath.json``
* - the network parameters in ``basePath.bin``
* - the labels in ``basePath.labels``
*
* @param resourceAccessor the resource accessor
* @return error or constructed classifier
*/
def apply(resourceAccessor: NetworkLoader.ResourceAccessor): Try[SceneClassifier] = {
for {
network ← NetworkLoader.loadMultiLayerNetwork(resourceAccessor)
labels ← resourceAccessor("labels").map(is ⇒ Source.fromInputStream(is).getLines().toList)
} yield new SceneClassifier(network, labels)
}
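  // Illustrative usage sketch (added for clarity; not part of the original project —
  // the accessor value and image path are assumptions):
  //   val classifier: Try[SceneClassifier] = SceneClassifier(resourceAccessor)
  //   val scene: Try[Scene] = classifier.flatMap(_.classify(new java.io.FileInputStream("scene.png")))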
}
|
eigengo/reactive-summit-2016
|
vision-scene-classification/src/main/scala/org/eigengo/rsa/scene/v100/SceneClassifier.scala
|
Scala
|
gpl-3.0
| 3,404 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.common
class ControllerMovedException(message: String, cause: Throwable) extends RuntimeException(message, cause) {
def this(message: String) = this(message, null)
def this() = this(null, null)
}
|
unix1986/universe
|
tool/kafka-0.8.1.1-src/core/src/main/scala/kafka/common/ControllerMovedException.scala
|
Scala
|
bsd-2-clause
| 1,016 |
/**
* Digi Configgy is a library for handling configurations
*
* Copyright 2009 Robey Pointer <[email protected]>
* Copyright 2012-2013 Alexey Aksenov <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.digimead.configgy
import scala.collection.JavaConversions._
import org.slf4j.LoggerFactory
import javax.{ management => jmx }
class JmxWrapper(node: Attributes) extends jmx.DynamicMBean {
val log = LoggerFactory.getLogger(getClass)
val operations: Array[jmx.MBeanOperationInfo] = Array(
new jmx.MBeanOperationInfo("set", "set a string value",
Array(
new jmx.MBeanParameterInfo("key", "java.lang.String", "config key"),
new jmx.MBeanParameterInfo("value", "java.lang.String", "string value")), "void", jmx.MBeanOperationInfo.ACTION),
new jmx.MBeanOperationInfo("remove", "remove a value",
Array(
new jmx.MBeanParameterInfo("key", "java.lang.String", "config key")), "void", jmx.MBeanOperationInfo.ACTION),
new jmx.MBeanOperationInfo("add_list", "append a value to a list",
Array(
new jmx.MBeanParameterInfo("key", "java.lang.String", "config key"),
new jmx.MBeanParameterInfo("value", "java.lang.String", "value")), "void", jmx.MBeanOperationInfo.ACTION),
new jmx.MBeanOperationInfo("remove_list", "remove a value to a list",
Array(
new jmx.MBeanParameterInfo("key", "java.lang.String", "config key"),
new jmx.MBeanParameterInfo("value", "java.lang.String", "value")), "void", jmx.MBeanOperationInfo.ACTION))
def getMBeanInfo() =
new jmx.MBeanInfo("org.digimead.configgy.ConfigMap", // The name of the Java class of the MBean described by this MBeanInfo
"configuration node", // description
node.asJmxAttributes(), // attributes
null, // constructors
operations, // operations
null, // notifications
new jmx.ImmutableDescriptor("immutableInfo=false")) // descriptor
def getAttribute(name: String): AnyRef = node.asJmxDisplay(name)
def getAttributes(names: Array[String]): jmx.AttributeList = {
val rv = new jmx.AttributeList
for (name <- names) rv.add(new jmx.Attribute(name, getAttribute(name)))
rv
}
def invoke(actionName: String, params: Array[Object], signature: Array[String]): AnyRef = {
actionName match {
case "set" =>
params match {
case Array(name: String, value: String) =>
try {
node.setString(name, value)
} catch {
case e: Exception =>
log.warn("exception: %s", e.getMessage)
throw e
}
case _ =>
throw new jmx.MBeanException(new Exception("bad signature " + params.toList.toString))
}
case "remove" =>
params match {
case Array(name: String) =>
node.remove(name)
case _ =>
throw new jmx.MBeanException(new Exception("bad signature " + params.toList.toString))
}
case "add_list" =>
params match {
case Array(name: String, value: String) =>
node.setList(name, node.getList(name).toList ++ List(value))
case _ =>
throw new jmx.MBeanException(new Exception("bad signature " + params.toList.toString))
}
case "remove_list" =>
params match {
case Array(name: String, value: String) =>
node.setList(name, node.getList(name).toList.filterNot(_ == value))
case _ =>
throw new jmx.MBeanException(new Exception("bad signature " + params.toList.toString))
}
case _ =>
throw new jmx.MBeanException(new Exception("no such method"))
}
null
}
def setAttribute(attr: jmx.Attribute): Unit = {
attr.getValue() match {
case s: String =>
node.setString(attr.getName(), s)
case _ =>
throw new jmx.InvalidAttributeValueException()
}
}
def setAttributes(attrs: jmx.AttributeList): jmx.AttributeList = {
for (attr <- attrs.asList) setAttribute(attr)
attrs
}
}
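// Hedged usage sketch (not part of the original file): assuming `node: Attributes` is an
// existing configuration node, the wrapper could be registered on the platform MBean server
// (the ObjectName below is only illustrative):
//
//   import java.lang.management.ManagementFactory
//   ManagementFactory.getPlatformMBeanServer.registerMBean(
//     new JmxWrapper(node), new jmx.ObjectName("org.digimead.configgy:type=Config"))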
|
ezh/digi-configgy
|
src/main/scala/org/digimead/configgy/JmxWrapper.scala
|
Scala
|
apache-2.0
| 4,787 |
package org.jetbrains.plugins.scala
package codeInspection.methodSignature
import com.intellij.codeInspection.LocalInspectionTool
import com.intellij.testFramework.EditorTestUtil
import com.intellij.testFramework.fixtures.CodeInsightTestFixture
import org.jetbrains.plugins.scala.codeInspection.{InspectionBundle, ScalaQuickFixTestBase}
/**
* Nikolay.Tropin
* 6/25/13
*/
class UnitMethodDefinedLikeProcedureInspectionTest extends ScalaQuickFixTestBase {
import CodeInsightTestFixture.CARET_MARKER
import EditorTestUtil.{SELECTION_END_TAG => END, SELECTION_START_TAG => START}
protected override val classOfInspection: Class[_ <: LocalInspectionTool] =
classOf[UnitMethodDefinedLikeProcedureInspection]
protected override val description: String =
InspectionBundle.message("unit.method.like.procedure.name")
private val hint = InspectionBundle.message("insert.return.type.and.equals")
def test1(): Unit = {
val selected = s"def ${START}foo$END() {println()}"
checkTextHasError(selected)
val text = "def foo() {println()}"
val result = "def foo(): Unit = {println()}"
testQuickFix(text, result, hint)
}
def test2(): Unit = {
val selected = s"""def haha() {}
|def ${START}hoho$END() {}
|def hihi()"""
checkTextHasError(selected)
val text = s"""def haha() {}
|def ho${CARET_MARKER}ho() {}
|def hihi()"""
val result = """def haha() {}
|def hoho(): Unit = {}
|def hihi()"""
testQuickFix(text, result, hint)
}
def test3(): Unit = {
val selected = s"def ${START}foo$END(x: Int) {}"
checkTextHasError(selected)
val text = "def foo(x: Int) {}"
val result = "def foo(x: Int): Unit = {}"
testQuickFix(text, result, hint)
}
def test4(): Unit = {
val selected = s"def ${START}foo$END {}"
checkTextHasError(selected)
val text = "def foo {}"
val result = "def foo: Unit = {}"
testQuickFix(text, result, hint)
}
def test5(): Unit = {
val text = """class A(val x: Int, val y: Int) {
| def this(x: Int) {
| this(x, 0)
| }
| }""".stripMargin.replace("\r", "")
checkTextHasNoErrors(text)
}
}
|
loskutov/intellij-scala
|
test/org/jetbrains/plugins/scala/codeInspection/methodSignature/UnitMethodDefinedLikeProcedureInspectionTest.scala
|
Scala
|
apache-2.0
| 2,305 |
package gitbucket.core.model
trait DeployKeyComponent extends TemplateComponent { self: Profile =>
import profile.api._
lazy val DeployKeys = TableQuery[DeployKeys]
class DeployKeys(tag: Tag) extends Table[DeployKey](tag, "DEPLOY_KEY") with BasicTemplate {
val deployKeyId = column[Int]("DEPLOY_KEY_ID", O AutoInc)
val title = column[String]("TITLE")
val publicKey = column[String]("PUBLIC_KEY")
val allowWrite = column[Boolean]("ALLOW_WRITE")
def * =
(userName, repositoryName, deployKeyId, title, publicKey, allowWrite).<>(DeployKey.tupled, DeployKey.unapply)
def byPrimaryKey(userName: String, repositoryName: String, deployKeyId: Int) =
(this.userName === userName.bind) && (this.repositoryName === repositoryName.bind) && (this.deployKeyId === deployKeyId.bind)
}
}
case class DeployKey(
userName: String,
repositoryName: String,
deployKeyId: Int = 0,
title: String,
publicKey: String,
allowWrite: Boolean
)
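// Hedged usage sketch (not part of the original file): inside a component that mixes in
// DeployKeyComponent with a concrete Profile, a primary-key lookup might look like this
// ("owner", "repo" and the id are hypothetical values):
//
//   import profile.api._
//   val query = DeployKeys.filter(_.byPrimaryKey("owner", "repo", 1)).result.headOption
//   // db.run(query): Future[Option[DeployKey]]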
|
imeszaros/gitbucket
|
src/main/scala/gitbucket/core/model/DeployKey.scala
|
Scala
|
apache-2.0
| 974 |
class RandomNumberIterator
extends scala.collection.Iterator[Int] {
def hasNext = true
def next = scala.util.Random.nextInt(10)
}
object Migration {
val a = new RandomNumberIterator
a.collect { case x => x.toString }
}
|
grzegorzbalcerek/scala-book-examples
|
examples/Migration.scala
|
Scala
|
mit
| 227 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2013 Alexey Aksenov [email protected]
*
* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Affero General Public License for more details.
 * You should have received a copy of the GNU Affero General Public License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
 * Section 5 of the GNU Affero General Public License.
*
 * In accordance with Section 7(b) of the GNU Affero General Public License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: [email protected]
*/
package org.digimead.tabuddy.desktop.core.ui.definition.widget
import akka.actor.ActorRef
import java.util.UUID
import org.eclipse.swt.custom.ScrolledComposite
import org.eclipse.swt.widgets.Composite
class SCompositeHSash(val id: UUID, val ref: ActorRef, parent: ScrolledComposite, style: Int)
extends Composite(parent, style) with SComposite {
/** Returns the receiver's parent, which must be a ScrolledComposite. */
override def getParent(): ScrolledComposite = super.getParent.asInstanceOf[ScrolledComposite]
}
|
digimead/digi-TABuddy-desktop
|
part-core-ui/src/main/scala/org/digimead/tabuddy/desktop/core/ui/definition/widget/SCompositeHSash.scala
|
Scala
|
agpl-3.0
| 2,764 |
package io.getquill.context.jdbc.postgres
import io.getquill.context.sql.CaseClassQuerySpec
import org.scalatest.Matchers._
class CaseClassQueryJdbcSpec extends CaseClassQuerySpec {
val context = testContext
import testContext._
override def beforeAll = {
testContext.transaction {
testContext.run(query[Contact].delete)
testContext.run(query[Address].delete)
testContext.run(liftQuery(peopleEntries).foreach(p => peopleInsert(p)))
testContext.run(liftQuery(addressEntries).foreach(p => addressInsert(p)))
}
()
}
"Example 1 - Single Case Class Mapping" in {
testContext.run(`Ex 1 CaseClass Record Output`) should contain theSameElementsAs `Ex 1 CaseClass Record Output expected result`
}
"Example 1A - Single Case Class Mapping" in {
testContext.run(`Ex 1A CaseClass Record Output`) should contain theSameElementsAs `Ex 1 CaseClass Record Output expected result`
}
"Example 1B - Single Case Class Mapping" in {
testContext.run(`Ex 1B CaseClass Record Output`) should contain theSameElementsAs `Ex 1 CaseClass Record Output expected result`
}
"Example 2 - Single Record Mapped Join" in {
testContext.run(`Ex 2 Single-Record Join`) should contain theSameElementsAs `Ex 2 Single-Record Join expected result`
}
"Example 3 - Inline Record as Filter" in {
testContext.run(`Ex 3 Inline Record Usage`) should contain theSameElementsAs `Ex 3 Inline Record Usage exepected result`
}
"Example 4 - Ex 4 Mapped Union of Nicknames" in {
testContext.run(`Ex 4 Mapped Union of Nicknames`) should contain theSameElementsAs `Ex 4 Mapped Union of Nicknames expected result`
}
"Example 4 - Ex 4 Mapped Union All of Nicknames" in {
testContext.run(`Ex 4 Mapped Union All of Nicknames`) should contain theSameElementsAs `Ex 4 Mapped Union All of Nicknames expected result`
}
"Example 4 - Ex 4 Mapped Union All of Nicknames Filtered" in {
testContext.run(`Ex 4 Mapped Union All of Nicknames Filtered`) should contain theSameElementsAs `Ex 4 Mapped Union All of Nicknames Filtered expected result`
}
"Example 4 - Ex 4 Mapped Union All of Nicknames Same Field" in {
testContext.run(`Ex 4 Mapped Union All of Nicknames Same Field`) should contain theSameElementsAs `Ex 4 Mapped Union All of Nicknames Same Field expected result`
}
"Example 4 - Ex 4 Mapped Union All of Nicknames Same Field Filtered" in {
testContext.run(`Ex 4 Mapped Union All of Nicknames Same Field Filtered`) should contain theSameElementsAs `Ex 4 Mapped Union All of Nicknames Same Field Filtered expected result`
}
}
|
mentegy/quill
|
quill-jdbc/src/test/scala/io/getquill/context/jdbc/postgres/CaseClassQueryJdbcSpec.scala
|
Scala
|
apache-2.0
| 2,599 |
package me.ivanyu.luscinia.entities
/**
* Node log operation
*/
sealed trait LogOperation
case object EmptyOperation extends LogOperation
case object SomeOperation extends LogOperation
/**
* Log entry
* @param term term of entry
* @param operation operation
*/
case class LogEntry(term: Term, operation: LogOperation)
|
ivanyu/luscinia
|
node/src/main/scala/me/ivanyu/luscinia/entities/LogEntry.scala
|
Scala
|
unlicense
| 326 |
package com.sksamuel.elastic4s.handlers.searches.suggestion
import com.sksamuel.elastic4s.EnumConversions
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
import com.sksamuel.elastic4s.requests.searches.suggestion.TermSuggestion
object TermSuggestionBuilderFn {
def apply(term: TermSuggestion): XContentBuilder = {
val builder = XContentFactory.obj()
term.text.foreach(builder.field("text", _))
builder.startObject("term")
builder.field("field", term.fieldname)
term.analyzer.foreach(builder.field("analyzer", _))
term.lowercaseTerms.foreach(builder.field("lowercase_terms", _))
term.maxEdits.foreach(builder.field("max_edits", _))
term.minWordLength.foreach(builder.field("min_word_length", _))
term.maxInspections.foreach(builder.field("max_inspections", _))
term.minDocFreq.foreach(builder.field("min_doc_freq", _))
term.maxTermFreq.foreach(builder.field("max_term_freq", _))
term.prefixLength.foreach(builder.field("prefix_length", _))
term.size.foreach(builder.field("size", _))
term.shardSize.foreach(builder.field("shard_size", _))
term.sort.map(EnumConversions.sortBy).foreach(builder.field("sort", _))
term.stringDistance.map(EnumConversions.stringDistance).foreach(builder.field("string_distance", _))
term.suggestMode.map(EnumConversions.suggestMode).foreach(builder.field("suggest_mode", _))
builder
}
}
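// Hedged usage sketch (not part of the original file): given an already-built
// `term: TermSuggestion`, the suggestion body can be rendered to JSON via the builder:
//
//   val body: XContentBuilder = TermSuggestionBuilderFn(term)
//   // body.string() would yield something like {"text":"...","term":{"field":"...",...}}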
|
sksamuel/elastic4s
|
elastic4s-handlers/src/main/scala/com/sksamuel/elastic4s/handlers/searches/suggestion/TermSuggestionBuilderFn.scala
|
Scala
|
apache-2.0
| 1,419 |
package uscala.net
object MapViewShim {
implicit class MapViewImplicitShim[K, V](map: Map[K, V]) {
def mapValuesShim[W](f: (V) => W): Map[K, W] = map.mapValues(f)
}
}
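// Hedged usage sketch (not part of the original file):
//
//   import uscala.net.MapViewShim._
//   Map("a" -> 1, "b" -> 2).mapValuesShim(_ * 10)   // Map("a" -> 10, "b" -> 20)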
|
albertpastrana/uscala
|
url/src/main/scala-2.13-/uscala/net/MapViewShim.scala
|
Scala
|
mit
| 176 |
// Copyright (C) Maxime MORGE 2017
package org.scaia.experiments
import akka.actor.ActorSystem
import org.scaia.asia._
import org.scaia.solver.ASIA2HedonicSolver
import org.scaia.solver.asia._
import org.scaia.solver.hedonic._
/**
 * Main app to test multiple random examples
 */
object TestHedonicSolver {
  val debug = true
  val system = ActorSystem("TestHedonicSolver") // The Actor system
def main(args: Array[String]): Unit = {
println("n,m,selectiveU,hedoU,miqpU,selectiveTime,hedonicTime,miqpTime")
var n = 0
for (n <- 2 to 2) {
var m = 0
for (m <- 2 to 20 ) {//(m <- 2 * n to 10 * n)
val nbPb = 100
val nbMatchings = 100
var selectiveU=0.0
var hedonicU = 0.0
var miqpU = 0.0
var selectiveTime = 0.0
var hedonicTime = 0.0
var timeMIQP = 0.0
var o=0
for (o <- 1 to nbPb) {
val pb = IAProblem.randomProblem(n, m)
val selectiveSolver = new SelectiveSolver(pb,true,Utilitarian)
var startingTime=System.currentTimeMillis()
val selectiveResult = selectiveSolver.solve()
selectiveTime+=System.currentTimeMillis - startingTime
selectiveU += selectiveResult.utilitarianWelfare()
val hedonicSolver = new ASIA2HedonicSolver(pb)
startingTime=System.currentTimeMillis()
val hedonicResult = hedonicSolver.solve()
hedonicTime+=System.currentTimeMillis - startingTime
hedonicU += hedonicResult.utilitarianWelfare()
val miqpSolver = new MIQPSolver(pb,Utilitarian)
startingTime=System.currentTimeMillis()
val miqpResult = miqpSolver.solve()
timeMIQP+=System.currentTimeMillis - startingTime
miqpU += miqpResult.utilitarianWelfare()
}
println(n + "," + m + "," + selectiveU/nbPb + "," + hedonicU/nbPb + "," + miqpU/nbPb + "," +
selectiveTime/nbPb + "," +hedonicTime/nbPb + "," +timeMIQP/nbPb
)
}
}
}
}
|
maximemorge/ScaIA
|
src/main/scala/org/scaia/experiments/TestHedonicSolver.scala
|
Scala
|
gpl-3.0
| 2,010 |
package org.bitcoins.dlc.testgen
import java.io.File
import org.bitcoins.core.currency.Satoshis
import org.bitcoins.core.wallet.fee.SatoshisPerVirtualByte
import play.api.libs.json.{JsResult, JsValue}
import scala.concurrent.Future
object DLCFeeTestVectorGen
extends TestVectorGen[DLCFeeTestVector, DLCFeeTestVectorInput] {
override val defaultTestFile: File = new File(
"dlc-test/src/test/scala/org/bitcoins/dlc/testgen/dlc_fee_test.json")
override val testVectorParser: DLCFeeTestVector.type =
DLCFeeTestVector
override def inputFromJson: JsValue => JsResult[DLCFeeTestVectorInput] =
DLCFeeTestVectorInput.fromJson
override val inputStr: String = "inputs"
override def generateFromInput: DLCFeeTestVectorInput => Future[
DLCFeeTestVector] = { input =>
Future.successful(DLCFeeTestVector(input))
}
override def generateTestVectors(): Future[Vector[DLCFeeTestVector]] = {
val redeemScriptLens = Vector(0, 22, 34)
val maxWitnessLens = Vector(108, 133, 218)
val feeFundingInfo1 = FundingFeeInfo(0, 108)
val feeFundingInfo2 = FundingFeeInfo(22, 108)
val feeFundingInfo3 = FundingFeeInfo(34, 218)
val oneInput = Vector(feeFundingInfo1)
val twoInputs = Vector(feeFundingInfo2, feeFundingInfo3)
val feeFundingInfos = redeemScriptLens.flatMap { redeemScriptLen =>
maxWitnessLens.flatMap { maxWitnessLen =>
if (
          redeemScriptLen == 22 && (maxWitnessLen != 107 && maxWitnessLen != 108)
) {
None
} else {
Some(FundingFeeInfo(redeemScriptLen, maxWitnessLen))
}
}
}
val payoutSPKLens = Vector(22, 25, 34, 35, 71, 173)
val changeSPKLens = Vector(22, 34)
val feeRates = Vector(1L, 5L, 10L)
.map(Satoshis.apply)
.map(SatoshisPerVirtualByte.apply)
def allTests(
offerInputs: Vector[FundingFeeInfo],
acceptInputs: Vector[FundingFeeInfo]): Vector[DLCFeeTestVector] = {
for {
offerPayoutSPKLen <- payoutSPKLens
offerChangeSPKLen <- changeSPKLens
acceptPayoutSPKLen <- payoutSPKLens
acceptChangeSPKLen <- changeSPKLens
feeRate <- feeRates
} yield {
DLCFeeTestVector(
offerInputs,
offerPayoutSPKLen,
offerChangeSPKLen,
acceptInputs,
acceptPayoutSPKLen,
acceptChangeSPKLen,
feeRate
)
}
}
def someTests(
offerInputs: Vector[FundingFeeInfo],
acceptInputs: Vector[FundingFeeInfo]): Vector[DLCFeeTestVector] = {
allTests(offerInputs, acceptInputs)
.sortBy(_ => scala.util.Random.nextDouble())
.take(10)
}
val tests = allTests(oneInput, oneInput) ++
someTests(twoInputs, twoInputs) ++
someTests(oneInput, twoInputs) ++
someTests(twoInputs, oneInput) ++
someTests(feeFundingInfos, feeFundingInfos)
Future.successful(tests)
}
}
|
bitcoin-s/bitcoin-s
|
dlc-test/src/test/scala/org/bitcoins/dlc/testgen/DLCFeeTestVectorGen.scala
|
Scala
|
mit
| 2,933 |
/*
* Copyright 2014 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus.http
import java.nio.charset.Charset
import java.util.concurrent.ConcurrentHashMap
import com.twitter.finagle.http.{Method, Request, Response, Status}
import com.twitter.finagle.{Http, ListeningServer, Service}
import com.twitter.storehaus.testing.CloseableCleanup
import com.twitter.storehaus.testing.generator.NonEmpty
import com.twitter.storehaus.{FutureOps, Store}
import com.twitter.util.{Await, Future}
import org.scalacheck.Prop._
import org.scalacheck.{Arbitrary, Gen, Prop, Properties}
object HttpStringStoreProperties
extends Properties("HttpStringStore") with CloseableCleanup[ListeningServer] {
def validPairs: Gen[List[(String, Option[String])]] =
NonEmpty.Pairing.alphaStrs().map(_.map{ case (k, v) => ("/" + k, v) })
def baseTest[K: Arbitrary, V: Arbitrary : Equiv](
store: Store[K, V], validPairs: Gen[List[(K, Option[V])]])
(put: (Store[K, V], List[(K, Option[V])]) => Unit): Prop =
forAll(validPairs) { (examples: List[(K, Option[V])]) =>
put(store, examples)
examples.toMap.forall { case (k, optV) =>
val res = Await.result(store.get(k))
Equiv[Option[V]].equiv(res, optV)
}
}
def putStoreTest[K: Arbitrary, V: Arbitrary : Equiv](
store: Store[K, V], validPairs: Gen[List[(K, Option[V])]]): Prop =
baseTest(store, validPairs) { (s, pairs) =>
pairs.foreach {
case (k, v) =>
Await.result(s.put((k, v)))
}
}
def multiPutStoreTest[K: Arbitrary, V: Arbitrary : Equiv](
store: Store[K, V], validPairs: Gen[List[(K, Option[V])]]): Prop =
baseTest(store, validPairs) { (s, pairs) =>
Await.result(FutureOps.mapCollect(s.multiPut(pairs.toMap)))
}
def storeTest(store: Store[String, String]): Prop =
putStoreTest(store, validPairs) && multiPutStoreTest(store, validPairs)
val service = new Service[Request, Response] {
private val map = new ConcurrentHashMap[String, String]()
private val utf8 = Charset.forName("UTF-8")
def apply(request: Request): Future[Response] = {
val response = request.method match {
case Method.Get =>
Option(map.get(request.uri)).map{ v =>
val resp = Response(request.version, Status.Ok)
resp.contentString = v
resp.contentLength = v.getBytes(utf8).size
resp
}.getOrElse {
val resp = Response(request.version, Status.NotFound)
resp.contentLength = 0
resp
}
case Method.Delete =>
map.remove(request.uri)
val resp = Response(request.version, Status.NoContent)
resp.contentLength = 0
resp
case Method.Put =>
val maybeOldV = Option(map.put(request.uri, request.contentString))
val resp = Response(request.version,
maybeOldV.map(_ => Status.Ok).getOrElse(Status.Created))
resp.content = request.content
resp.contentLength = request.content.length
resp
case _ =>
Response(request.version, Status.MethodNotAllowed)
}
Future.value(response)
}
}
val server = Http.serve("localhost:0", service)
  // strip the leading "/" from boundAddress to get a usable "host:port" string
val store = HttpStringStore(server.boundAddress.toString.substring(1))
property("HttpStringStore test") = storeTest(store)
override def closeable: ListeningServer = server
override def cleanup(): Unit = {
super.cleanup()
}
}
|
twitter/storehaus
|
storehaus-http/src/test/scala/com/twitter/storehaus/http/HttpStringStoreProperties.scala
|
Scala
|
apache-2.0
| 4,109 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.metrics.source
import java.lang.management.ManagementFactory
import com.codahale.metrics.{Gauge, MetricRegistry}
import javax.management.{MBeanServer, ObjectName}
import scala.util.control.NonFatal
private[spark] class JVMCPUSource extends Source {
override val metricRegistry = new MetricRegistry()
override val sourceName = "JVMCPU"
// Dropwizard/Codahale metrics gauge measuring the JVM process CPU time.
// This Gauge will try to get and return the JVM Process CPU time or return -1 otherwise.
// The CPU time value is returned in nanoseconds.
// It will use proprietary extensions such as com.sun.management.OperatingSystemMXBean or
// com.ibm.lang.management.OperatingSystemMXBean, if available.
metricRegistry.register(MetricRegistry.name("jvmCpuTime"), new Gauge[Long] {
val mBean: MBeanServer = ManagementFactory.getPlatformMBeanServer
val name = new ObjectName("java.lang", "type", "OperatingSystem")
override def getValue: Long = {
try {
// return JVM process CPU time if the ProcessCpuTime method is available
mBean.getAttribute(name, "ProcessCpuTime").asInstanceOf[Long]
} catch {
case NonFatal(_) => -1L
}
}
})
}
|
pgandhi999/spark
|
core/src/main/scala/org/apache/spark/metrics/source/JVMCPUSource.scala
|
Scala
|
apache-2.0
| 2,035 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.component.spray
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.service.test.config.TestConfig
/**
* Created by wallinm on 1/14/15.
*/
object SprayTestConfig {
val config = TestConfig.conf("""
wookiee-spray {
manager = "com.webtrends.harness.component.spray.SprayManager"
# The port for the http server to be used for internal endpoints
http-port = 9090
# The port for the http-server to be used for external endpoints
http-external-port = 9092
# The port on which to run the websocket server
websocket-port = 9091
}
akka.actor.deployment {
/system/component/wookiee-spray/spray-server/spray-base {
router = round-robin
nr-of-instances = 3
}
}
""").withFallback(ConfigFactory.load("conf/application.conf")).resolve()
}
|
mjwallin1/wookiee-spray
|
src/test/scala/com/webtrends/harness/component/spray/SprayTestConfig.scala
|
Scala
|
apache-2.0
| 1,641 |
package `with spaces` { // error
class Foo
}
package +.* { // error // error
class Bar
}
package object `mixed_*` { // error
class Baz
}
|
lampepfl/dotty
|
tests/neg-custom-args/fatal-warnings/symbolic-packages.scala
|
Scala
|
apache-2.0
| 145 |
package sbn.core.models.graph
import sbn.core.CustomSpec
import sbn.core.io.DataFileLoader
import sbn.core.variables.{Variable, VariableFactory}
import scalax.collection.GraphEdge.DiEdge
import scalax.collection.immutable.Graph
class DirectedGraphSpec extends CustomSpec{
val dataSet = DataFileLoader.loadImmutableDataSet("datasets/test/core/onlyAttributes.arff")
val latent_gaussian = VariableFactory.newGaussianVariable("latent_gaussian")
val latent_multinomial = VariableFactory.newMultinomialVariable("latent_multinomial", 2)
val manifest_gaussian = VariableFactory.newGaussianVariable(dataSet.get.attributes.getAttributeByName("continuousWithBounds"))
val manifest_multinomial = VariableFactory.newMultinomialVariable(dataSet.get.attributes.getAttributeByName("binomial"))
val variables = Set(latent_gaussian, latent_multinomial, manifest_gaussian, manifest_multinomial)
val edges = Set(
DiEdge(latent_gaussian, manifest_gaussian),
DiEdge(latent_multinomial, manifest_multinomial),
DiEdge(latent_multinomial, latent_gaussian))
private def constructAcyclicGraph: DirectedGraph = DirectedGraph(Graph[Variable, DiEdge](
DiEdge(latent_gaussian, manifest_gaussian),
DiEdge(latent_multinomial, manifest_multinomial),
DiEdge(latent_multinomial, latent_gaussian)))
private def constructCyclicGraph: DirectedGraph = DirectedGraph(Graph[Variable, DiEdge](
DiEdge(latent_gaussian, manifest_gaussian),
DiEdge(latent_multinomial, manifest_multinomial),
DiEdge(latent_multinomial, latent_gaussian),
DiEdge(manifest_gaussian, latent_multinomial)))
"DirectedGraph constructor" should "return a valid DirectedGraph object" in {
val graph = constructAcyclicGraph
assert(graph.nodes == variables)
assert(graph.edges.size == 3)
}
"DirectedGraph.apply" should "return a valid DirectedGraph object that is equals to the constructor" in {
val graphApply = DirectedGraph(edges)
val graphConstructor = constructAcyclicGraph
assert(graphApply == graphConstructor)
assert(graphApply equals graphConstructor)
// Test they are not the same reference
assert(!(graphApply eq graphConstructor))
}
"DirectedGraph.nodes" should "return a Set[Variable] representing its nodes" in {
val graph = constructAcyclicGraph
assert(graph.nodes == Set(latent_gaussian, latent_multinomial, manifest_gaussian, manifest_multinomial))
}
"DirectedGraph.edges" should "return a Set[DiEdge] representing its edges" in {
val graph = constructAcyclicGraph
assert(graph.edges equals edges)
// Test they are not the same reference
assert(!(graph.edges eq edges))
assert(graph.edges.isInstanceOf[Set[DiEdge[Variable]]])
}
"DirectedGraph.numberOfNodes" should "return the correct number of nodes, which is the number of variables" in {
val graph = constructAcyclicGraph
assert(graph.nodes.size == variables.size)
}
"DirectedGraph.numberOfEdges" should "return the correct number of edges (edges.size)" in {
val graph = constructAcyclicGraph
assert(graph.edges.size == edges.size)
}
"DirectedGraph.isAcyclic" should "return true if the graph is acyclic and false otherwise" in {
val cyclicGraph = constructCyclicGraph
val acyclicGraph = constructAcyclicGraph
assert(!cyclicGraph.isAcyclic)
assert(acyclicGraph.isAcyclic)
}
}
|
fernandoj92/sbn
|
core/src/test/scala/sbn/core/models/graph/DirectedGraphSpec.scala
|
Scala
|
apache-2.0
| 3,376 |
/*
* Beangle, Agile Development Scaffold and Toolkits.
*
* Copyright © 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.maven.plugin.container
import org.apache.maven.plugin.AbstractMojo
import org.apache.maven.plugins.annotations.{LifecyclePhase, Mojo, Parameter, ResolutionScope}
import org.apache.maven.project.MavenProject
import java.io.{File, FileWriter, IOException}
import scala.jdk.javaapi.CollectionConverters.asScala
@Mojo(name = "dependencies", defaultPhase = LifecyclePhase.PREPARE_PACKAGE, requiresDependencyCollection = ResolutionScope.COMPILE_PLUS_RUNTIME)
class DependenciesMojo extends AbstractMojo {
@Parameter(defaultValue = "${project}", readonly = true)
private var project: MavenProject = _
private val fileName = "dependencies"
  private val included = Set("provided", "compile", "runtime")
def execute(): Unit = {
val folder = project.getBuild.getOutputDirectory + "/META-INF/beangle/"
new File(folder).mkdirs()
val file = new File(folder + fileName)
file.delete()
try {
file.createNewFile()
val provideds = new collection.mutable.ArrayBuffer[String]
asScala(project.getArtifacts) foreach { artifact =>
val str = artifact.toString
val scope = artifact.getScope
        if (included.contains(scope)) {
provideds += str.replace(":jar", "").replace(":" + scope, "")
}
}
val sb = new StringBuilder()
provideds.sorted foreach { one =>
        sb.append(one).append('\n')
}
val fw = new FileWriter(file)
fw.write(sb.toString)
fw.close()
getLog.info(s"Generated dependencies:(${provideds.size})" + file.getAbsolutePath)
} catch {
case e: IOException => e.printStackTrace()
}
}
private def convert(dependencies: String): Iterable[Dependency] = {
if (dependencies == null) List.empty else {
val results = new collection.mutable.ArrayBuffer[Dependency]
getLog.info(dependencies)
      val array = dependencies.replace("\n", "").replace("\r", "").replace(";", ",").split(",")
      for (a <- array if a.length > 0) {
val commaIdx = a.indexOf(":")
if (-1 == commaIdx) {
getLog.warn("Invalid dependency:" + a)
} else {
results += new Dependency(a.substring(0, commaIdx), a.substring(commaIdx + 1))
}
}
results
}
}
}
|
beangle/maven
|
src/main/scala/org/beangle/maven/plugin/container/DependenciesMojo.scala
|
Scala
|
lgpl-3.0
| 3,043 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.catalog
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.table.api._
import org.junit.{Before, Test}
import org.junit.Assert._
class InMemoryExternalCatalogTest {
private val databaseName = "db1"
private var catalog: InMemoryExternalCatalog = _
@Before
def setUp(): Unit = {
catalog = new InMemoryExternalCatalog(databaseName)
}
@Test
def testCreateTable(): Unit = {
assertTrue(catalog.listTables().isEmpty)
catalog.createTable("t1", createTableInstance(), ignoreIfExists = false)
val tables = catalog.listTables()
assertEquals(1, tables.size())
assertEquals("t1", tables.get(0))
}
@Test(expected = classOf[TableAlreadyExistException])
def testCreateExistedTable(): Unit = {
val tableName = "t1"
catalog.createTable(tableName, createTableInstance(), ignoreIfExists = false)
catalog.createTable(tableName, createTableInstance(), ignoreIfExists = false)
}
@Test
def testGetTable(): Unit = {
val originTable = createTableInstance()
catalog.createTable("t1", originTable, ignoreIfExists = false)
assertEquals(catalog.getTable("t1"), originTable)
}
@Test(expected = classOf[TableNotExistException])
def testGetNotExistTable(): Unit = {
catalog.getTable("nonexisted")
}
@Test
def testAlterTable(): Unit = {
val tableName = "t1"
val table = createTableInstance()
catalog.createTable(tableName, table, ignoreIfExists = false)
assertEquals(catalog.getTable(tableName), table)
val newTable = createTableInstance(Array("number"), Array(Types.INT))
catalog.alterTable(tableName, newTable, ignoreIfNotExists = false)
val currentTable = catalog.getTable(tableName)
// validate the table is really replaced after alter table
assertNotEquals(table, currentTable)
assertEquals(newTable, currentTable)
}
@Test(expected = classOf[TableNotExistException])
def testAlterNotExistTable(): Unit = {
catalog.alterTable("nonexisted", createTableInstance(), ignoreIfNotExists = false)
}
@Test
def testDropTable(): Unit = {
val tableName = "t1"
catalog.createTable(tableName, createTableInstance(), ignoreIfExists = false)
assertTrue(catalog.listTables().contains(tableName))
catalog.dropTable(tableName, ignoreIfNotExists = false)
assertFalse(catalog.listTables().contains(tableName))
}
@Test(expected = classOf[TableNotExistException])
def testDropNotExistTable(): Unit = {
catalog.dropTable("nonexisted", ignoreIfNotExists = false)
}
@Test(expected = classOf[CatalogNotExistException])
def testGetNotExistDatabase(): Unit = {
catalog.getSubCatalog("notexistedDb")
}
@Test
def testCreateDatabase(): Unit = {
catalog.createSubCatalog("db2", new InMemoryExternalCatalog("db2"), ignoreIfExists = false)
assertEquals(1, catalog.listSubCatalogs().size)
}
@Test(expected = classOf[CatalogAlreadyExistException])
def testCreateExistedDatabase(): Unit = {
catalog.createSubCatalog("existed", new InMemoryExternalCatalog("existed"),
ignoreIfExists = false)
assertNotNull(catalog.getSubCatalog("existed"))
val databases = catalog.listSubCatalogs()
assertEquals(1, databases.size())
assertEquals("existed", databases.get(0))
catalog.createSubCatalog("existed", new InMemoryExternalCatalog("existed"),
ignoreIfExists = false)
}
@Test
def testNestedCatalog(): Unit = {
val sub = new InMemoryExternalCatalog("sub")
val sub1 = new InMemoryExternalCatalog("sub1")
catalog.createSubCatalog("sub", sub, ignoreIfExists = false)
sub.createSubCatalog("sub1", sub1, ignoreIfExists = false)
sub1.createTable("table", createTableInstance(), ignoreIfExists = false)
val tables = catalog.getSubCatalog("sub").getSubCatalog("sub1").listTables()
assertEquals(1, tables.size())
assertEquals("table", tables.get(0))
}
private def createTableInstance(): ExternalCatalogTable = {
val schema = new TableSchema(
Array("first", "second"),
Array(
BasicTypeInfo.STRING_TYPE_INFO,
BasicTypeInfo.INT_TYPE_INFO
)
)
ExternalCatalogTable("csv", schema)
}
private def createTableInstance(
fieldNames: Array[String],
fieldTypes: Array[TypeInformation[_]]): ExternalCatalogTable = {
val schema = new TableSchema(fieldNames, fieldTypes)
ExternalCatalogTable("csv", schema)
}
}
|
zimmermatt/flink
|
flink-libraries/flink-table/src/test/scala/org/apache/flink/table/catalog/InMemoryExternalCatalogTest.scala
|
Scala
|
apache-2.0
| 5,272 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.supervision
import forms.{EmptyForm, Form2, InvalidForm, ValidForm}
import org.scalatest.MustMatchers
import utils.AmlsViewSpec
import jto.validation.Path
import jto.validation.ValidationError
import models.supervision.ProfessionalBodies
import play.api.i18n.Messages
import views.Fixture
import views.html.supervision.which_professional_body
class which_professional_bodySpec extends AmlsViewSpec with MustMatchers {
trait ViewFixture extends Fixture {
lazy val which_professional_body = app.injector.instanceOf[which_professional_body]
implicit val requestWithToken = addTokenForView()
}
"which_professional_body view" must {
"have a back link" in new ViewFixture {
def view = which_professional_body(EmptyForm, edit = false)
doc.getElementsByAttributeValue("class", "link-back") must not be empty
}
"have correct title" in new ViewFixture {
def view = which_professional_body(EmptyForm, false)
doc.title must startWith(Messages("supervision.whichprofessionalbody.title") + " - " + Messages("summary.supervision"))
}
"have correct headings" in new ViewFixture {
def view = which_professional_body(EmptyForm, false)
heading.html must be(Messages("supervision.whichprofessionalbody.title"))
subHeading.html must include(Messages("summary.supervision"))
}
"show errors in the correct locations" in new ViewFixture {
val form2: InvalidForm = InvalidForm(Map.empty, Seq(
(Path \\ "businessType") -> Seq(ValidationError("not a message Key")),
(Path \\ "specifyOtherBusiness") -> Seq(ValidationError("not another message Key"))
))
def view = which_professional_body(form2, false)
errorSummary.html() must include("not a message Key")
errorSummary.html() must include("not another message Key")
doc.getElementById("businessType")
.getElementsByClass("error-notification").first().html() must include("not a message Key")
doc.getElementById("specifyOtherBusiness-panel")
.getElementsByClass("error-notification").first().html() must include("not another message Key")
}
}
}
|
hmrc/amls-frontend
|
test/views/supervision/which_professional_bodySpec.scala
|
Scala
|
apache-2.0
| 2,773 |