code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M)
---|---|---|---|---|---
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.api.ml
import org.scalatest.FunSuite
import org.scalatest.Matchers
import org.apache.spark.Logging
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
import org.apache.spark.ml.tuning.{ParamGridBuilder, CrossValidator}
import org.apache.spark.mllib.linalg.Vector
import scala.reflect.runtime.universe._
case class LabeledDocument[T:TypeTag](id: Long, text: String, label: Double)
case class Document[T:TypeTag](id: Long, text: String)
class LogisticRegressionSuite extends FunSuite with WrapperSparkContext with Matchers with Logging {
// Note: This is required by every test to ensure that it runs successfully on a Windows laptop.
val loadConfig = ScalaAutomatedTestBase
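// (Added note) Referencing the ScalaAutomatedTestBase object forces its one-time
// initialisation, which is presumably where the configuration required on Windows is set up.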
test("run logistic regression with default") {
// Make sure the SystemML home directory is set when running the wrapper.
val newsqlContext = new org.apache.spark.sql.SQLContext(sc);
import newsqlContext.implicits._
val training = sc.parallelize(Seq(
LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)),
LabeledPoint(1.0, Vectors.dense(1.0, 0.4, 2.1)),
LabeledPoint(2.0, Vectors.dense(1.2, 0.0, 3.5))))
val testing = sc.parallelize(Seq(
LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)),
LabeledPoint(1.0, Vectors.dense(1.0, 0.4, 2.1)),
LabeledPoint(2.0, Vectors.dense(1.2, 0.0, 3.5))))
val lr = new LogisticRegression("log", sc)
val lrmodel = lr.fit(training.toDF)
lrmodel.transform(testing.toDF).show
lr.getIcpt shouldBe 0
lrmodel.getIcpt shouldBe lr.getIcpt
lrmodel.getMaxInnerIter shouldBe lr.getMaxInnerIter
}
test("test logistic regression with mlpipeline"){
// Make sure the SystemML home directory is set when running the wrapper.
val newsqlContext = new org.apache.spark.sql.SQLContext(sc);
import newsqlContext.implicits._
val training = sc.parallelize(Seq(
LabeledDocument(0L, "a b c d e spark", 1.0),
LabeledDocument(1L, "b d", 2.0),
LabeledDocument(2L, "spark f g h", 1.0),
LabeledDocument(3L, "hadoop mapreduce", 2.0),
LabeledDocument(4L, "b spark who", 1.0),
LabeledDocument(5L, "g d a y", 2.0),
LabeledDocument(6L, "spark fly", 1.0),
LabeledDocument(7L, "was mapreduce", 2.0),
LabeledDocument(8L, "e spark program", 1.0),
LabeledDocument(9L, "a e c l", 2.0),
LabeledDocument(10L, "spark compile", 1.0),
LabeledDocument(11L, "hadoop software", 2.0)))
val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
val hashingTF = new HashingTF().setNumFeatures(1000).setInputCol(tokenizer.getOutputCol).setOutputCol("features")
val lr = new LogisticRegression("log",sc)
val pipeline = new Pipeline().setStages(Array(tokenizer, hashingTF, lr))
val crossval = new CrossValidator().setEstimator(pipeline).setEvaluator(new BinaryClassificationEvaluator)
val paramGrid = new ParamGridBuilder().addGrid(hashingTF.numFeatures, Array(10, 100, 1000)).addGrid(lr.regParam, Array(0.1, 0.01)).build()
crossval.setEstimatorParamMaps(paramGrid)
crossval.setNumFolds(2)
val lrmodel = crossval.fit(training.toDF)
val test = sc.parallelize(Seq(
Document(12L, "spark i j k"),
Document(13L, "l m n"),
Document(14L, "mapreduce spark"),
Document(15L, "apache hadoop")))
lrmodel.transform(test.toDF).show
lr.getIcpt shouldBe 0
// lrmodel.getIcpt shouldBe lr.getIcpt
// lrmodel.getMaxInnerIter shouldBe lr.getMaxInnerIter
}
} | asurve/arvind-sysml | src/test/scala/org/apache/sysml/api/ml/LogisticRegressionSuite.scala | Scala | apache-2.0 | 4,470 |
import sbt._
object ClientBuild extends Build {
lazy val common = ProjectRef(file("../common"), "common")
val client = Project("client", file(".")).dependsOn(common)
}
| art4ul/scrafty | client/project/ClientBuild.scala | Scala | apache-2.0 | 175 |
package model
import entitytled.Entitytled
import play.api.Play
import play.api.db.slick.DatabaseConfigProvider
import slick.driver.JdbcProfile
trait PIMAidDBContext extends Entitytled
with ConditionExpressionComponent
with DrugComponent
with DrugGroupComponent
with ExpressionTermComponent
with GenericTypeComponent
with MedicationProductComponent
with RuleComponent
with SuggestionTemplateComponent
with UserSessionComponent
{
val dbConfig = DatabaseConfigProvider.get[JdbcProfile](Play.current)
val driver = dbConfig.driver
implicit val db = dbConfig.db
}
object PIMAidDBContext extends PIMAidDBContext
| RSSchermer/pim-aid | app/model/PIMAidDBContext.scala | Scala | mit | 637 |
package io.getquill.context.sql.norm
import io.getquill.ast._
import io.getquill.norm.BetaReduction
import io.getquill.norm.Normalize
/**
* This phase expands inner joins, adding the correct aliases so they will function. Unfortunately,
* since it introduces aliases into the clauses that don't actually exist in the inner expressions,
* it is not technically type-safe but will not result in a Quat error since Quats cannot check
* for Ident scoping. For a better implementation, that uses a well-typed FlatMap/FlatJoin cascade, have
* a look here:
* [[https://gist.github.com/deusaquilus/dfb42880656df12779a0afd4f20ef1bb Better Typed ExpandJoin which uses FlatMap/FlatJoin]]
*
* The reason the above implementation is not currently used is that `ExpandNestedQueries` does not
* yet use Quat fields for expansion. Once this is changed, using that implementation here
* should be reconsidered.
*/
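// (Added, illustrative only; field names are hypothetical) For a join whose sides are already
// simple idents, e.g. the AST produced by
//   query[A].join(query[B]).on((a, b) => a.id == b.fk)
// the Transform below rewrites the Join node into roughly
//   Map(Join(a, b), Ident("ab"), Tuple(a, b))
// i.e. it introduces the combined alias "ab" so later normalization phases can refer to both
// sides of the join through a single binding.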
object ExpandJoin {
def apply(q: Ast) = expand(q, None)
def expand(q: Ast, id: Option[Ident]) =
Transform(q) {
case q @ Join(_, _, _, Ident(a, _), Ident(b, _), _) => // Ident a and Ident b should have the same Quat, could add an assertion for that
val (qr, tuple) = expandedTuple(q)
Map(qr, id.getOrElse(Ident(s"$a$b", q.quat)), tuple)
}
private def expandedTuple(q: Join): (Join, Tuple) =
q match {
case Join(t, a: Join, b: Join, tA, tB, o) =>
val (ar, at) = expandedTuple(a)
val (br, bt) = expandedTuple(b)
val or = BetaReduction(o, tA -> at, tB -> bt)
(Join(t, ar, br, tA, tB, or), Tuple(List(at, bt)))
case Join(t, a: Join, b, tA, tB, o) =>
val (ar, at) = expandedTuple(a)
val or = BetaReduction(o, tA -> at)
(Join(t, ar, b, tA, tB, or), Tuple(List(at, tB)))
case Join(t, a, b: Join, tA, tB, o) =>
val (br, bt) = expandedTuple(b)
val or = BetaReduction(o, tB -> bt)
(Join(t, a, br, tA, tB, or), Tuple(List(tA, bt)))
case q @ Join(t, a, b, tA, tB, on) =>
(Join(t, nestedExpand(a, tA), nestedExpand(b, tB), tA, tB, on), Tuple(List(tA, tB)))
}
private def nestedExpand(q: Ast, id: Ident) =
Normalize(expand(q, Some(id))) match {
case Map(q, _, _) => q
case q => q
}
} | getquill/quill | quill-sql-portable/src/main/scala/io/getquill/sql/norm/ExpandJoin.scala | Scala | apache-2.0 | 2,277 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.optim
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dataset.{DataSet, DistributedDataSet, MiniBatch}
import com.intel.analytics.bigdl.parameters.AllReduceParameter
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils._
import org.apache.log4j.Logger
import org.apache.spark.TaskContext
import org.apache.spark.rdd.{RDD, ZippedPartitionsWithLocalityRDD}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.Future
import scala.reflect.ClassTag
object DistriOptimizer {
import Optimizer._
val logger = Logger.getLogger(getClass)
/**
* Optimizer cache some metadata on each executor
*
* @param localModels cached models
* @param modelWeights weights of the cached models
* @param modelGradients gradients of the cached models
* @param localCriterions cached criterion
* @param localStates cached state
* @param gradient tensor buffer
* @tparam T
*/
case class Cache[T](
localModels: Array[Module[T]],
modelWeights: Array[Tensor[T]],
modelGradients: Array[Tensor[T]],
localCriterions: Array[Criterion[T]],
localStates: Array[Table],
gradient: Tensor[T],
var moduleTimeList: Array[Long] = null
)
private[optim] def optimize[T: ClassTag](
dataset: DistributedDataSet[MiniBatch[T]],
coresPerNode: Int,
state: Table,
endWhen: Trigger,
metrics: Metrics,
models: RDD[Cache[T]],
optimMethod: OptimMethod[T],
parameters: AllReduceParameter[T],
validationTrigger: Option[Trigger],
validationDataSet: Option[DataSet[MiniBatch[T]]],
validationMethods: Option[Array[ValidationMethod[T]]],
cacheTrigger: Option[Trigger],
cachePath: Option[String],
isOverWrite: Boolean
)(implicit ev: TensorNumeric[T]) = {
val sc = dataset.originRDD().sparkContext
val partitionNum = dataset.originRDD().partitions.length
var wallClockTime = 0L
var lastEpochTime = 0L
val driverState = T("epoch" -> state.get[Int]("epoch").getOrElse(1),
"neval" -> state.get[Int]("neval").getOrElse(1))
val _subModelNumber = Engine.getEngineType match {
case MklBlas => coresPerNode
case _ => throw new IllegalArgumentException()
}
var accumulateCount = 0
val shuffleBefore = System.nanoTime()
logger.info(s"config $state")
logger.info(s"Shuffle data")
dataset.shuffle()
val shuffleEnd = System.nanoTime()
logger.info(s"Shuffle data complete. Takes ${(shuffleEnd - shuffleBefore) / 1e9}s")
var tasks: ArrayBuffer[Future[_]] = new ArrayBuffer()
var threshold = Long.MaxValue
var timeout = Long.MaxValue
var iteration = 0
val dropPercentage = state.get[Double]("dropPercentage").get
val warmupIterationNum = state.get[Int]("warmupIterationNum").get
val computeThresholdbatchSize = state.get[Int]("computeThresholdbatchSize").get
val maxDropPercentage = state.get[Double]("maxDropPercentage").get
val driverSubModelNum = partitionNum * _subModelNumber
var dropModelNumBatch = 0
var lossArray = new Array[Double](_subModelNumber)
var epochStart = System.nanoTime()
var dataRDD = dataset.data(train = true)
while (!endWhen(driverState)) {
val _header = header(driverState[Int]("epoch"), accumulateCount, dataset.size(),
driverState[Int]("neval"), wallClockTime)
val lossSum = sc.accumulator(0.0, "loss sum")
val recordsNum = sc.accumulator(0, "record number")
metrics.set("computing time for each node", mutable.ArrayBuffer[Double](), sc)
metrics.set("computing time average", 0.0, sc, partitionNum)
metrics.set("aggregate gradient time", 0.0, sc, partitionNum)
metrics.set("get weights average", 0.0, sc, partitionNum)
metrics.set("get weights for each node", mutable.ArrayBuffer[Double](), sc)
val driverMetrics = metrics
val start = System.nanoTime()
val finishedModelNum = dataRDD.zipPartitions(
models, true)(
(data, modelIter) => {
val cached = modelIter.next()
val syWStart = System.nanoTime()
val weightsResult = parameters.getWeights(cached.modelWeights.head)
val tensorBuffer = new Array[(Tensor[T], Tensor[T])](_subModelNumber)
tasks += Engine.default.invoke(() => {
val batch = data.next()
var b = 0
require(batch.data.size(1) == batch.labels.size(1),
"data and label batch sizes do not match")
require(batch.data.size(1) >= _subModelNumber,
s"total batch size: ${batch.data.size(1)} " +
s"should be no less than the total core number: ${_subModelNumber}")
val stackSize = batch.data.size(1) / _subModelNumber
while (b < _subModelNumber) {
tensorBuffer(b) = (batch.data.narrow(1, b * stackSize + 1, stackSize),
batch.labels.narrow(1, b * stackSize + 1, stackSize))
b += 1
}
})
Engine.default.sync(tasks)
weightsResult.waitResult()
val weightSyncTime = System.nanoTime() - syWStart
driverMetrics.add("get weights average", weightSyncTime)
driverMetrics.add("get weights for each node", weightSyncTime)
tasks.clear()
// ======================Start train models===================================
var time = System.nanoTime()
if(dropPercentage > 0 && iteration > warmupIterationNum + computeThresholdbatchSize - 1) {
timeout = threshold - weightSyncTime
}
val pre = (iteration % computeThresholdbatchSize) * _subModelNumber
val trainingThreads = Engine.default.invokeAndWait2((0 until _subModelNumber).map(i =>
() => {
val trainStart = System.nanoTime()
val localModel = cached.localModels(i)
localModel.training()
val localCriterion = cached.localCriterions(i)
val (input, target) = tensorBuffer(i)
val output = localModel.forward(input)
lossArray(i) = ev.toType[Double](localCriterion.forward(output, target))
val errors = localCriterion.backward(output, target)
localModel.backward(input, errors)
cached.moduleTimeList(i + pre) = System.nanoTime() - trainStart + weightSyncTime
i
}
), timeout)
val computingTime = System.nanoTime() - time
driverMetrics.add("computing time average", computingTime)
driverMetrics.add("computing time for each node", computingTime)
val finishedThreads = trainingThreads.filter(!_.isCancelled).map(_.get())
recordsNum += finishedThreads.size * tensorBuffer.head._2.size(1)
var i = 0
while (i < finishedThreads.size) {
lossSum += lossArray(finishedThreads(i))
i += 1
}
if (finishedThreads.size > 0) {
time = System.nanoTime()
val gradLength = cached.modelGradients(0).nElement()
val taskSize = gradLength / _subModelNumber
val extraTask = gradLength % _subModelNumber
(0 until _subModelNumber).diff(finishedThreads).foreach(i =>
cached.modelGradients(i).zero()
)
// copy multi-model gradient to the buffer
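// (Added for clarity) Each task `tid` below owns one contiguous slice of the gradient vector:
// slices are taskSize elements long, with the first `extraTask` tasks taking one extra element,
// so the whole vector is covered exactly once. The slice of the first sub-model's gradient is
// copied into the shared buffer and the slices of the remaining sub-models are added on top.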
val parallelNum = if (taskSize == 0) extraTask else _subModelNumber
Engine.default.invokeAndWait((0 until parallelNum).map(tid => () => {
val offset = tid * taskSize + math.min(tid, extraTask)
val length = taskSize + (if (tid < extraTask) 1 else 0)
var i = 0
while (i < cached.modelGradients.length) {
if (i == 0) {
cached.gradient.narrow(1, offset + 1, length)
.copy(cached.modelGradients(i).narrow(1, offset + 1, length))
} else {
cached.gradient.narrow(1, offset + 1, length)
.add(cached.modelGradients(i).narrow(1, offset + 1, length))
}
i += 1
}
}))
driverMetrics.add("aggregate gradient time", System.nanoTime() - time)
}
parameters.putGradients(cached.gradient)
tasks ++= Engine.default.invoke((0 until _subModelNumber).map(i => () => {
cached.localModels(i).training()
cached.localModels(i).zeroGradParameters()
}))
Iterator(finishedThreads.size)
}).reduce(_ + _)
dropModelNumBatch += (driverSubModelNum - finishedModelNum)
if (dropPercentage == 0 || finishedModelNum >= driverSubModelNum * (1-maxDropPercentage)) {
val value = lossSum.value / finishedModelNum
models.mapPartitions(modelIter => {
val modelCache = modelIter.next()
parameters.aggregrateGradientPartition()
parameters.gradientPartition.div(ev.fromType(finishedModelNum))
modelCache.localStates.head("neval") = driverState[Int]("neval")
modelCache.localStates.head("epoch") = driverState[Int]("epoch")
optimMethod.optimize(_ => (ev.fromType(value), parameters.gradientPartition),
parameters.weightPartition, modelCache.localStates.head, modelCache.localStates.head)
parameters.sendWeightPartition()
Iterator.empty
}).count()
accumulateCount += recordsNum.value
val end = System.nanoTime()
wallClockTime += end - start
optimMethod.updateHyperParameter(state, driverState)
logger.info(s"${_header} Train ${recordsNum.value} in ${(end - start) / 1e9}seconds. " +
s"Throughput is ${recordsNum.value / ((end - start) / 1e9)} records/second. Loss is ${
lossSum.value / finishedModelNum}. ${optimMethod.getHyperParameter(state)}")
logger.debug("\\n" + metrics.summary())
logger.debug("Dropped modules: " + (driverSubModelNum - finishedModelNum))
lossArray = new Array[Double](_subModelNumber)
// compute threshold
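// (Added for clarity) Every computeThresholdbatchSize iterations the per-module times recorded
// above are collected; the k-th largest time (k derived from dropPercentage) becomes the new
// threshold, unless enough modules were already dropped this batch, in which case the threshold
// is simply relaxed by 1%. The threshold is later used as the timeout that cancels straggling
// sub-models.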
iteration += 1
if (dropPercentage > 0 && iteration > warmupIterationNum &&
iteration % computeThresholdbatchSize == 0) {
val moduleTimeList = models.mapPartitions { iter =>
iter.next().moduleTimeList.iterator
}.collect()
val k = (dropPercentage * computeThresholdbatchSize * driverSubModelNum).toInt
if (k > dropModelNumBatch) {
threshold = Util.kthLargest(moduleTimeList, 0, moduleTimeList.length-1,
k - dropModelNumBatch)
} else {
threshold = (threshold * 1.01).toLong
}
logger.info("threshold: " + threshold)
// clear moduleTimeList in each node
models.mapPartitions { iter =>
val timeList = iter.next.moduleTimeList
var i = 0
while (i < timeList.length) {
timeList(i) = 0
i += 1
}
Iterator.empty
}.count()
dropModelNumBatch = 0
}
driverState("neval") = driverState[Int]("neval") + 1
if (accumulateCount >= dataset.size()) {
val epochEnd = System.nanoTime()
wallClockTime = lastEpochTime + epochEnd - epochStart
lastEpochTime = wallClockTime
epochStart = System.nanoTime()
logger.info(s"${_header} Epoch finished. Wall clock time is ${wallClockTime / 1e6}ms")
driverState("epoch") = driverState[Int]("epoch") + 1
dataset.shuffle()
dataRDD = dataset.data(train = true)
accumulateCount = 0
}
validate(
validationTrigger,
validationDataSet,
validationMethods,
coresPerNode,
models,
wallClockTime,
driverState
)
checkpoint(
cacheTrigger,
cachePath,
isOverWrite,
wallClockTime,
models,
driverState,
parameters
)
} else {
logger.info(s"Warning!!! Ignore this iteration as more than maxDropPercentage " +
s"module is dropped!! Finished modules number: ${finishedModelNum}")
}
}
}
private def checkpoint[T: ClassTag](
cacheTrigger: Option[Trigger],
cachePath: Option[String],
isOverWrite: Boolean,
wallClockTime: Long,
models: RDD[Cache[T]],
state: Table,
parameters: AllReduceParameter[T])
: Unit = {
if (cacheTrigger.isDefined) {
val trigger = cacheTrigger.get
if (trigger(state) && cachePath.isDefined) {
println(s"[Wall Clock ${wallClockTime / 1e9}s] Save model to ${cachePath.get}")
saveModel(getModel(models, parameters), cachePath, isOverWrite,
s".${state[Int]("neval")}")
val localState = models.map(_.localStates.head).first()
localState("neval") = state[Int]("neval")
localState("epoch") = state[Int]("epoch")
saveState(localState, cachePath, isOverWrite, s"" +
s".${state[Int]("neval")}")
}
}
}
private def initThreadModels[T: ClassTag](
model: Module[T],
dataset: DistributedDataSet[MiniBatch[T]],
criterion: Criterion[T],
state: Table,
nodeNumber: Int,
coresPerNode: Int,
checkSingleton: Boolean,
parameters: AllReduceParameter[T]
)(implicit ev: TensorNumeric[T]) = {
val sc = dataset.originRDD().sparkContext
val broadcast = sc.broadcast((model, criterion, state))
val _subModelNumber = Engine.getEngineType match {
case MklBlas => coresPerNode
case _ => throw new IllegalArgumentException
}
require(dataset.originRDD().partitions.length == nodeNumber,
s"Passed in rdd partition number ${dataset.originRDD().partitions.length}" +
s" is not equal to configured node number ${nodeNumber}")
val partitionNum = dataset.originRDD().partitions.length
val computeThresholdbatchSize = state.get[Int]("computeThresholdbatchSize").get
val models = dataset.originRDD().mapPartitions(_ => {
val (broadcastModel, broadcastCriterion, broadcastState) = broadcast.value
if (!Engine.checkSingleton()) {
if (checkSingleton) {
require(Engine.checkSingleton(), "Detect multi-task run on one Executor/Container. " +
"Please disable singleton check or repartition data")
} else {
logger.warn("Detect multi-task run on one Executor/Container.")
}
}
val cached = (0 until _subModelNumber).map { _ =>
val localModel = broadcastModel.cloneModule()
val localCriterion = broadcastCriterion.cloneCriterion()
val localState = broadcastState.clone()
val (weights, grads) = localModel.getParameters()
(localModel, weights, grads, localCriterion, localState)
}.toArray
val weights = cached.head._2
cached.map(c =>
if (!c._2.eq(weights)) {
c._2.storage().set(weights.storage())
}
)
logger.info("model thread pool size is " + Engine.model.getPoolSize)
parameters.init(weights)
Iterator(Cache(
cached.map(_._1), // models
cached.map(_._2), // weights
cached.map(_._3), // gradients
cached.map(_._4), // criterions
cached.map(_._5), // states
cached.head._2.clone(), // a tensor buffer
new Array[Long](_subModelNumber * computeThresholdbatchSize)
))
}).persist()
models.setName("Thread Model RDD")
logger.info("Cache thread models...")
models.count()
logger.info("Cache thread models... done")
models
}
private def validate[T](
validationTrigger: Option[Trigger],
validationDataSet: Option[DataSet[MiniBatch[T]]],
validationMethods: Option[Array[ValidationMethod[T]]],
coresPerNode: Int,
models: RDD[Cache[T]],
wallClockTime: Long,
state: Table
): Unit = {
if (validationTrigger.isEmpty || validationDataSet.isEmpty) {
return
}
val trigger = validationTrigger.get
if (!trigger(state)) {
return
}
val vMethods = validationMethods.get
val validateRDD = validationDataSet.get.toDistributed().data(train = false)
logger.info(s"[Wall Clock ${wallClockTime / 1e9}s] Validate model...")
val _subModelNumber = Engine.getEngineType match {
case MklBlas => coresPerNode
case _ => throw new IllegalArgumentException
}
ZippedPartitionsWithLocalityRDD(models, validateRDD)((modelIter, dataIter) => {
val cached = modelIter.next()
val criterion = cached.localCriterions
val workingModels = cached.localModels
workingModels.foreach(_.evaluate())
dataIter.map(batch => {
require(batch.data.size(1) == batch.labels.size(1))
val stackSize = batch.data.size(1) / _subModelNumber
val extraSize = batch.data.size(1) % _subModelNumber
val parallelism = if (stackSize == 0) extraSize else _subModelNumber
Engine.default.invokeAndWait(
(0 until parallelism).map(b =>
() => {
val offset = b * stackSize + math.min(b, extraSize)
val length = stackSize + (if (b < extraSize) 1 else 0)
val input = batch.data.narrow(1, offset + 1, length)
val target = batch.labels.narrow(1, offset + 1, length)
val output = workingModels(b).forward(input)
vMethods.map(validation => {
validation(output, target, criterion(b))
})
}
)
).reduce((left, right) => {
left.zip(right).map { case (l, r) =>
l + r
}
})
})
}).reduce((left, right) => {
left.zip(right).map { case (l, r) =>
l + r
}
}).zip(vMethods).foreach(r => {
logger.info(s"${r._2} is ${r._1}")
})
}
private def getModel[T: ClassTag](
models: RDD[Cache[T]],
parameters: AllReduceParameter[T]): Module[T] = {
val partitionNum = models.partitions.length
val trainedModel = models.map(_.localModels.head.clearState()).first()
val weights = models.mapPartitions(iter => {
val cached = iter.next()
val curPartitionId = TaskContext.getPartitionId()
Iterator.single(Map(curPartitionId -> parameters.weightPartition))
}).reduce(_ ++ _)
val parameter = trainedModel.getParameters()._1
val parameterLength = parameter.nElement()
val taskSize = parameterLength / partitionNum
require(taskSize != 0, "parameter length should not be less than the partition number")
val extraSize = parameterLength % partitionNum
(0 until partitionNum).map(pid => {
val start = pid * taskSize + math.min(pid, extraSize)
val length = taskSize + (if (pid < extraSize) 1 else 0)
parameter.narrow(1, start + 1, length).copy(weights(pid))
})
trainedModel
}
}
class DistriOptimizer[T: ClassTag] (
model: Module[T],
dataset: DistributedDataSet[MiniBatch[T]],
criterion: Criterion[T]
)(implicit ev: TensorNumeric[T])
extends Optimizer[T, MiniBatch[T]](
model, dataset, criterion) {
val metrics = new Metrics
private var models: RDD[DistriOptimizer.Cache[T]] = null
/**
* Clean some internal states, so this or other optimizers can run optimize again
*
* This method will be called at the end of optimize. You need not call it if optimize succeed.
* If the optimize fails, you may call it before next optimize.
*/
def clearState() : Unit = {
// Reset the singleton flag, so other optimizers can run
models.mapPartitions(iter => {
Engine.resetSingletonFlag()
iter
}).count()
}
override def optimize(): Module[T] = {
this.assertEngineInited()
optimMethod.clearHistory(state)
state("dropPercentage") = dropPercentage
state("warmupIterationNum") = warmupIterationNum
state("computeThresholdbatchSize") = computeThresholdbatchSize
state("maxDropPercentage") = maxDropPercentage
require(Engine.nodeNumber().isDefined, "Node number is not set")
val nodeNumber = Engine.nodeNumber().get
val coresPerNode = Engine.coreNumber()
val partitionNum = dataset.originRDD().partitions.length
val size = model.getParameters()._1.nElement()
val parameters = AllReduceParameter.newParameter(partitionNum, size)
models = DistriOptimizer.initThreadModels(
model, dataset, criterion, state, nodeNumber, coresPerNode, checkSingleton, parameters)
DistriOptimizer.optimize(
dataset,
coresPerNode,
state,
endWhen,
metrics,
models,
optimMethod,
parameters,
validationTrigger,
validationDataSet,
validationMethods,
checkpointTrigger,
checkpointPath,
isOverWrite
)
val trainedModel = DistriOptimizer.getModel(models, parameters)
nn.Utils.copyModule(trainedModel, model)
// Reset some internal states, so this or other optimizers can run optimize again
clearState()
model
}
}
| dding3/BigDL | dl/src/main/scala/com/intel/analytics/bigdl/optim/DistriOptimizer.scala | Scala | apache-2.0 | 22,036 |
package io.vamp.operation.notification
import io.vamp.common.akka.RequestError
import io.vamp.common.notification.{ ErrorNotification, Notification }
import io.vamp.model.artifact._
case class InternalServerError(reason: Any) extends Notification with ErrorNotification
case class UnsupportedDeploymentRequest(request: Any) extends Notification with RequestError
case class UnsupportedGatewayRequest(request: Any) extends Notification with RequestError
case class DeploymentSynchronizationFailure(deployment: Deployment, exception: Throwable) extends Notification
case class UnresolvedVariableValueError(breed: Breed, name: String) extends Notification
case class UnresolvedEnvironmentValueError(key: String, reason: Any) extends Notification
case class UnsupportedSlaType(`type`: String) extends Notification
case class UnsupportedEscalationType(`type`: String) extends Notification
case class DeploymentServiceError(deployment: Deployment, service: DeploymentService) extends Notification
case class UnsupportedRouteWeight(deployment: Deployment, cluster: DeploymentCluster, weight: Int) extends Notification
case class NonUniqueBreedReferenceError(breed: Breed) extends Notification
case class InvalidRouteWeight(deployment: Deployment, cluster: DeploymentCluster, port: String, weight: Int) extends Notification
case class UnavailableGatewayPortError(port: Port, gateway: Gateway) extends Notification
case class WorkflowSchedulingError(reason: Any) extends Notification with ErrorNotification
case class WorkflowExecutionError(reason: Any) extends Notification with ErrorNotification
case class UnexpectedArtifact(artifact: String) extends Notification
case class InvalidTimeTriggerError(pattern: String) extends Notification
case class MissingRequiredVariableError(required: String) extends Notification
case class NoAvailablePortError(begin: Int, end: Int) extends Notification
case class InternalGatewayCreateError(name: String) extends Notification
case class InternalGatewayUpdateError(name: String) extends Notification
case class InternalGatewayRemoveError(name: String) extends Notification
object InvalidConfigurationError extends Notification
case class DeploymentWorkflowNameCollision(name: String) extends Notification
case class WorkflowUpdateError(workflow: Workflow) extends Notification
| magneticio/vamp | operation/src/main/scala/io/vamp/operation/notification/OperationNotification.scala | Scala | apache-2.0 | 2,334 |
package scala.collection.immutable
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
@RunWith(classOf[JUnit4])
class ParallelConsistencyTest {
private val theSeq = Seq(1,2,3)
// This collection will throw an exception if you do anything but call .length or .seq
private val mustCallSeq: collection.GenSeq[Int] = new collection.parallel.ParSeq[Int] {
def length = 3
// This method is surely sequential & safe -- want all access to go through here
def seq = theSeq
def notSeq = throw new Exception("Access to parallel collection not via .seq")
// These methods could possibly be used dangerously explicitly or internally
// (apply could also be used safely; if it is, do test with mustCallSeq)
def apply(i: Int) = notSeq
def splitter = notSeq
}
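// (Added note) Each test below exercises a call path (SI-9072, SI-9126) that historically
// touched the parallel collection directly; the assertions only pass if the standard library
// goes through .seq, because any other access on mustCallSeq throws.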
// Test Vector ++ with a small parallel collection concatenation (SI-9072).
@Test
def testPlusPlus(): Unit = {
assert((Vector.empty ++ mustCallSeq) == theSeq, "Vector ++ unsafe with parallel vectors")
}
// SI-9126, 1 of 2
@Test
def testTranspose(): Unit = {
assert(List(mustCallSeq).transpose.flatten == theSeq, "Transposing inner parallel collection unsafe")
}
// SI-9126, 2 of 2
@Test
def testList_flatMap(): Unit = {
assert(List(1).flatMap(_ => mustCallSeq) == theSeq, "List#flatMap on inner parallel collection unsafe")
}
}
| felixmulder/scala | test/junit/scala/collection/ParallelConsistencyTest.scala | Scala | bsd-3-clause | 1,412 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.indexer
import org.ensime.fixture.SharedEnsimeVFSFixture
import org.ensime.util.EnsimeSpec
import org.ensime.vfs._
import scala.util.Try
class ClassfileDepicklerSpec extends EnsimeSpec with SharedEnsimeVFSFixture {
"ClassfileDepickler" should "not depickle J2SE classes" in withVFS { vfs =>
new ClassfileDepickler(vfs.vres("java/lang/String.class")).scalasig should ===(
None
)
}
it should "support typical Scala classes" in withVFS { vfs =>
new ClassfileDepickler(vfs.vres("scala/collection/immutable/List.class")).scalasig shouldBe defined
}
it should "not expect anything in companions" in withVFS { vfs =>
new ClassfileDepickler(vfs.vres("scala/collection/immutable/List$.class")).scalasig should ===(
None
)
}
it should "not expect anything in closures" in withVFS { vfs =>
// scala 2.10/2.11 specific, there will be no "scala/io/Source$$anonfun$1.class" generated under 2.12
val anonFun = Try { vfs.vres("scala/io/Source$$anonfun$1.class") }
anonFun.foreach(fo => new ClassfileDepickler(fo).scalasig should ===(None))
}
it should "find type aliases" in withVFS { vfs =>
new ClassfileDepickler(vfs.vres("scala/Predef.class"))
.getClasses("scala.Predef$")
.typeAliases
.get(s"scala.Predef$$String") should ===(
Some(
RawType(
ClassName(PackageName(List("scala")), "Predef$"),
ClassName(PackageName(List("scala")), s"Predef$$String"),
"scala.Predef.String",
Public,
" = java.lang.String"
)
)
)
}
}
| ensime/ensime-server | core/src/test/scala/org/ensime/indexer/ClassfileDepicklerSpec.scala | Scala | gpl-3.0 | 1,725 |
package com.twitter.finagle.server
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ServerInfoTest extends FunSuite {
test("ServerInfo.Empty") {
assert(ServerInfo.Empty.environment.isEmpty)
}
}
| adriancole/finagle | finagle-toggle/src/test/scala/com/twitter/finagle/server/ServerInfoTest.scala | Scala | apache-2.0 | 295 |
package japgolly.scalajs.react.callback
import cats.effect._
import cats.effect.laws.AsyncTests
import cats.effect.testkit.TestInstances
import cats.kernel.Eq
import cats.tests.CatsSuite
import cats.{Id, Order, ~>}
import org.scalacheck._
import scala.concurrent.duration._
import scala.scalajs.js
final class AsyncTest extends CatsSuite with TestInstances {
import CallbackCatsEffect.{AsyncAsyncCallback, AsyncCallbackToIO, ioToAsyncCallback}
private val backlog = new js.Array[Callback]
implicit val asyncCallbackToIO: AsyncCallbackToIO =
new AsyncCallbackToIO(backlog.push(_))
implicit def arbAsyncCallback[A: Arbitrary: Cogen](implicit t: Ticker, f: IO ~> AsyncCallback): Arbitrary[AsyncCallback[A]] =
Arbitrary(arbitraryIO[A].arbitrary.map(f(_)))
implicit def eqAsyncCallback[A](implicit A: Eq[A], t: Ticker): Eq[AsyncCallback[A]] =
eqIOA[A].contramap(asyncCallbackToIO(_))
implicit def ordAsyncCallbackFiniteDuration(implicit t: Ticker): Order[AsyncCallback[FiniteDuration]] =
orderIoFiniteDuration.contramap(asyncCallbackToIO(_))
private val someK: Id ~> Option =
new ~>[Id, Option] { def apply[A](a: A) = a.some }
private def unsafeRun2[A](ioa: IO[A])(implicit ticker: Ticker): Outcome[Option, Throwable, A] =
try {
var results: Outcome[Option, Throwable, A] = Outcome.Succeeded(None)
ioa
.flatMap(IO.pure(_))
.handleErrorWith(IO.raiseError(_))
.unsafeRunAsyncOutcome { oc => results = oc.mapK(someK) }(unsafe
.IORuntime(ticker.ctx, ticker.ctx, scheduler, () => (), unsafe.IORuntimeConfig()))
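// (Added for clarity) go() repeatedly drains the test scheduler and then runs and clears the
// Callback backlog that AsyncCallbackToIO pushes into, bounded at 100 rounds, so that work
// scheduled through the backlog also gets a chance to complete before the outcome is inspected.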
def go(i: Int): Unit = {
ticker.ctx.tickAll()
if (backlog.length > 0) {
for (f <- backlog)
f.reset.runNow()
backlog.clear()
if (i < 100)
go(i + 1)
}
}
go(0)
results
} catch {
case t: Throwable =>
t.printStackTrace()
throw t
}
implicit def asyncCallbackToProp(implicit t: Ticker): AsyncCallback[Boolean] => Prop =
a => {
val io = asyncCallbackToIO(a)
val x = unsafeRun2(io)
Prop(x.fold(false, _ => false, _.getOrElse(false)))
}
implicit def cogenAsyncCallback[A: Cogen](implicit t: Ticker): Cogen[AsyncCallback[A]] =
Cogen[Outcome[Option, Throwable, A]].contramap(acb => unsafeRun2(asyncCallbackToIO(acb)))
locally {
implicit val ticker = Ticker()
implicit val instance = new AsyncAsyncCallback(asyncCallbackToIO, ioToAsyncCallback)
checkAll("Async[AsyncCallback]", AsyncTests[AsyncCallback].async[Int, Int, Int](10.millis))
}
}
| japgolly/scalajs-react | callbackExtCatsEffect/src/test/scala/japgolly/scalajs/react/callback/AsyncTest.scala | Scala | apache-2.0 | 2,612 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs
import java.awt.RenderingHints
import java.{io, util}
import org.geotools.data.store.{ContentDataStore, ContentEntry, ContentFeatureSource}
import org.geotools.data.{DataAccessFactory, DataStore, DataStoreFactorySpi, Query}
import org.geotools.feature.NameImpl
import org.locationtech.geomesa.fs.storage.api.FileSystemStorage
import org.locationtech.geomesa.fs.storage.common.{FileSystemStorageFactory, PartitionScheme}
import org.locationtech.geomesa.index.geotools.GeoMesaDataStoreFactory.NamespaceParams
import org.locationtech.geomesa.index.metadata.{GeoMesaMetadata, HasGeoMesaMetadata, NoOpMetadata}
import org.locationtech.geomesa.index.stats.{GeoMesaStats, HasGeoMesaStats, UnoptimizedRunnableStats}
import org.locationtech.geomesa.utils.conf.GeoMesaSystemProperties.SystemProperty
import org.locationtech.geomesa.utils.geotools.GeoMesaParam
import org.opengis.feature.`type`.Name
import org.opengis.feature.simple.SimpleFeatureType
import scala.collection.JavaConverters._
class FileSystemDataStore(val storage: FileSystemStorage, readThreads: Int)
extends ContentDataStore with HasGeoMesaStats with HasGeoMesaMetadata[String] {
import scala.collection.JavaConversions._
override val metadata: GeoMesaMetadata[String] = new NoOpMetadata[String]
override val stats: GeoMesaStats = new UnoptimizedRunnableStats(this)
override def createTypeNames(): util.List[Name] = {
storage.getFeatureTypes.map(name => new NameImpl(getNamespaceURI, name.getTypeName) : Name).asJava
}
override def createFeatureSource(entry: ContentEntry): ContentFeatureSource = {
storage.getFeatureTypes.find(_.getTypeName == entry.getTypeName)
.getOrElse(throw new RuntimeException(s"Could not find feature type ${entry.getTypeName}"))
new FileSystemFeatureStore(storage, entry, Query.ALL, readThreads)
}
override def createSchema(sft: SimpleFeatureType): Unit = {
storage.createNewFeatureType(sft, PartitionScheme.extractFromSft(sft))
}
}
class FileSystemDataStoreFactory extends DataStoreFactorySpi {
import FileSystemDataStoreParams._
override def createDataStore(params: java.util.Map[String, io.Serializable]): DataStore = {
val storage = Option(FileSystemStorageFactory.getFileSystemStorage(params)).getOrElse {
throw new IllegalArgumentException("Can't create storage factory with the provided params")
}
// Need to do more tuning here. On a local system 1 thread (i.e. a basic producer/consumer) was
// best, probably because Parquet is also threading the reads underneath. Using the
// producer/consumer pattern was about 30% faster, but increasing beyond 1 thread slowed things
// down. This could be due to the cost of serializing simple features, though; it needs more
// investigation.
//
// However, if you are doing lots of filtering it appears that bumping the thread count up high
// can be very useful. numCores/2 seems like a good setting (which is a standard rule of thumb).
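// (Added, illustrative only) The reader parallelism is supplied through the DataStore parameter
// map via the "fs.read-threads" key defined in FileSystemDataStoreParams below, e.g.:
//   val params = new java.util.HashMap[String, java.io.Serializable]()
//   params.put("fs.read-threads", Int.box(8)) // plus the required path/encoding parameters
//   val ds = new FileSystemDataStoreFactory().createDataStore(params)
// The remaining required keys come from PathParam/EncodingParam; their exact names are not
// repeated here, so treat the snippet as a sketch rather than a complete configuration.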
val readThreads = ReadThreadsParam.lookup(params)
val ds = new FileSystemDataStore(storage, readThreads)
NamespaceParam.lookupOpt(params).foreach(ds.setNamespaceURI)
ds
}
override def createNewDataStore(params: java.util.Map[String, io.Serializable]): DataStore =
createDataStore(params)
override def isAvailable: Boolean = true
override def canProcess(params: java.util.Map[String, io.Serializable]): Boolean =
FileSystemStorageFactory.canProcess(params)
override def getParametersInfo: Array[DataAccessFactory.Param] = {
import org.locationtech.geomesa.fs.storage.common.FileSystemStorageFactory.{ConfParam, EncodingParam, PathParam}
Array(EncodingParam, PathParam, ReadThreadsParam, ConfParam, NamespaceParam)
}
override def getDisplayName: String = "File System (GeoMesa)"
override def getDescription: String = "File System Based Data Store"
override def getImplementationHints: util.Map[RenderingHints.Key, _] =
new util.HashMap[RenderingHints.Key, Serializable]()
}
object FileSystemDataStoreParams extends NamespaceParams {
val WriterFileTimeout = SystemProperty("geomesa.fs.writer.partition.timeout", "60s")
val ReadThreadsParam = new GeoMesaParam[Integer]("fs.read-threads", "Read Threads", default = 4)
}
| boundlessgeo/geomesa | geomesa-fs/geomesa-fs-datastore/src/main/scala/org/locationtech/geomesa/fs/FileSystemDataStore.scala | Scala | apache-2.0 | 4,733 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu
import slamdata.Predef._
import quasar.{RenderTreeT, RenderTree}, RenderTree.ops._
import quasar.common.{PhaseResult, PhaseResultTell}
import quasar.common.effect.NameGenerator
import quasar.frontend.logicalplan.LogicalPlan
import quasar.qscript.MonadPlannerErr
import matryoshka.{BirecursiveT, EqualT, ShowT}
import org.slf4s.Logging
import cats.Eval
import scalaz.{Cord, Functor, Kleisli => K, Monad, Show}
import scalaz.syntax.functor._
import scalaz.syntax.show._
final class LPtoQS[T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT]
extends QSUTTypes[T]
with Logging {
import LPtoQS.MapSyntax
def apply[F[_]: Monad: MonadPlannerErr: PhaseResultTell: NameGenerator](lp: T[LogicalPlan])
: F[T[QScriptEducated]] = {
val agraph =
ApplyProvenance.AuthenticatedQSU.graph[T]
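// (Added for clarity) The pipeline below is a Kleisli composition: >==> sequences the
// effectful phases, each followed by a debug step that logs the intermediate result, while
// >- (from MapSyntax at the bottom of this file) maps a pure transformation over the
// previous phase's output.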
val lpToQs =
K(ReadLP[T, F]) >==>
debug("ReadLP") >==>
RewriteGroupByArrays[T, F] >==>
debug("RewriteGroupByArrays") >-
EliminateUnary[T] >==>
debug("EliminateUnary") >-
InlineNullary[T] >==>
debug("InlineNullary") >-
CoalesceUnaryMappable[T] >==>
debug("CoalesceUnaryMappable") >-
RecognizeDistinct[T] >==>
debug("RecognizeDistinct") >==>
ExtractFreeMap[T, F] >==>
debug("ExtractFreeMap") >==>
PruneSymmetricDimEdits[T, F] >==>
debug("PruneSymmetricDimEdits") >==>
ApplyProvenance[T, F] >==>
debug("ApplyProvenance") >==>
ReifyBuckets[T, F] >==>
debug("ReifyBuckets") >==>
MinimizeAutoJoins[T, F] >==>
debug("MinimizeAutoJoins") >==>
ReifyAutoJoins[T, F] >==>
debug("ReifyAutoJoins") >==>
ExpandShifts[T, F] >==>
debug("ExpandShifts") >-
agraph.modify(ResolveOwnIdentities[T]) >==>
debug("ResolveOwnIdentities") >==>
ReifyIdentities[T, F] >==>
debug("ReifyIdentities") >==>
Graduate[T, F]
log.debug("LogicalPlan\\n" + lp.render.shows)
lpToQs(lp)
}
private def debug[F[_]: Functor: PhaseResultTell, A: Show](name: String): A => F[A] = { a =>
log.debug((Cord(name + "\\n") ++ a.show).shows)
PhaseResultTell[F].tell(Vector(Eval.later(PhaseResult.detail(s"QSU ($name)", a.shows)))).as(a)
}
}
object LPtoQS {
def apply[T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT]: LPtoQS[T] = new LPtoQS[T]
final implicit class MapSyntax[F[_], A](val self: F[A]) extends AnyVal {
def >-[B](f: A => B)(implicit F: Functor[F]): F[B] =
F.map(self)(f)
}
}
| slamdata/slamengine | qsu/src/main/scala/quasar/qsu/LPtoQS.scala | Scala | apache-2.0 | 3,549 |
package org.adridadou.ethereum.propeller.converters.e2e
import java.math.BigInteger
import org.scalacheck.Arbitrary._
import org.scalacheck.Prop._
import org.scalatest.check.Checkers
import org.scalatest.{FlatSpec, Matchers}
/**
* Created by davidroon on 26.03.18.
* This code is released under Apache 2 license
*/
class ListTest extends FlatSpec with Matchers with Checkers with SolidityConversionHelper {
"List type" should "be converted from and to the same value" in {
val contract = contractObject[ListContract]
check(forAll(arbitrary[Array[Int]])(checkEncode(contract, _)))
}
private def checkEncode(contractObject: ListContract, value: Array[Int]) = {
val realValue = value.map(i => BigInt(i).abs.bigInteger)
contractObject.lstFunc(realValue) shouldEqual realValue
true
}
}
trait ListContract {
def lstFunc(value: Array[BigInteger]): Array[BigInteger]
}
| adridadou/eth-propeller-core | src/test/scala/org/adridadou/ethereum/propeller/converters/e2e/ListTest.scala | Scala | apache-2.0 | 903 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.JBoolean
import org.apache.flink.table.planner.calcite.FlinkRelBuilder.PlannerNamedWindowProperty
import org.apache.flink.table.planner.plan.nodes.FlinkRelNode
import org.apache.flink.table.planner.plan.nodes.calcite.{Expand, Rank, WindowAggregate}
import org.apache.flink.table.planner.plan.nodes.common.CommonLookupJoin
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.nodes.physical.batch._
import org.apache.flink.table.planner.plan.nodes.physical.stream._
import org.apache.flink.table.planner.plan.schema.FlinkPreparingTableBase
import org.apache.flink.table.planner.plan.utils.{FlinkRelMdUtil, RankUtil}
import org.apache.flink.table.runtime.operators.rank.RankType
import org.apache.flink.table.sources.TableSource
import org.apache.calcite.plan.RelOptTable
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.convert.Converter
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata._
import org.apache.calcite.rel.{RelNode, SingleRel}
import org.apache.calcite.rex.{RexCall, RexInputRef, RexNode}
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.util.{Bug, BuiltInMethod, ImmutableBitSet, Util}
import java.util
import scala.collection.JavaConversions._
/**
* FlinkRelMdColumnUniqueness supplies an implementation of
* [[RelMetadataQuery#areColumnsUnique]] for the standard logical algebra.
*/
class FlinkRelMdColumnUniqueness private extends MetadataHandler[BuiltInMetadata.ColumnUniqueness] {
def getDef: MetadataDef[BuiltInMetadata.ColumnUniqueness] = BuiltInMetadata.ColumnUniqueness.DEF
def areColumnsUnique(
rel: TableScan,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areTableColumnsUnique(rel, null, rel.getTable, columns)
}
def areColumnsUnique(
rel: FlinkLogicalLegacyTableSourceScan,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areTableColumnsUnique(rel, rel.tableSource, rel.getTable, columns)
}
private def areTableColumnsUnique(
rel: TableScan,
tableSource: TableSource[_],
relOptTable: RelOptTable,
columns: ImmutableBitSet): JBoolean = {
if (columns.cardinality == 0) {
return false
}
// TODO get uniqueKeys from TableSchema of TableSource
relOptTable match {
case table: FlinkPreparingTableBase => {
val ukOptional = table.uniqueKeysSet
if (ukOptional.isPresent) {
if (ukOptional.get().isEmpty) {
false
} else {
ukOptional.get().exists(columns.contains)
}
} else {
null
}
}
case _ => rel.getTable.isKey(columns)
}
}
def areColumnsUnique(
rel: Values,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (rel.tuples.size < 2) {
return true
}
columns.foreach { idx =>
val columnValues = rel.tuples map { tuple =>
val literal = tuple.get(idx)
if (literal.isNull) {
NullSentinel.INSTANCE
} else {
literal.getValueAs(classOf[Comparable[_]])
}
}
if (columnValues.toSet.size == columnValues.size) {
return true
}
}
false
}
def areColumnsUnique(
rel: Project,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// LogicalProject maps a set of rows to a different set;
// Without knowledge of the mapping function (whether it
// preserves uniqueness), it is only safe to derive uniqueness
// info from the child of a project when the mapping is f(a) => a.
//
// Also need to map the input column set to the corresponding child
// references
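// (Added example) For a projection like SELECT a AS x, b + 1 AS y, asking whether {x} is unique
// maps to asking whether {a} is unique in the input, whereas y is a computed expression and
// contributes nothing to the child column mask.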
areColumnsUniqueOfProject(rel.getProjects, mq, columns, ignoreNulls, rel)
}
def areColumnsUnique(
rel: Filter,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
/**
* Determines whether a specified set of columns from a Calc relational expression are unique.
*
* @param rel the Calc relational expression
* @param mq metadata query instance
* @param columns column mask representing the subset of columns for which
* uniqueness will be determined
* @param ignoreNulls if true, ignore null values when determining column
* uniqueness
* @return whether the columns are unique, or
* null if not enough information is available to make that determination
*/
def areColumnsUnique(
rel: Calc,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// Calc is composed by projects and conditions. conditions does no change unique property;
// while projects maps a set of rows to a different set.
// Without knowledge of the mapping function(whether it
// preserves uniqueness), it is only safe to derive uniqueness
// info from the child of a project when the mapping is f(a) => a.
//
// Also need to map the input column set to the corresponding child
// references
val program = rel.getProgram
val projects = program.getProjectList.map(program.expandLocalRef)
areColumnsUniqueOfProject(projects, mq, columns, ignoreNulls, rel)
}
private def areColumnsUniqueOfProject(
projects: util.List[RexNode],
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean,
originalNode: SingleRel): JBoolean = {
val childColumns = ImmutableBitSet.builder
columns.foreach { idx =>
val project = projects.get(idx)
project match {
case inputRef: RexInputRef => childColumns.set(inputRef.getIndex)
case asCall: RexCall if asCall.getKind.equals(SqlKind.AS) &&
asCall.getOperands.get(0).isInstanceOf[RexInputRef] =>
childColumns.set(asCall.getOperands.get(0).asInstanceOf[RexInputRef].getIndex)
case call: RexCall if ignoreNulls =>
// If the expression is a cast such that the types are the same
// except for the nullability, then if we're ignoring nulls,
// it doesn't matter whether the underlying column reference
// is nullable. Check that the types are the same by making a
// nullable copy of both types and then comparing them.
if (call.getOperator eq SqlStdOperatorTable.CAST) {
val castOperand = call.getOperands.get(0)
castOperand match {
case castRef: RexInputRef =>
val typeFactory = originalNode.getCluster.getTypeFactory
val castType = typeFactory.createTypeWithNullability(project.getType, true)
val origType = typeFactory.createTypeWithNullability(castOperand.getType, true)
if (castType == origType) {
childColumns.set(castRef.getIndex)
}
case _ => // ignore
}
}
case _ =>
// If the expression will not influence uniqueness of the
// projection, then skip it.
}
}
// If no columns can affect uniqueness, then return unknown
if (childColumns.cardinality == 0) {
null
} else {
mq.areColumnsUnique(originalNode.getInput(), childColumns.build, ignoreNulls)
}
}
def areColumnsUnique(
rel: Expand,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// Values of expand_id are unique among the rows expanded from a single input row,
// and an input unique key combined with expand_id is also unique.
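// (Added example) An Expand generated for grouping sets duplicates every input row once per
// grouping set, each copy carrying a different expand_id, so (input unique key, expand_id)
// remains unique across the expanded rows.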
val expandIdIndex = rel.expandIdIndex
if (!columns.get(expandIdIndex)) {
return false
}
val columnsSkipExpandId = ImmutableBitSet.builder().addAll(columns).clear(expandIdIndex).build()
if (columnsSkipExpandId.cardinality == 0) {
return false
}
val inputRefColumns = columnsSkipExpandId.flatMap {
column =>
val inputRefs = FlinkRelMdUtil.getInputRefIndices(column, rel)
if (inputRefs.size == 1 && inputRefs.head >= 0) {
Array(inputRefs.head)
} else {
Array.empty[Int]
}
}.toSeq
if (inputRefColumns.isEmpty) {
return false
}
mq.areColumnsUnique(rel.getInput, ImmutableBitSet.of(inputRefColumns: _*), ignoreNulls)
}
def areColumnsUnique(
rel: Converter,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
def areColumnsUnique(
rel: Exchange,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
def areColumnsUnique(
rank: Rank,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
val input = rank.getInput
val rankFunColumnIndex = RankUtil.getRankNumberColumnIndex(rank).getOrElse(-1)
if (rankFunColumnIndex < 0) {
mq.areColumnsUnique(input, columns, ignoreNulls)
} else {
val childColumns = columns.clear(rankFunColumnIndex)
val isChildColumnsUnique = mq.areColumnsUnique(input, childColumns, ignoreNulls)
if (isChildColumnsUnique != null && isChildColumnsUnique) {
true
} else {
rank.rankType match {
case RankType.ROW_NUMBER =>
val fields = columns.toArray
(rank.partitionKey.toArray :+ rankFunColumnIndex).forall(fields.contains(_))
case _ => false
}
}
}
}
def areColumnsUnique(
rel: Sort,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
def areColumnsUnique(
rel: StreamExecDeduplicate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
columns != null && util.Arrays.equals(columns.toArray, rel.getUniqueKeys)
}
def areColumnsUnique(
rel: StreamExecChangelogNormalize,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
columns != null && ImmutableBitSet.of(rel.uniqueKeys: _*).equals(columns)
}
def areColumnsUnique(
rel: StreamExecDropUpdateBefore,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
mq.areColumnsUnique(rel.getInput, columns, ignoreNulls)
}
def areColumnsUnique(
rel: Aggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnAggregate(rel.getGroupSet.toArray, mq, columns, ignoreNulls)
}
def areColumnsUnique(
rel: BatchExecGroupAggregateBase,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (rel.isFinal) {
areColumnsUniqueOnAggregate(rel.getGrouping, mq, columns, ignoreNulls)
} else {
null
}
}
def areColumnsUnique(
rel: StreamExecGroupAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnAggregate(rel.grouping, mq, columns, ignoreNulls)
}
def areColumnsUnique(
rel: StreamExecGlobalGroupAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnAggregate(rel.grouping, mq, columns, ignoreNulls)
}
def areColumnsUnique(
rel: StreamExecLocalGroupAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = null
private def areColumnsUniqueOnAggregate(
grouping: Array[Int],
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// group key of agg output always starts from 0
val outputGroupKey = ImmutableBitSet.of(grouping.indices: _*)
columns.contains(outputGroupKey)
}
def areColumnsUnique(
rel: WindowAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnWindowAggregate(
rel.getGroupSet.toArray,
rel.getNamedProperties,
rel.getRowType.getFieldCount,
mq,
columns,
ignoreNulls)
}
def areColumnsUnique(
rel: BatchExecWindowAggregateBase,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (rel.isFinal) {
areColumnsUniqueOnWindowAggregate(
rel.getGrouping,
rel.getNamedProperties,
rel.getRowType.getFieldCount,
mq,
columns,
ignoreNulls)
} else {
null
}
}
def areColumnsUnique(
rel: StreamExecGroupWindowAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
areColumnsUniqueOnWindowAggregate(
rel.getGrouping,
rel.getWindowProperties,
rel.getRowType.getFieldCount,
mq,
columns,
ignoreNulls)
}
private def areColumnsUniqueOnWindowAggregate(
grouping: Array[Int],
namedProperties: Seq[PlannerNamedWindowProperty],
outputFieldCount: Int,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (namedProperties.nonEmpty) {
val begin = outputFieldCount - namedProperties.size
val end = outputFieldCount - 1
val keys = ImmutableBitSet.of(grouping.indices: _*)
(begin to end).map {
i => keys.union(ImmutableBitSet.of(i))
}.exists(columns.contains)
} else {
false
}
}
def areColumnsUnique(
rel: Window,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = areColumnsUniqueOfOverAgg(rel, mq, columns, ignoreNulls)
def areColumnsUnique(
rel: BatchExecOverAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = areColumnsUniqueOfOverAgg(rel, mq, columns, ignoreNulls)
def areColumnsUnique(
rel: StreamExecOverAggregate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = areColumnsUniqueOfOverAgg(rel, mq, columns, ignoreNulls)
private def areColumnsUniqueOfOverAgg(
overAgg: SingleRel,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
val input = overAgg.getInput
val inputFieldLength = input.getRowType.getFieldCount
val columnsBelongsToInput = ImmutableBitSet.of(columns.filter(_ < inputFieldLength).toList)
val isSubColumnsUnique = mq.areColumnsUnique(
input,
columnsBelongsToInput,
ignoreNulls)
if (isSubColumnsUnique != null && isSubColumnsUnique) {
true
} else if (columnsBelongsToInput.cardinality() < columns.cardinality()) {
      // We are not sure whether the columns that do not belong to the input are unique
null
} else {
isSubColumnsUnique
}
}
def areColumnsUnique(
rel: Join,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
rel.getJoinType match {
case JoinRelType.SEMI | JoinRelType.ANTI =>
// only return the unique keys from the LHS since a SEMI/ANTI join only
// returns the LHS
mq.areColumnsUnique(rel.getLeft, columns, ignoreNulls)
case _ =>
areColumnsUniqueOfJoin(
rel.analyzeCondition(),
rel.getJoinType,
rel.getLeft.getRowType,
(leftSet: ImmutableBitSet) => mq.areColumnsUnique(rel.getLeft, leftSet, ignoreNulls),
(rightSet: ImmutableBitSet) => mq.areColumnsUnique(rel.getRight, rightSet, ignoreNulls),
mq,
columns
)
}
}
def areColumnsUnique(
rel: StreamExecIntervalJoin,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
val joinInfo = JoinInfo.of(rel.getLeft, rel.getRight, rel.joinCondition)
areColumnsUniqueOfJoin(
joinInfo,
rel.joinType,
rel.getLeft.getRowType,
(leftSet: ImmutableBitSet) => mq.areColumnsUnique(rel.getLeft, leftSet, ignoreNulls),
(rightSet: ImmutableBitSet) => mq.areColumnsUnique(rel.getRight, rightSet, ignoreNulls),
mq,
columns
)
}
def areColumnsUnique(
join: CommonLookupJoin,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
val left = join.getInput
areColumnsUniqueOfJoin(
join.joinInfo, join.joinType, left.getRowType,
(leftSet: ImmutableBitSet) => mq.areColumnsUnique(left, leftSet, ignoreNulls),
// TODO get uniqueKeys from TableSchema of TableSource
(_: ImmutableBitSet) => null,
mq, columns
)
}
def areColumnsUniqueOfJoin(
joinInfo: JoinInfo,
joinRelType: JoinRelType,
leftRowType: RelDataType,
isLeftUnique: ImmutableBitSet => JBoolean,
isRightUnique: ImmutableBitSet => JBoolean,
mq: RelMetadataQuery,
columns: ImmutableBitSet): JBoolean = {
if (columns.cardinality == 0) {
return false
}
// Divide up the input column mask into column masks for the left and
// right sides of the join
val (leftColumns, rightColumns) =
FlinkRelMdUtil.splitColumnsIntoLeftAndRight(leftRowType.getFieldCount, columns)
// If the original column mask contains columns from both the left and
// right hand side, then the columns are unique if and only if they're
// unique for their respective join inputs
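    // e.g. for A JOIN B, a mask covering {A.x, B.y} is unique iff {A.x} is unique
    // on the left input and {B.y} is unique on the right input.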
val leftUnique = isLeftUnique(leftColumns)
val rightUnique = isRightUnique(rightColumns)
if ((leftColumns.cardinality > 0) && (rightColumns.cardinality > 0)) {
if ((leftUnique == null) || (rightUnique == null)) {
return null
}
else {
return leftUnique && rightUnique
}
}
// If we're only trying to determine uniqueness for columns that
// originate from one join input, then determine if the equijoin
// columns from the other join input are unique. If they are, then
// the columns are unique for the entire join if they're unique for
// the corresponding join input, provided that input is not null
// generating.
if (leftColumns.cardinality > 0) {
if (joinRelType.generatesNullsOnLeft) {
false
} else {
val rightJoinColsUnique = isRightUnique(joinInfo.rightSet)
if ((rightJoinColsUnique == null) || (leftUnique == null)) {
null
} else {
rightJoinColsUnique && leftUnique
}
}
} else if (rightColumns.cardinality > 0) {
if (joinRelType.generatesNullsOnRight) {
false
} else {
val leftJoinColsUnique = isLeftUnique(joinInfo.leftSet)
if ((leftJoinColsUnique == null) || (rightUnique == null)) {
null
} else {
leftJoinColsUnique && rightUnique
}
}
} else {
throw new AssertionError
}
}
def areColumnsUnique(
rel: Correlate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
rel.getJoinType match {
case JoinRelType.ANTI | JoinRelType.SEMI =>
mq.areColumnsUnique(rel.getLeft, columns, ignoreNulls)
case JoinRelType.LEFT | JoinRelType.INNER =>
val left = rel.getLeft
val right = rel.getRight
val leftFieldCount = left.getRowType.getFieldCount
val (leftColumns, rightColumns) =
FlinkRelMdUtil.splitColumnsIntoLeftAndRight(leftFieldCount, columns)
if (leftColumns.cardinality > 0 && rightColumns.cardinality > 0) {
val leftUnique = mq.areColumnsUnique(left, leftColumns, ignoreNulls)
val rightUnique = mq.areColumnsUnique(right, rightColumns, ignoreNulls)
if (leftUnique == null || rightUnique == null) null else leftUnique && rightUnique
} else {
null
}
case _ => throw new TableException(
s"Unknown join type ${rel.getJoinType} for correlate relation $rel")
}
}
def areColumnsUnique(
rel: BatchExecCorrelate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = null
def areColumnsUnique(
rel: StreamExecCorrelate,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = null
def areColumnsUnique(
rel: SetOp,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
// If not ALL then the rows are distinct.
// Therefore the set of all columns is a key.
!rel.all && columns.nextClearBit(0) >= rel.getRowType.getFieldCount
}
def areColumnsUnique(
rel: Intersect,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (areColumnsUnique(rel.asInstanceOf[SetOp], mq, columns, ignoreNulls)) {
return true
}
rel.getInputs foreach { input =>
val unique = mq.areColumnsUnique(input, columns, ignoreNulls)
if (unique != null && unique) {
return true
}
}
false
}
def areColumnsUnique(
rel: Minus,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (areColumnsUnique(rel.asInstanceOf[SetOp], mq, columns, ignoreNulls)) {
true
} else {
mq.areColumnsUnique(rel.getInput(0), columns, ignoreNulls)
}
}
/**
* Determines whether a specified set of columns from a RelSubSet relational expression are
* unique.
*
   * Fixes the bug described in <a href="https://issues.apache.org/jira/browse/CALCITE-2134">[CALCITE-2134]</a>.
*
* @param subset the RelSubSet relational expression
* @param mq metadata query instance
* @param columns column mask representing the subset of columns for which
* uniqueness will be determined
* @param ignoreNulls if true, ignore null values when determining column uniqueness
* @return whether the columns are unique, or
* null if not enough information is available to make that determination
*/
def areColumnsUnique(
subset: RelSubset,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = {
if (!Bug.CALCITE_1048_FIXED) {
val rel = Util.first(subset.getBest, subset.getOriginal)
return mq.areColumnsUnique(rel, columns, ignoreNulls)
}
var nullCount = 0
for (rel <- subset.getRels) {
rel match {
        // NOTE: If uniqueness estimation is added for a new RelNode type (e.g. Rank / Expand),
        // add that RelNode to this pattern matching.
case _: Aggregate | _: Filter | _: Values | _: TableScan | _: Project | _: Correlate |
_: Join | _: Exchange | _: Sort | _: SetOp | _: Calc | _: Converter | _: Window |
_: Expand | _: Rank | _: FlinkRelNode =>
try {
val unique = mq.areColumnsUnique(rel, columns, ignoreNulls)
if (unique != null) {
if (unique) {
return true
}
} else {
nullCount += 1
}
}
catch {
case _: CyclicMetadataException =>
// Ignore this relational expression; there will be non-cyclic ones in this set.
}
case _ => // skip
}
}
if (nullCount == 0) false else null
}
/**
* Catch-all implementation for
* [[BuiltInMetadata.ColumnUniqueness#areColumnsUnique(ImmutableBitSet, boolean)]],
* invoked using reflection, for any relational expression not
* handled by a more specific method.
*
* @param rel Relational expression
* @param mq Metadata query
* @param columns column mask representing the subset of columns for which
* uniqueness will be determined
* @param ignoreNulls if true, ignore null values when determining column uniqueness
* @return whether the columns are unique, or
* null if not enough information is available to make that determination
* @see org.apache.calcite.rel.metadata.RelMetadataQuery#areColumnsUnique(
* RelNode, ImmutableBitSet, boolean)
*/
def areColumnsUnique(
rel: RelNode,
mq: RelMetadataQuery,
columns: ImmutableBitSet,
ignoreNulls: Boolean): JBoolean = null
}
object FlinkRelMdColumnUniqueness {
private val INSTANCE = new FlinkRelMdColumnUniqueness
val SOURCE: RelMetadataProvider = ReflectiveRelMetadataProvider.reflectiveSource(
BuiltInMethod.COLUMN_UNIQUENESS.method, INSTANCE)
}
| greghogan/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdColumnUniqueness.scala | Scala | apache-2.0 | 26,154 |
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.ooj.errors
import ch.usi.inf.l3.sana
import sana.tiny.errors.ErrorCode
import sana.brokenj
trait ErrorCodes extends brokenj.errors.ErrorCodes {
case object ACCESSING_THIS_IN_STATIC extends ErrorCode {
val message: String =
"``this'' cannot be accessed inside a static context"
}
case object ACCESSING_THIS_OUTSIDE_A_CLASS extends ErrorCode {
val message: String =
"``this'' needs to be enclosed by classes, either directly or indirectly"
}
case object ACCESSING_SUPER_OUTSIDE_A_CLASS extends ErrorCode {
val message: String =
"""|``super'' needs to be enclosed by classes,
|either directly or indirectly""".stripMargin
}
case object ACCESSING_SUPER_IN_STATIC extends ErrorCode {
val message: String =
"``super'' cannot be accessed inside a static context"
}
case object ACCESSING_SUPER_IN_OBJECT_CLASS extends ErrorCode {
val message: String =
"``super'' cannot be accessed inside Object class"
}
case object AMBIGUOUS_METHOD_INVOCATION extends ErrorCode {
val message: String = "Method invocation is ambiguous"
}
case object INSTANCE_METHOD_IN_STATIC_CONTEXT_INVOK extends ErrorCode {
val message: String = "Invoking an instance method in a static context"
}
case object INSTANCE_FIELD_IN_STATIC_CONTEXT_INVOK extends ErrorCode {
val message: String = "Accessing an instance field in a static context"
}
case object FIELD_NOT_ACCESSIBLE extends ErrorCode {
val message: String = "Field is not accessible from this context"
}
case object NON_STATIC_FIELD_IN_INTERFACE extends ErrorCode {
val message: String = "Fields in interfaces should be static final"
}
case object NON_FINAL_FIELD_IN_INTERFACE extends ErrorCode {
val message: String = "Fields in interfaces should be static final"
}
case object NON_ABSTRACT_METHOD_IN_INTERFACE extends ErrorCode {
val message: String = "Method is not abstract"
}
case object CONSTRUCTOR_IN_INTERFACE extends ErrorCode {
val message: String = "Interfaces cannot have constructors"
}
case object ABSTRACT_METHOD_CANNOT_HAVE_BODY extends ErrorCode {
val message: String = "Abstract methods must not have body"
}
case object CONSTRUCTOR_CANNOT_BE_ABSTRACT extends ErrorCode {
val message: String = "Constructors cannot be abstract"
}
case object ABSTRACT_METHOD_IN_CONCRETE_CLASS extends ErrorCode {
val message: String = "Abstract methods cannot occur in concrete classes"
}
case object INSTANTIATING_NON_CONCRETE_CLASS extends ErrorCode {
val message: String =
"Abstract classes/interfaces cannot be instantiated"
}
case object BAD_CLASS_MODIFIER extends ErrorCode {
val message: String =
"Modifier not allowed for class"
}
case object NON_IMPLEMENTED_METHODS extends ErrorCode {
val message: String =
"Concrete classes cannot have abstract members"
}
case object IMPLEMENTING_A_CLASS extends ErrorCode {
val message: String =
"A Class cannot extend an interface, it can only implement it"
}
case object EXTENDING_AN_INTERFACE extends ErrorCode {
val message: String =
"Interfaces may not be extended, but implemented"
}
case object CLASS_SHOULD_EXTEND_EXACTlY_ONE_CLASS extends ErrorCode {
val message: String =
"A class should extend exactly one class"
}
case object CONSTRUCTOR_SHOULD_HAVE_THE_SAME_TYPE_AS_CONTAINING_CLASS extends ErrorCode {
val message: String =
"Constructors should have the same type as their containing class"
}
case object FIELD_OWNED_BY_NON_CLASS extends ErrorCode {
val message: String =
"Field can only appear in class bodies"
}
case object LOCAL_VARIABLE_OWNED_BY_NON_LOCAL extends ErrorCode {
val message: String =
"Local variable can only appear in local contexts"
}
case object EXPLICIT_CONSTRUCTOR_INVOKATION_NOT_FIRST_STATEMENT extends
ErrorCode {
val message: String =
"Explicit constructor invokation needs to be the first statement"
}
case object EXPLICIT_CONSTRUCTOR_INVOKATION_IN_METHOD extends
ErrorCode {
val message: String =
"Explicit constructor invokation can only appear in constructors"
}
case object OVERRIDING_FINAL_METHOD extends
ErrorCode {
val message: String =
"Final methods may not be overridden"
}
case object CLASS_ALREADY_DEFINED extends
ErrorCode {
val message: String =
"Class already defined"
}
case object FINAL_PARENT extends
ErrorCode {
val message: String =
"A parent class/interface must not be final"
}
case object ABSTRACT_FINAL extends ErrorCode {
val message: String =
"Abstract classes, abstract methods and interfaces cannot be final"
}
case object PUBLIC_CLASS_FILE_NAME_MATCH_ERROR extends ErrorCode {
val message: String =
"Public classes/interfaces should have the same name as the containing file"
}
case object REFERENCE_FIELD_BEFORE_SUPERTYPE extends ErrorCode {
val message: String =
"Cannot reference a field before supertype constructor has been called"
}
case object FIELD_FORWARD_REFERENCE_IN_STATIC_INIT extends ErrorCode {
val message: String =
"Accessing fields (before declaration) in static initializer"
}
case object ILLEGAL_FORWARD_REFERENCE extends ErrorCode {
val message: String =
"Illegal forward reference"
}
case object FINAL_FIELD_IS_ALREADY_INITIALIZED extends ErrorCode {
val message: String =
"Final field is already initialized"
}
case object FINAL_FIELDS_MIGHT_NOT_BE_INITIALIZED extends ErrorCode {
val message: String =
"The following fields might not have been initialized"
}
case object CYCLIC_CONSTRUCTOR_CALL extends ErrorCode {
val message: String =
"Cyclic constructor call detected"
}
case object VARIABLE_MIGHT_NOT_HAVE_BEEN_INITIALIZED extends ErrorCode {
val message: String =
"Variable might not have been initialized"
}
case object UNREACHABLE_STATEMENT extends ErrorCode {
val message: String =
"The statment is not reachable"
}
}
object ErrorCodes extends ErrorCodes
| amanjpro/languages-a-la-carte | ooj/src/main/scala/errors/errorcodes.scala | Scala | bsd-3-clause | 7,810 |
package edu.stanford.graphics.shapenet.util
import com.jme3.math.ColorRGBA
import com.typesafe.config.{ConfigFactory, Config}
import edu.stanford.graphics.shapenet.Constants
import scala.collection.JavaConversions._
import joptsimple.{OptionException, OptionParser}
import java.io.File
/**
* Config helper
*
* @author Angel Chang
*/
class ConfigHelper(val config: Config) {
def getBoolean(s: String, default: Boolean = false) =
if (config.hasPath(s)) config.getBoolean(s) else default
def getBooleanOption(s: String) =
if (config.hasPath(s)) Option(config.getBoolean(s)) else None
def getInt(s: String, default: Int = 0) =
if (config.hasPath(s)) config.getInt(s) else default
def getIntOption(s: String) =
if (config.hasPath(s)) Option(config.getInt(s)) else None
def getDouble(s: String, default: Double = 0.0) =
if (config.hasPath(s)) config.getDouble(s) else default
def getDoubleOption(s: String) =
if (config.hasPath(s)) Option(config.getDouble(s)) else None
def getFloat(s: String, default: Float = 0.0f) =
if (config.hasPath(s)) config.getDouble(s).toFloat else default
def getFloatOption(s: String) =
if (config.hasPath(s)) Option(config.getDouble(s).toFloat) else None
def getString(s: String, default: String = null) =
if (config.hasPath(s)) config.getString(s) else default
def getStringOption(s: String) =
if (config.hasPath(s)) Option(config.getString(s)) else None
def getStringList(s: String, default: Seq[String] = null) =
if (config.hasPath(s)) config.getStringList(s).map( x => x.toString ) else default
def getColor(s: String, default: ColorRGBA = null) =
if (config.hasPath(s)) {
val colorStr = config.getString(s)
val c = java.awt.Color.decode(colorStr)
new ColorRGBA(c.getRed/255.0f, c.getGreen/255.0f, c.getBlue/255.0f, c.getAlpha/255.0f)
} else {
default
}
}
trait MutableConfigHelper {
case class ConfigOption[T](name: String, gloss: String, getValue: Unit => T, setValue: String => Unit, supportedValues: Seq[String] = null) {
}
protected val mutableOptions = new scala.collection.mutable.HashMap[String, ConfigOption[_]]
def registerMutable[T](name: String, gloss: String, getValue: Unit => T, setValue: String => Unit, supportedValues: Seq[String] = null) = {
val option = ConfigOption[T](name, gloss, getValue, setValue, supportedValues)
mutableOptions.put(name, option)
option
}
def registerMutableBoolean(name: String, gloss: String, getValue: Unit => Boolean, setValue: Boolean => Unit) = {
def set(s: String): Unit = {
val flag = ConfigHelper.parseBoolean(s)
setValue(flag)
}
registerMutable[Boolean](name, gloss, getValue, set, Seq("on", "off"))
}
def getOptionNames = mutableOptions.keySet
def getOptions = mutableOptions.values
def getOption(name: String) = mutableOptions.getOrElse(name, null)
}
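// Hypothetical usage example (the object name and the "wireframe" flag are illustrative
// only): an object that exposes a mutable boolean option through the MutableConfigHelper
// trait defined above.
object MutableConfigHelperExample extends MutableConfigHelper {
  private var wireframe: Boolean = false
  registerMutableBoolean("wireframe", "Toggle wireframe rendering",
    _ => wireframe, flag => wireframe = flag)
}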
class ConfigManager(config: Config) extends ConfigHelper(config) with MutableConfigHelper {
def getConfig(): Config = config
}
trait ConfigHandler {
def init(config: ConfigManager)
def onUpdate(config: ConfigManager) = init(config)
}
object ConfigHelper {
def getBoolean(s: String, default: Boolean = false)(implicit config: Config) =
if (config != null && config.hasPath(s)) config.getBoolean(s) else default
def getBooleanOption(s: String)(implicit config: Config) =
if (config != null && config.hasPath(s)) Option(config.getBoolean(s)) else None
def getInt(s: String, default: Int = 0)(implicit config: Config) =
if (config != null && config.hasPath(s)) config.getInt(s) else default
def getIntOption(s: String)(implicit config: Config) =
if (config != null && config.hasPath(s)) Option(config.getInt(s)) else None
def getDouble(s: String, default: Double = 0.0)(implicit config: Config) =
if (config != null && config.hasPath(s)) config.getDouble(s) else default
def getDoubleOption(s: String)(implicit config: Config) =
if (config != null && config.hasPath(s)) Option(config.getDouble(s)) else None
def getFloat(s: String, default: Float = 0.0f)(implicit config: Config) =
    if (config != null && config.hasPath(s)) config.getDouble(s).toFloat else default
def getFloatOption(s: String)(implicit config: Config) =
    if (config != null && config.hasPath(s)) Option(config.getDouble(s).toFloat) else None
def getString(s: String, default: String = null)(implicit config: Config) =
if (config != null && config.hasPath(s)) config.getString(s) else default
def getStringOption(s: String)(implicit config: Config) =
if (config != null && config.hasPath(s)) Option(config.getString(s)) else None
def getStringList(s: String, default: List[String] = null)(implicit config: Config): Seq[String] =
if (config != null && config.hasPath(s)) config.getStringList(s) else default
def fromString(conf: String): Config = {
val config = ConfigFactory.parseString(conf)
config.withFallback(defaultConfig).resolve()
}
def fromMap(map: Map[String,String]): Config = {
// val config = ConfigFactory.parseMap(map)
// config.withFallback(defaultConfig).resolve()
val str = map.map( x => x._1 + " = " + x._2 ).mkString("\\n")
fromString(str)
}
def fromOptions(args: String*): Config = {
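    // Typical invocation (illustrative): fromOptions("--conf", "base.conf,override.conf")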
// Set up command line options
// create the parser
val optionsParser = new OptionParser()
val confOption = optionsParser.accepts( "conf" ).withRequiredArg()
.withValuesSeparatedBy( ',' ).ofType( classOf[String] ).describedAs( "file" )
// TODO: Should we just use the AppSettings instead of this extra Config?
var config = ConfigFactory.empty()
try {
// parse the command line arguments
val options = optionsParser.parse( args:_* )
if (options.has(confOption)) {
val conffiles = options.valuesOf(confOption)
for (conffile <- conffiles) {
println("Using conf " + conffile)
val c = ConfigFactory.parseFileAnySyntax(new File(conffile))
config = config.withFallback(c)
}
}
config.withFallback(defaultConfig).withFallback(ConfigFactory.systemProperties).resolve()
}
catch {
// oops, something went wrong
case exp: OptionException => {
System.err.println( "Invalid arguments. Reason: " + exp.getMessage() )
optionsParser.printHelpOn( System.out )
sys.exit(-1)
}
}
}
def parseBoolean(str: String): Boolean = {
if (str != null) str.toLowerCase match {
case "true" => true
case "false" => false
case "on" => true
case "off" => false
case "enabled" => true
case "disabled" => false
case "1" => true
case "0" => true
case _ => throw new IllegalArgumentException("Invalid boolean for input string: \\""+str+"\\"")
}
else
throw new IllegalArgumentException("Invalid boolean input string: \\"null\\"")
}
def getSupportedBooleanStrings = Seq("true", "false", "on", "off", "enabled", "disabled", "1", "0")
def getDefaultConfig = defaultConfig
private val defaultConfig = ConfigFactory.parseMap(
Map(
"HOME_DIR" -> Constants.HOME_DIR,
"CODE_DIR" -> Constants.CODE_DIR,
"DATA_DIR" -> Constants.DATA_DIR,
"WORK_DIR" -> Constants.WORK_DIR,
"TEST_DIR" -> Constants.TEST_DIR,
"LOG_DIR" -> Constants.LOG_DIR,
"CACHE_DIR" -> Constants.CACHE_DIR,
"ASSETS_DIR" -> Constants.ASSETS_DIR,
"SHAPENET_VIEWER_DIR" -> Constants.SHAPENET_VIEWER_DIR
)
)
}
| ShapeNet/shapenet-viewer | src/main/scala/edu/stanford/graphics/shapenet/util/ConfigHelper.scala | Scala | mit | 7,495 |
/* Copyright 2014 UniCredit S.p.A.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package unicredit.hippo.source
import cascading.tuple.Fields
import cascading.tap.SinkMode
import com.twitter.scalding.TemplateSource
case class TemplatedTextSequenceFile(
override val basePath: String,
override val template: String,
override val fields: Fields,
override val pathFields: Fields,
override val sinkMode: SinkMode = SinkMode.REPLACE
) extends TemplateSource with TextSequenceFileScheme | unicredit/hippodb | hbase-sync/src/main/scala/unicredit/hippo/source/TemplatedTextSequenceFile.scala | Scala | apache-2.0 | 1,020 |
/*
"Functional programs contain no assignment statements, so variables, once given a value, never change."
What is this property called?
*/
| agconti/scala-school | 03-intro-fp-scala/slides/slide003.scala | Scala | mit | 143 |
/*
* Copyright 2016 Renaud Bruneliere
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.bruneli.scalaopt.core.function
import com.github.bruneli.scalaopt.core._
import com.github.bruneli.scalaopt.core.variable.UnconstrainedVariable
/**
* Differentiable objective function with gradient passed as input
*
* @param f objective function and its gradient
* @param config configuration parameters
* @author bruneli
*/
case class ObjectiveFunctionWithGradient(
f: (UnconstrainedVariablesType => Double, UnconstrainedVariablesType => UnconstrainedVariablesType),
implicit val config: ConfigPars = new ConfigPars()) extends DifferentiableObjectiveFunction[UnconstrainedVariable] {
def apply(x: UnconstrainedVariablesType) = f._1(x)
override def gradient(x: UnconstrainedVariablesType) = f._2(x)
override def dirder(x: UnconstrainedVariablesType, d: UnconstrainedVariablesType): Double = {
gradient(x) dot d
}
override def dirHessian(
x: UnconstrainedVariablesType,
d: UnconstrainedVariablesType): UnconstrainedVariablesType = {
val gradx = gradient(x)
val gradxd = gradient(x + (d * config.eps))
(gradxd - gradx) / config.eps
}
}
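/**
 * Hypothetical usage example: a quadratic objective f(x) = 0.5 * (x dot x), whose
 * analytic gradient is x itself. Only operations already used in this file (the dot
 * product on variables) are assumed.
 */
object ObjectiveFunctionWithGradientExample {
  val halfSquaredNorm = ObjectiveFunctionWithGradient((
    (x: UnconstrainedVariablesType) => 0.5 * (x dot x),
    (x: UnconstrainedVariablesType) => x))
}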
| bruneli/scalaopt | core/src/main/scala/com/github/bruneli/scalaopt/core/function/ObjectiveFunctionWithGradient.scala | Scala | apache-2.0 | 1,707 |
import org.scalatest.{FlatSpec, Matchers}
class MethodTest extends FlatSpec with Matchers {
}
| oliverhaase/sai | src/test/scala/MethodTest.scala | Scala | mit | 96 |
package com.v_standard.vsp.compiler
import java.io.File
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
import scala.io.Source
/**
 * Test spec class for ScriptCompiler.
*/
class ScriptCompilerSpec extends FunSpec with ShouldMatchers {
describe("compile") {
describe("動的の場合") {
it("コンパイル済みスクリプトが生成される") {
val res = ScriptCompiler.compile(Source.fromString("""<html>
<body>
%{abc}
</body>
</html>"""), TokenParseConfig(null, '%'))
res.cscript should not be (None)
res.text should be (None)
}
}
describe("静的の場合") {
it("テキストが生成される") {
val res = ScriptCompiler.compile(Source.fromString("""<html>
<body>
</body>
</html>"""), TokenParseConfig(null, '%'))
res.cscript should be (None)
res.text.get should be ("""<html>
<body>
</body>
</html>""")
}
}
describe("インクルードがある場合") {
it("インクルードしたファイル一覧が作成される") {
val baseDir = new File("./src/test/resources/templates")
val res = ScriptCompiler.compile(Source.fromString("""<html>
<body>
<%/include_twice.html%>
</body>
</html>"""), TokenParseConfig(baseDir, '%'))
res.includeFiles should be (Set(new File(baseDir, "common.html").getCanonicalFile,
new File(baseDir, "include_twice.html").getCanonicalFile))
}
}
describe("コンパイルエラーの場合") {
it("テキストにエラーが出力される") {
val res = ScriptCompiler.compile(Source.fromString("""<html>
<body>
<% **abc %>
</body>
</html>"""), TokenParseConfig(null, '%'))
res.cscript should be (None)
res.text should not be (None)
}
}
describe("コンパイルエラーでインクルードがある場合") {
it("インクルードしたファイル一覧が作成される") {
val baseDir = new File("./src/test/resources/templates")
val res = ScriptCompiler.compile(Source.fromString("""<html>
<body>
<%/include_twice.html%>
<%
</body>
</html>"""), TokenParseConfig(baseDir, '%'))
res.includeFiles should be (Set(new File(baseDir, "common.html").getCanonicalFile,
new File(baseDir, "include_twice.html").getCanonicalFile))
}
}
}
}
| VanishStandard/vsp | src/test/scala/com/v_standard/vsp/compiler/ScriptCompilerSpec.scala | Scala | bsd-3-clause | 2,269 |
package drt.server.feeds.lcy
import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpRequest, HttpResponse}
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import drt.server.feeds.common.HttpClient
import drt.shared.FlightsApi.Flights
import org.mockito.Mockito.{times, verify}
import org.specs2.mock.Mockito
import server.feeds.{ArrivalsFeedFailure, ArrivalsFeedResponse, ArrivalsFeedSuccess}
import services.crunch.CrunchTestLike
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
class LCYFeedSpec extends CrunchTestLike with Mockito {
val httpClient: HttpClient = mock[HttpClient]
"Given a request for a full refresh of all flights success, match result according to polling count" in {
httpClient.sendRequest(anyObject[HttpRequest])(anyObject[ActorSystem]) returns Future(HttpResponse(entity = HttpEntity(ContentTypes.`text/xml(UTF-8)`, lcySoapResponseSuccessXml)))
val lcyClient = LCYClient(httpClient, "user", "someSoapEndPoint", "someUsername", "somePassword")
val feed: Source[ArrivalsFeedResponse, Cancellable] = LCYFeed(lcyClient, 1 millisecond, 1 millisecond)
val result = Await.result(feed.take(2).runWith(Sink.seq), 1 second)
verify(httpClient, times(2)).sendRequest(anyObject[HttpRequest])(anyObject[ActorSystem])
result.size mustEqual 2
}
"Given a request for a full refresh of all flights success , it keeps polling for update" >> {
val lcyClient = mock[LCYClient]
val success = ArrivalsFeedSuccess(Flights(List()))
lcyClient.initialFlights(anyObject[ActorSystem], anyObject[Materializer]) returns Future(success)
lcyClient.updateFlights(anyObject[ActorSystem], anyObject[Materializer]) returns Future(success)
val feed: Source[ArrivalsFeedResponse, Cancellable] = LCYFeed(lcyClient, 1 millisecond, 1 millisecond)
val result = Await.result(feed.take(4).runWith(Sink.seq), 1 second)
verify(lcyClient, times(1)).initialFlights(anyObject[ActorSystem], anyObject[Materializer])
verify(lcyClient, times(3)).updateFlights(anyObject[ActorSystem], anyObject[Materializer])
result.toList.size mustEqual 4
}
"Given a request for a full refresh of all flights fails , it keeps polling for initials" >> {
val lcyClient = mock[LCYClient]
val success = ArrivalsFeedSuccess(Flights(List()))
val failure = ArrivalsFeedFailure("Failure")
lcyClient.initialFlights(anyObject[ActorSystem], anyObject[Materializer]) returns Future(failure)
lcyClient.updateFlights(anyObject[ActorSystem], anyObject[Materializer]) returns Future(success)
val feed: Source[ArrivalsFeedResponse, Cancellable] = LCYFeed(lcyClient, 1 millisecond, 1 millisecond)
val result = Await.result(feed.take(4).runWith(Sink.seq), 1 second)
verify(lcyClient, times(4)).initialFlights(anyObject[ActorSystem], anyObject[Materializer])
verify(lcyClient, times(0)).updateFlights(anyObject[ActorSystem], anyObject[Materializer])
result.toList.size mustEqual 4
}
val fullRefresh =
"""<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:ns="http://www.airport2020.com/RequestAIDX/">
| <soapenv:Header/>
| <soapenv:Body>
| <ns:userID>user</ns:userID>
| <ns:fullRefresh>1</ns:fullRefresh>
| </soapenv:Body>
| </soapenv:Envelope>
""".stripMargin
val lcySoapResponseSuccessXml: String =
"""<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
| <s:Body xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
| <IATA_AIDX_FlightLegRS TimeStamp="2020-07-03T10:59:35.1977952+01:00" Version="13.2" xmlns="http://www.iata.org/IATA/2007/00">
| <Success/>
| </IATA_AIDX_FlightLegRS>
| </s:Body>
|</s:Envelope>
""".stripMargin
}
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/test/scala/drt/server/feeds/lcy/LCYFeedSpec.scala | Scala | apache-2.0 | 3,951 |
package sample.stream.akka_http
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
object ConnectionLevelExample {
def main(args: Array[String]) {
implicit val actorSystem = ActorSystem("akka-connection-level")
implicit val materialiser = ActorMaterializer()
implicit val executionContextExecutor = actorSystem.dispatcher
val outgoingConnection = Http().outgoingConnection("akka.io")
val eventualResponse = Source.single(HttpRequest(uri = "/"))
.via(outgoingConnection)
.runWith(Sink.head)
eventualResponse.map({ x => println(x); actorSystem.shutdown(); 1 })
}
}
| pallavig/akka-examples | src/main/scala/sample/stream/akka_http/ConnectionLevelExample.scala | Scala | cc0-1.0 | 746 |
package systems.adaptix.bling.tags.graph
import org.specs2.mutable.Specification
import scala.collection.mutable
/**
* Created by nkashyap on 5/17/15.
*/
class DagVertexSpecification extends Specification {
"A DagVertex is instantiated with a string which serves as its label." >> {
val vertex = new DagVertex("test")
vertex.label mustEqual "test"
}
"DagVertex instances are identified by reference." >> {
val v1 = new DagVertex("lol")
val v2 = new DagVertex("lol")
val v3 = new DagVertex("rofl")
v1 mustNotEqual v2
v1 mustNotEqual v3
v2 mustNotEqual v3
}
"A DagVertex instance has a \\"children\\" variable, which is of type mutable.Set, and is set to an empty set of DagVertices at instantiation." >> {
val vertex = new DagVertex("test")
vertex.children mustEqual mutable.Set[DagVertex]()
}
"Children may be added individually to a DagVertex using its \\"addChild\\" method." >> {
val parent = new DagVertex("parent")
val child = new DagVertex("child")
parent.addChild(child)
parent.children must haveSize(1)
parent.children must contain(child)
}
"A set of vertices may be passed to the \\"addChildren\\" method to perform a batch addition." >> {
val parent = new DagVertex("parent")
val child1 = new DagVertex("child1")
val child2 = new DagVertex("child2")
parent addChildren Set(child1, child2)
parent.children must haveSize(2)
parent.children must contain(child1)
parent.children must contain(child2)
}
"The \\"hasChild\\" method of a DagVertex tests if a given DagVertex is one of its children." >> {
val parent = new DagVertex("parent")
val child = new DagVertex("child")
parent addChild child
parent hasChild child must beTrue
child hasChild parent must beFalse
}
"The \\"hasChildren\\" method of a DagVertex tests if all the vertices in a given set are its children." >> {
val parent = new DagVertex("")
val child1 = new DagVertex("")
val child2 = new DagVertex("")
val nonchild = new DagVertex("")
parent addChildren Set(child1, child2)
parent.hasChildren(Set(child1, child2)) must beTrue
parent.hasChildren(Set(nonchild)) must beFalse
parent.hasChildren(Set(child1, nonchild)) must beFalse
}
"The \\"removeChild\\" method of a DagVertex removes a given vertex from its list of children if it was a child in the first place. Else it does nothing." >> {
val parent = new DagVertex("parent")
val child = new DagVertex("child")
val nonchild = new DagVertex("nonchild")
parent addChild child
parent hasChild child must beTrue
parent hasChild nonchild must beFalse
parent removeChild child
parent hasChild child must beFalse
parent removeChild nonchild
parent hasChild nonchild must beFalse
}
"The \\"removeChildren\\" method performs a batch deletion of children present in a given set of vertices." >> {
val parent = new DagVertex("")
val child1 = new DagVertex("")
val child2 = new DagVertex("")
val nonchild = new DagVertex("")
parent addChildren Set(child1, child2)
parent removeChildren Set(child1, nonchild)
parent.children must haveSize(1)
parent.children must contain(child2)
}
"A DagVertex also has the member variables \\"onStack\\", \\"index\\" and \\"backLink\\", of types Boolean, Option[Int], and Option[Int] respectively, which are used to validate acyclicity. They are set to None at instantiation." >> {
val test = new DagVertex("test")
test.onStack must beFalse
test.index must beNone
test.lowLink must beNone
}
"A DagVertex can be instantiated by calling the DagVertex companion object." >> {
val vertex = DagVertex("test")
vertex.label mustEqual "test"
}
"The DagVertex companion object also allows for the instantiation of a DagVertex with a given set of children." >> {
val child1 = DagVertex("child1")
val child2 = DagVertex("child2")
val parent = DagVertex("parent", Set(child1, child2))
parent.label mustEqual "parent"
parent hasChild child1 must beTrue
parent hasChild child2 must beTrue
parent.children.size mustEqual 2
}
}
| nkashy1/bling | src/test/scala/systems/adaptix/bling/tags/graph/DagVertexSpecification.scala | Scala | mit | 4,167 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.fs.mount
import slamdata.Predef._
import quasar.effect._
import quasar.fs.BackendEffect
import hierarchical.MountedResultH
import eu.timepit.refined.auto._
import monocle.function.Field1
import scalaz._, Scalaz._
/** Handles mount requests, validating them and updating a hierarchical
* `FileSystem` interpreter as mounts are added and removed.
*
* @tparam F the base effect that `FileSystem` operations are translated into
* @tparam S the composite effect, supporting the base and hierarchical effects
*/
final class MountRequestHandler[F[_], S[_]](
fsDef: BackendDef[F]
)(implicit
S0: F :<: S,
S1: MountedResultH :<: S,
S2: MonotonicSeq :<: S
) {
import MountRequest._
type HierarchicalFsRef[A] = AtomicRef[BackendEffect ~> Free[S, ?], A]
object HierarchicalFsRef {
def Ops[G[_]](implicit G: HierarchicalFsRef :<: G) =
AtomicRef.Ops[BackendEffect ~> Free[S, ?], G]
}
def mount[T[_]](
req: MountRequest
)(implicit
T0: F :<: T,
T1: fsm.MountedFsRef :<: T,
T2: HierarchicalFsRef :<: T,
F: Monad[F]
): Free[T, MountingError \\/ Unit] = {
val handleMount: MntErrT[Free[T, ?], Unit] =
EitherT(req match {
case MountFileSystem(d, typ, uri) => fsm.mount[T](d, typ, uri)
// Previously we would validate at this point that a view's Sql could be compiled
// to `LogicalPlan` but now that views can contain Imports, that's no longer easy or very
// valuable. Validation can once again be performed once `LogicalPlan` has a representation
// for functions and imports
// See https://github.com/quasar-analytics/quasar/issues/2398
case _ => ().right.point[Free[T, ?]]
})
(handleMount *> updateHierarchy[T].liftM[MntErrT]).run
}
def unmount[T[_]](
req: MountRequest
)(implicit
T0: F :<: T,
T1: fsm.MountedFsRef :<: T,
T2: HierarchicalFsRef :<: T
): Free[T, Unit] =
fsDir.getOption(req).traverse_(fsm.unmount[T]) *> updateHierarchy[T]
////
private val fsm = FileSystemMountHandler[F](fsDef)
private val fsDir = mountFileSystem composeLens Field1.first
/** Builds the hierarchical interpreter from the currently mounted filesystems,
* storing the result in `HierarchicalFsRef`.
*
* TODO: Effects should be `Read[MountedFs, ?]` and `Write[HierarchicalFs, ?]`
* to be more precise.
*
* This involves, roughly
* 1. Get the current mounted filesystems from `MountedFsRef`.
*
* 2. Build a hierarchical filesystem interpreter using the mounts from (1).
*
* 3. Lift the result of (2) into the output effect, `S[_]`.
*
* 4. Store the result of (3) in `HierarchicalFsRef`.
*/
private def updateHierarchy[T[_]](
implicit
T0: F :<: T,
T1: fsm.MountedFsRef :<: T,
T2: HierarchicalFsRef :<: T
): Free[T, Unit] =
for {
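      // (1) read the currently mounted filesystems, then (2)+(3) build the hierarchical
      // interpreter over them and lift it into the output effect S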
mnted <- fsm.MountedFsRef.Ops[T].get ∘
(mnts => hierarchical.backendEffect[F, S](mnts.map(_.run)))
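      // (4) store the composite interpreter for subsequent requests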
_ <- HierarchicalFsRef.Ops[T].set(mnted)
} yield ()
}
object MountRequestHandler {
def apply[F[_], S[_]](
fsDef: BackendDef[F]
)(implicit
S0: F :<: S,
S1: MountedResultH :<: S,
S2: MonotonicSeq :<: S
): MountRequestHandler[F, S] =
new MountRequestHandler[F, S](fsDef)
}
| jedesah/Quasar | core/src/main/scala/quasar/fs/mount/MountRequestHandler.scala | Scala | apache-2.0 | 3,923 |
package com.twitter.finagle.ssl.client
import com.twitter.finagle.Address
import com.twitter.finagle.ssl.{Engine, SslConfigurations}
import javax.net.ssl.SSLContext
/**
* This class provides an ability to use an initialized supplied
* `javax.net.ssl.SSLContext` as the basis for creating [[Engine Engines]].
*/
final class SslContextClientEngineFactory(sslContext: SSLContext) extends SslClientEngineFactory {
/**
* Creates a new [[Engine]] based on an [[Address]] and an [[SslClientConfiguration]]
* using the supplied `javax.net.ssl.SSLContext`.
*
* @param address A physical address which potentially includes metadata.
*
* @param config A collection of parameters which the engine factory should
* consider when creating the TLS client [[Engine]].
*
* @note [[KeyCredentials]] other than Unspecified are not supported.
* @note [[TrustCredentials]] other than Unspecified are not supported.
* @note [[ApplicationProtocols]] other than Unspecified are not supported.
*/
def apply(address: Address, config: SslClientConfiguration): Engine = {
SslConfigurations.checkKeyCredentialsNotSupported(
"SslContextClientEngineFactory",
config.keyCredentials
)
SslConfigurations.checkTrustCredentialsNotSupported(
"SslContextClientEngineFactory",
config.trustCredentials
)
SslConfigurations.checkApplicationProtocolsNotSupported(
"SslContextClientEngineFactory",
config.applicationProtocols
)
val engine = SslClientEngineFactory.createEngine(sslContext, address, config)
SslClientEngineFactory.configureEngine(engine, config)
engine
}
}
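/**
 * Hypothetical usage example: builds a factory around the JVM's default SSLContext and
 * creates an engine for an illustrative address. The `Address("localhost", 443)` call
 * and the no-argument `SslClientConfiguration()` constructor are assumptions made for
 * this sketch, not guarantees about the surrounding API.
 */
object SslContextClientEngineFactoryExample {
  def defaultEngine(): Engine = {
    val factory = new SslContextClientEngineFactory(SSLContext.getDefault)
    factory(Address("localhost", 443), SslClientConfiguration())
  }
}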
| twitter/finagle | finagle-core/src/main/scala/com/twitter/finagle/ssl/client/SslContextClientEngineFactory.scala | Scala | apache-2.0 | 1,649 |
package code.model
import _root_.net.liftweb.mapper._
import _root_.net.liftweb.util._
import _root_.net.liftweb.common._
import _root_.net.liftweb.sitemap.Loc._
import _root_.net.liftweb.http._
import _root_.scala.xml.transform._
import _root_.net.liftweb.util.Helpers._
object Comment extends Comment with LongKeyedMetaMapper[Comment] {
override def dbTableName = "Comments" // define the DB table name
}
/**
* An O-R mapped "Comment" class
*/
class Comment extends LongKeyedMapper[Comment] {
def getSingleton = Comment // what's the "meta" server
// comment id
def primaryKeyField = id
object id extends MappedLongIndex(this)
// user that published the comment
object user extends LongMappedMapper(this, User)
// activity that's commented on
object activity extends LongMappedMapper(this, Activity)
// when the activity was created
object time extends MappedDateTime(this)
// message
object text extends MappedText(this)
}
| Cerovec/LiftSocial | src/main/scala/code/model/Comment.scala | Scala | apache-2.0 | 964 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.validation
import org.apache.flink.api.common.typeinfo.BasicTypeInfo.{DOUBLE_TYPE_INFO, INT_TYPE_INFO, STRING_TYPE_INFO}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.{GenericTypeInfo, RowTypeInfo, TupleTypeInfo, TypeExtractor}
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{CClass, PojoClass, TableEnvironment, TableException}
import org.apache.flink.table.expressions.{Alias, UnresolvedFieldReference}
import org.apache.flink.table.runtime.types.CRowTypeInfo
import org.apache.flink.types.Row
import org.junit.Assert.assertTrue
import org.junit._
class TableEnvironmentValidationTest {
private val env = ExecutionEnvironment.getExecutionEnvironment
private val tEnv = TableEnvironment.getTableEnvironment(env)
val tupleType = new TupleTypeInfo(
INT_TYPE_INFO,
STRING_TYPE_INFO,
DOUBLE_TYPE_INFO)
  val rowType = new RowTypeInfo(INT_TYPE_INFO, STRING_TYPE_INFO, DOUBLE_TYPE_INFO)
val cRowType = new CRowTypeInfo(rowType)
val caseClassType: TypeInformation[CClass] = implicitly[TypeInformation[CClass]]
val pojoType: TypeInformation[PojoClass] = TypeExtractor.createTypeInfo(classOf[PojoClass])
val atomicType = INT_TYPE_INFO
val genericRowType = new GenericTypeInfo[Row](classOf[Row])
@Test(expected = classOf[TableException])
def testGetFieldInfoPojoNames1(): Unit = {
tEnv.getFieldInfo(
pojoType,
Array(
UnresolvedFieldReference("name1"),
UnresolvedFieldReference("name2"),
UnresolvedFieldReference("name3")
))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoAtomicName2(): Unit = {
tEnv.getFieldInfo(
atomicType,
Array(
UnresolvedFieldReference("name1"),
UnresolvedFieldReference("name2")
))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoTupleAlias3(): Unit = {
tEnv.getFieldInfo(
tupleType,
Array(
Alias(UnresolvedFieldReference("xxx"), "name1"),
Alias(UnresolvedFieldReference("yyy"), "name2"),
Alias(UnresolvedFieldReference("zzz"), "name3")
))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoCClassAlias3(): Unit = {
tEnv.getFieldInfo(
caseClassType,
Array(
Alias(UnresolvedFieldReference("xxx"), "name1"),
Alias(UnresolvedFieldReference("yyy"), "name2"),
Alias(UnresolvedFieldReference("zzz"), "name3")
))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoPojoAlias3(): Unit = {
tEnv.getFieldInfo(
pojoType,
Array(
Alias(UnresolvedFieldReference("xxx"), "name1"),
Alias(UnresolvedFieldReference("yyy"), "name2"),
Alias(UnresolvedFieldReference("zzz"), "name3")
))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoAtomicAlias(): Unit = {
tEnv.getFieldInfo(
atomicType,
Array(
Alias(UnresolvedFieldReference("name1"), "name2")
))
}
@Test(expected = classOf[TableException])
def testGetFieldInfoGenericRowAlias(): Unit = {
tEnv.getFieldInfo(
genericRowType,
Array(UnresolvedFieldReference("first")))
}
@Test(expected = classOf[TableException])
def testRegisterExistingDataSet(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val ds1 = CollectionDataSets.get3TupleDataSet(env)
tEnv.registerDataSet("MyTable", ds1)
val ds2 = CollectionDataSets.get5TupleDataSet(env)
// Must fail. Name is already in use.
tEnv.registerDataSet("MyTable", ds2)
}
@Test(expected = classOf[TableException])
def testScanUnregisteredTable(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
// Must fail. No table registered under that name.
tEnv.scan("someTable")
}
@Test(expected = classOf[TableException])
def testRegisterExistingTable(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
val t1 = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv)
tEnv.registerTable("MyTable", t1)
val t2 = CollectionDataSets.get5TupleDataSet(env).toTable(tEnv)
// Must fail. Name is already in use.
tEnv.registerDataSet("MyTable", t2)
}
@Test(expected = classOf[TableException])
def testRegisterTableFromOtherEnv(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv1 = TableEnvironment.getTableEnvironment(env)
val tEnv2 = TableEnvironment.getTableEnvironment(env)
val t1 = CollectionDataSets.get3TupleDataSet(env).toTable(tEnv1)
// Must fail. Table is bound to different TableEnvironment.
tEnv2.registerTable("MyTable", t1)
}
@Test(expected = classOf[TableException])
def testToTableWithToManyFields(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
CollectionDataSets.get3TupleDataSet(env)
// Must fail. Number of fields does not match.
.toTable(tEnv, 'a, 'b, 'c, 'd)
}
@Test(expected = classOf[TableException])
def testToTableWithAmbiguousFields(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
CollectionDataSets.get3TupleDataSet(env)
// Must fail. Field names not unique.
.toTable(tEnv, 'a, 'b, 'b)
}
@Test(expected = classOf[TableException])
def testToTableWithNonFieldReference1(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
// Must fail. as() can only have field references
CollectionDataSets.get3TupleDataSet(env)
.toTable(tEnv, 'a + 1, 'b, 'c)
}
@Test(expected = classOf[TableException])
def testToTableWithNonFieldReference2(): Unit = {
val env = ExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
// Must fail. as() can only have field references
CollectionDataSets.get3TupleDataSet(env)
.toTable(tEnv, 'a as 'foo, 'b, 'c)
}
@Test(expected = classOf[TableException])
def testGenericRow() {
val env = ExecutionEnvironment.getExecutionEnvironment
val tableEnv = TableEnvironment.getTableEnvironment(env)
    // use a null value to enforce GenericType
val dataSet = env.fromElements(Row.of(null))
assertTrue(dataSet.getType().isInstanceOf[GenericTypeInfo[_]])
assertTrue(dataSet.getType().getTypeClass == classOf[Row])
// Must fail. Cannot import DataSet<Row> with GenericTypeInfo.
tableEnv.fromDataSet(dataSet)
}
@Test(expected = classOf[TableException])
def testGenericRowWithAlias() {
val env = ExecutionEnvironment.getExecutionEnvironment
val tableEnv = TableEnvironment.getTableEnvironment(env)
    // use a null value to enforce GenericType
val dataSet = env.fromElements(Row.of(null))
assertTrue(dataSet.getType().isInstanceOf[GenericTypeInfo[_]])
assertTrue(dataSet.getType().getTypeClass == classOf[Row])
// Must fail. Cannot import DataSet<Row> with GenericTypeInfo.
tableEnv.fromDataSet(dataSet, "nullField")
}
}
| zohar-mizrahi/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/validation/TableEnvironmentValidationTest.scala | Scala | apache-2.0 | 8,277 |
package models
import java.io.ByteArrayInputStream
import com.amazonaws.services.s3.model.ObjectMetadata
import java.io.InputStream
import scala.io.Source
import play.api.Logger
import com.amazonaws.auth.BasicAWSCredentials
import com.amazonaws.services.s3.AmazonS3Client
import org.slf4j.LoggerFactory
import com.amazonaws.ClientConfiguration
object S3Communicator {
val logger = LoggerFactory.getLogger(this.getClass().getName())
val credentials = new BasicAWSCredentials(sys.env("aws_s3_accesskey"), sys.env("aws_s3_secretkey"))
  val client = new ClientConfiguration()
  client.setSocketTimeout(300000)
  // Pass the client configuration so the extended socket timeout is actually applied.
  val amazonS3Client = new AmazonS3Client(credentials, client)
def test() = {
println("S3Communicator.test")
var input = new ByteArrayInputStream("Hello World!".getBytes());
upload("helloWorld", input)
var stream = getObject("helloWorld")
for ( line <- Source.fromInputStream(stream).getLines() ) {
println("line " + line )
}
}
/**
   * Uploads a file to the standard bucket on S3.
*/
def upload(filename: String, stream: ByteArrayInputStream): Boolean = {
try {
println("Up load object")
amazonS3Client.putObject("imdraces", filename, stream, new ObjectMetadata ); true
} catch {
case ex: Exception => logger.error(ex.getMessage(), ex); false
}
}
/**
* Deletes a file to standard bucket on S3
*/
def delete(fileKeyName: String): Boolean = {
try {
amazonS3Client.deleteObject("imdraces", fileKeyName); true
} catch {
case ex: Exception => logger.error(ex.getMessage(), ex); false
}
}
/**
* Checks if the file exists on the standard bucket of S3
*/
def doesFileExist(fileKeyName: String): Boolean = {
try {
amazonS3Client.getObjectMetadata("imdraces", fileKeyName); true
} catch {
case ex: Exception => logger.error(ex.getMessage(), ex); false
}
}
def getObject(fileKeyName: String) : InputStream = {
try {
amazonS3Client.getObject("imdraces", fileKeyName).getObjectContent
} catch {
case ex: Exception => logger.error(ex.getMessage(), ex); null
}
}
}
| saine1a/IMDRaces | app/models/S3Communicator.scala | Scala | mit | 2,147 |
package iot.pood.management.actor
import iot.pood.base.model.security.SecurityMessages.JwtToken
import iot.pood.management.actors.AuthenticatorService
import iot.pood.management.actors.AuthenticatorService._
import iot.pood.management.security.internal.JwtTokenService
import akka.pattern.ask
import com.typesafe.config.ConfigFactory
import iot.pood.base.model.user.UserMessages.SimpleUser
import iot.pood.management.security.SecurityConfig
import scala.concurrent.Await
import scala.concurrent.duration.DurationDouble
/**
* Created by rafik on 12.10.2017.
*/
class AuthenticatorServiceTest extends SecurityTests {
"Authenticator service actor " must {
"with credential (user,user) return success login response" in {
val authenticatorService = system.actorOf(AuthenticatorService.props(JwtTokenService(securityConfig)), AuthenticatorService.NAME)
authenticatorService ! LoginRequest("user", "user")
expectMsgPF() {
case LoginSuccessResponse(x: JwtToken) => {
x should not be (Nil)
x.expiration should ===(2)
x.refreshToken should not be (Nil)
x.authToken should not be (Nil)
}
}
}
"with credetial (user,incorrect) return error login response" in {
val authenticatorService = system.actorOf(AuthenticatorService.props(JwtTokenService(securityConfig)))
authenticatorService ! LoginRequest("user", "incorrect")
expectMsg(LoginErrorResponse)
}
"login success with credential (admin,user)" in {
val authenticatorService = system.actorOf(AuthenticatorService.props(JwtTokenService(securityConfig)))
val futureResult = authenticatorService ? LoginRequest("admin", "user")
val result = Await.result(futureResult, 1 seconds).asInstanceOf[LoginSuccessResponse]
result.token match {
case token: JwtToken => {
val authFuture = authenticatorService ? AuthenticationRequest(token.authToken)
val authResult = Await.result(authFuture, 1 seconds).asInstanceOf[AuthenticationSuccess]
authResult.user should matchPattern {
case SimpleUser("admin") =>
}
}
}
}
"login success with credential (user1,user) but get Unauthorized message" in {
val authenticatorService = system.actorOf(AuthenticatorService.props(JwtTokenService(securityConfig)))
val futureResult = authenticatorService ? LoginRequest("user1", "user")
val result = Await.result(futureResult, 1 seconds).asInstanceOf[LoginSuccessResponse]
result.token match {
case token: JwtToken => {
authenticatorService ! AuthenticationRequest(token.authToken)
expectMsg(UnauthorizedResponse)
}
}
}
"login success with credential (admin,user) but after that access with expired token" in {
val expiredConfig = ConfigFactory.parseString(
"""
|security {
| expiration = 2 seconds
| secret_key = "thisjusasodifsodifj"
| header = "HS256"
|}
""".stripMargin)
val authenticatorService = system.actorOf(AuthenticatorService.props(JwtTokenService(SecurityConfig.securityConfig(expiredConfig))))
authenticatorService ! LoginRequest("admin", "user")
expectMsgPF() {
case LoginSuccessResponse(token: JwtToken) => {
Thread.sleep(3000)
authenticatorService ! AuthenticationRequest(token.authToken)
expectMsg(TokenExpiredResponse)
}
}
}
"invalid token authentication" in {
val authenticatorService = system.actorOf(AuthenticatorService.props(JwtTokenService(securityConfig)))
authenticatorService ! AuthenticationRequest("123")
expectMsg(InvalidTokenResponse)
}
}
}
| rafajpet/iot-pood | iot-pood-management/src/test/scala/iot/pood/management/actor/AuthenticatorServiceTest.scala | Scala | mit | 3,772 |
package caustic.benchmark.runtime
import caustic.runtime._
import scala.util.Random
/**
* Benchmarks the throughput of the runtime under varying workloads. Adjusting the size of the key
* space and the relative proportion of keys that are read and written in each transaction
* implicitly changes the contention probability, the likelihood that any two concurrent
* transactions conflict.
*/
object ThroughputBenchmark extends App {
val keys = 1000 // Total number of keys.
val reads = 0.01 // Percentage of keys read in each program.
val writes = 0.00 // Percentage of keys written in each program.
val attempts = 10000 // Number of attempts per thread.
val runtime = Runtime(Volume.Memory()) // In-memory runtime.
(1 to 8) foreach { threads =>
println {
      // Construct the specified number of threads and concurrently generate and execute programs.
val program = Seq.fill(threads)(Seq.fill(attempts)(gen))
val current = System.nanoTime()
val success = program.par.map(_.map(runtime.execute).count(_.isSuccess)).sum
val elapsed = System.nanoTime() - current
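      // Report throughput as successful executions per second (elapsed is measured in nanoseconds).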
1E9 * success / elapsed
}
}
/**
   * Generates a randomized transaction that reads and writes the configured fractions of keys
   * drawn from a key space of the configured size.
*
* @return Randomly generated transaction.
*/
def gen: Program =
(random(keys, reads).map(read) ++ random(keys, writes).map(write(_, real(1)))).reduce(cons)
/**
   * Returns a sequence of (n * l).toInt distinct integers drawn uniformly at random from [0, n),
   * encoded as text programs.
   *
   * @param n Population size.
   * @param l Fraction of the population to sample.
   * @return Uniformly random integers.
*/
def random(n: Int, l: Double): Seq[Program] =
Random.shuffle(Seq.range(0, n)).take((n * l).toInt).map(x => text(x.toString))
}
| ashwin153/caustic | caustic-benchmark/src/main/scala/caustic/benchmark/runtime/ThroughputBenchmark.scala | Scala | apache-2.0 | 1,885 |
package org.jetbrains.plugins.dotty.lang.parser.parsing.params
/**
* @author adkozlov
*/
object ParamClause extends org.jetbrains.plugins.scala.lang.parser.parsing.params.ParamClause {
override protected val params = Params
}
| katejim/intellij-scala | src/org/jetbrains/plugins/dotty/lang/parser/parsing/params/ParamClause.scala | Scala | apache-2.0 | 233 |
package object regex {
val validChars = (' ' to '~').toSet
}
| dkesler/regex-crossword-solver | src/main/scala/regex/package.scala | Scala | mit | 63 |
package at.logic.gapt.prooftool
/**
* Created by IntelliJ IDEA.
* User: mrukhaia
* Date: 2/3/11
* Time: 4:25 PM
*/
import at.logic.gapt.language.hol.toPrettyString
import at.logic.gapt.proofs.lk.base.{ FSequent, Sequent }
import at.logic.gapt.expr._
import at.logic.gapt.proofs.occurrences.{ FormulaOccurrence, defaultFormulaOccurrenceFactory }
import at.logic.gapt.proofs.algorithms.ceres.struct.ClauseSetSymbol
import at.logic.gapt.proofs.algorithms.ceres.PStructToExpressionTree.ProjectionSetSymbol
import org.scilab.forge.jlatexmath.{ TeXIcon, TeXConstants, TeXFormula }
import java.awt.{ Color, Font }
import java.awt.image.BufferedImage
import swing._
import event.{ MouseClicked, MouseEntered, MouseExited, WindowDeactivated }
import java.awt.event.MouseEvent
import at.logic.gapt.language.schema._
import at.logic.gapt.utils.latex.nameToLatexString
import collection.mutable
import at.logic.gapt.expr.Tindex
object DrawSequent {
implicit val factory = defaultFormulaOccurrenceFactory
implicit def fo2occ( f: HOLFormula ) = factory.createFormulaOccurrence( f, Seq[FormulaOccurrence]() )
implicit def fseq2seq( s: FSequent ) = Sequent( s._1 map fo2occ, s._2 map fo2occ )
//used by DrawClList
def apply( seq: Sequent, ft: Font, str: String ): FlowPanel = if ( !str.isEmpty ) {
val set: Set[FormulaOccurrence] = ( seq.antecedent.filter( fo => formulaToLatexString( fo.formula ).contains( str ) ) ++
seq.succedent.filter( fo => formulaToLatexString( fo.formula ).contains( str ) ) ).toSet
val fp = apply( seq, ft, None ) // first create FlowPanel to pass the event
ProofToolPublisher.publish( ChangeFormulaColor( set, Color.green, reset = false ) )
fp
} else apply( seq, ft, None )
//used by DrawClList to draw FSequents
def applyF( seq: FSequent, ft: Font, str: String ): FlowPanel = apply( fseq2seq( seq ), ft, str )
//used by DrawProof
def apply( seq: Sequent, ft: Font, vis_occ: Option[Set[FormulaOccurrence]] ) = new FlowPanel {
opaque = false // Necessary to draw the proof properly
hGap = 0 // no gap between components
listenTo( ProofToolPublisher )
reactions += {
// since panel is not opaque, it cannot have a background color,
case ChangeSequentColor( s, color, reset ) => // so change background of each component.
if ( s == seq ) contents.foreach( c => c.background = color )
else if ( reset ) contents.foreach( c => c.background = Color.white )
}
private var first = true
for ( f <- seq.antecedent ) {
if ( vis_occ == None || vis_occ.get.contains( f ) ) {
if ( !first ) contents += LatexLabel( ft, ",", null )
else first = false
contents += formulaToLabel( f, ft )
}
}
contents += LatexLabel( ft, "\\\\vdash", null ) // \\u22a2
first = true
for ( f <- seq.succedent ) {
if ( vis_occ == None || vis_occ.get.contains( f ) ) {
if ( !first ) contents += LatexLabel( ft, ",", null )
else first = false
contents += formulaToLabel( f, ft )
}
}
}
def formulaToLabel( f: HOLFormula, ft: Font ): LatexLabel = LatexLabel( ft, formulaToLatexString( f ), fo2occ( f ) )
def formulaToLabel( fo: FormulaOccurrence, ft: Font ): LatexLabel = LatexLabel( ft, formulaToLatexString( fo.formula ), fo )
// this method is used by DrawTree when drawing projections.
// also by ProofToLatexExporter.
def sequentToLatexString( seq: Sequent ): String = {
var s = " "
var first = true
for ( f <- seq.antecedent ) {
if ( !first ) s = s + ", "
else first = false
s = s + formulaToLatexString( f.formula )
}
s = s + " \\\\vdash " // \\u22a2
first = true
for ( f <- seq.succedent ) {
if ( !first ) s = s + ", "
else first = false
s = s + formulaToLatexString( f.formula )
}
s
}
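  /**
   * Renders a HOL expression as a LaTeX string. The `outermost` flag marks that the expression
   * is not nested inside another connective, so surrounding parentheses can be omitted.
   */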
def formulaToLatexString( t: LambdaExpression, outermost: Boolean = true ): String = t match {
case Neg( f ) => """\\neg """ + formulaToLatexString( f, outermost = false )
case And( f1, f2 ) =>
if ( outermost )
formulaToLatexString( f1, outermost = false ) + """ \\wedge """ + formulaToLatexString( f2, outermost = false )
else
"(" + formulaToLatexString( f1, outermost = false ) + """ \\wedge """ + formulaToLatexString( f2, outermost = false ) + ")"
case Or( f1, f2 ) =>
if ( outermost )
formulaToLatexString( f1, outermost = false ) + """ \\vee """ + formulaToLatexString( f2, outermost = false )
else
"(" + formulaToLatexString( f1, outermost = false ) + """ \\vee """ + formulaToLatexString( f2, outermost = false ) + ")"
case Imp( f1, f2 ) =>
if ( outermost )
formulaToLatexString( f1, outermost = false ) + """ \\supset """ + formulaToLatexString( f2, outermost = false )
else
"(" + formulaToLatexString( f1, outermost = false ) + """ \\supset """ + formulaToLatexString( f2, outermost = false ) + ")"
case Ex( v, f ) =>
if ( v.exptype == Tindex -> Tindex )
"(" + """\\exists^{hyp} """ + formulaToLatexString( v, outermost = false ) + """)""" + formulaToLatexString( f, outermost = false )
else
"(" + """\\exists """ + formulaToLatexString( v, outermost = false ) + """)""" + formulaToLatexString( f, outermost = false )
case All( v, f ) =>
if ( v.exptype == Tindex -> Tindex )
"(" + """\\forall^{hyp} """ + formulaToLatexString( v, outermost = false ) + """)""" + formulaToLatexString( f, outermost = false )
else
"(" + """\\forall """ + formulaToLatexString( v, outermost = false ) + """)""" + formulaToLatexString( f, outermost = false )
case BigAnd( v, formula, init, end ) =>
""" \\bigwedge_{ """ + formulaToLatexString( v, outermost = false ) + "=" + formulaToLatexString( init ) + "}^{" + formulaToLatexString( end, outermost = false ) + "}" + formulaToLatexString( formula, outermost = false )
case BigOr( v, formula, init, end ) =>
""" \\bigvee_{ """ + formulaToLatexString( v, outermost = false ) + "=" + formulaToLatexString( init, outermost = false ) + "}^{" + formulaToLatexString( end, outermost = false ) + "}" + formulaToLatexString( formula )
case IndexedPredicate( constant, indices ) if constant != BiggerThanC =>
{
if ( constant.sym.isInstanceOf[ClauseSetSymbol] ) { //parse cl variables to display cut-configuration.
val cl = constant.name.asInstanceOf[ClauseSetSymbol]
"cl^{" + cl.name + ",(" + cl.cut_occs._1.foldLeft( "" )( ( s, f ) => s + { if ( s != "" ) ", " else "" } + formulaToLatexString( f, outermost = false ) ) + " | " +
cl.cut_occs._2.foldLeft( "" )( ( s, f ) => s + { if ( s != "" ) ", " else "" } + formulaToLatexString( f, outermost = false ) ) + ")}"
} else if ( constant.sym.isInstanceOf[ProjectionSetSymbol] ) { //parse pr variables to display cut-configuration.
val pr = constant.name.asInstanceOf[ProjectionSetSymbol]
"pr^{" + pr.name + ",(" + pr.cut_occs._1.foldLeft( "" )( ( s, f ) => s + { if ( s != "" ) ", " else "" } + formulaToLatexString( f, outermost = false ) ) + " | " +
pr.cut_occs._2.foldLeft( "" )( ( s, f ) => s + { if ( s != "" ) ", " else "" } + formulaToLatexString( f, outermost = false ) ) + ")}"
} //or return the predicate symbol
else nameToLatexString( constant.name.toString )
} + { if ( indices.isEmpty ) "" else indices.map( x => formulaToLatexString( x ) ).mkString( "_{", ",", "}" ) }
case HOLAtom( pred, args ) =>
val name = pred match {
case Const( n, _ ) => n
case Var( n, _ ) => n
case _ => throw new Exception( "An atom can only contain a const or a var on the outermost level!" )
}
if ( args.size == 2 && name.toString.matches( """(=|!=|\\\\neq|<|>|\\\\leq|\\\\geq|\\\\in|\\+|-|\\*|/)""" ) ) { //!name.toString.matches("""[\\w\\p{InGreek}]*""")) {
//formats infix formulas
if ( outermost ) {
//if the whole formula is an infix atom, we can skip parenthesis
formulaToLatexString( args.head, outermost = false ) + " " + nameToLatexString( name.toString ) + " " + formulaToLatexString( args.last, outermost = false )
} else {
"(" + formulaToLatexString( args.head, outermost = false ) + " " + nameToLatexString( name.toString ) + " " + formulaToLatexString( args.last, outermost = false ) + ")"
}
} else {
//formats everything else
nameToLatexString( name.toString ) + { if ( args.isEmpty ) "" else args.map( x => formulaToLatexString( x, outermost = false ) ).mkString( "(", ",", ")" ) }
}
case indexedFOVar( name, index ) => name + "_{" + formulaToLatexString( index, outermost = false ) + "}"
case indexedOmegaVar( name, index ) => name + "_{" + formulaToLatexString( index, outermost = false ) + "}"
case v: Var if v.sym.isInstanceOf[ClauseSetSymbol] => //Fixme: never enters here because type of ClauseSetSymbol is changed
//parse cl variables to display cut-configuration.
val cl = v.sym.asInstanceOf[ClauseSetSymbol]
"cl^{" + cl.name + ",(" + cl.cut_occs._1.foldLeft( "" )( ( s, f ) => s + { if ( s != "" ) ", " else "" } + formulaToLatexString( f ) ) + " | " +
cl.cut_occs._2.foldLeft( "" )( ( s, f ) => s + { if ( s != "" ) ", " else "" } + formulaToLatexString( f, outermost = false ) ) + ")}"
case Var( name, _ ) if t.exptype == Tindex -> Tindex =>
"\\\\textbf {" + name.toString + "}"
case Var( name, _ ) => name
case Const( name, _ ) => name
case HOLFunction( f, args ) =>
val name = f match {
case Const( n, _ ) => n
case Var( n, _ ) => n
        case _ => throw new Exception( "A function can only contain a const or a var on the outermost level!" )
}
if ( name.toString == "EXP" )
args.last.asInstanceOf[IntVar].name + "^{" + parseIntegerTerm( args.head.asInstanceOf[IntegerTerm], 0 ) + "}"
else if ( args.size == 1 ) parseNestedUnaryFunction( name.toString, args.head, 1 )
else if ( args.size == 2 && name.toString.matches( """(=|!=|\\\\neq|<|>|\\\\leq|\\\\geq|\\\\in|\\+|-|\\*|/)""" ) ) //!name.toString.matches("""[\\w\\p{InGreek}]*"""))
"(" + formulaToLatexString( args.head, outermost = false ) + " " + nameToLatexString( name.toString ) + " " + formulaToLatexString( args.last, outermost = false ) + ")"
else nameToLatexString( name.toString ) + { if ( args.isEmpty ) "" else args.map( x => formulaToLatexString( x, outermost = false ) ).mkString( "(", ",", ")" ) }
case Abs( v, s ) => "(" + """ \\lambda """ + formulaToLatexString( v, outermost = false ) + """.""" + formulaToLatexString( s, outermost = false ) + ")"
case App( s, t ) => formulaToLatexString( s, outermost = false ) + "(" + formulaToLatexString( t, outermost = false ) + ")"
case t: IntegerTerm if t.exptype == Tindex => parseIntegerTerm( t, 0 )
}
def parseIntegerTerm( t: IntegerTerm, n: Int ): String = t match {
// FIXME: in the first case, we implicitly assume that all IntConsts are 0!
// this is just done for convenience, and should be changed ASAP
case z: IntConst => n.toString
case IntZero() => n.toString
case v: IntVar if n > 0 =>
toPrettyString( v ) + "+" + n.toString //TODO: why do we use to pretty string here? it doesn't handle LaTeX?
case v: IntVar /* if n <= 0 */ =>
toPrettyString( v ) //TODO: why do we use to pretty string here? it doesn't handle LaTeX?
case Succ( s ) => parseIntegerTerm( s, n + 1 )
case _ => throw new Exception( "Error in parseIntegerTerm(..) in gui" )
}
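  /**
   * Collapses nested applications of the same unary function into power notation,
   * e.g. s(s(s(x))) is rendered as s^{3}(x).
   */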
def parseNestedUnaryFunction( parent_name: String, t: LambdaExpression, n: Int ): String = t match {
case HOLFunction( name, args ) =>
if ( args.size == 1 && name.toString == parent_name ) parseNestedUnaryFunction( parent_name, args.head, n + 1 )
else parent_name + { if ( n > 1 ) "^{" + n.toString + "}" else "" } + "(" + formulaToLatexString( t ) + ")"
case _ => parent_name + { if ( n > 1 ) "^{" + n.toString + "}" else "" } + "(" + formulaToLatexString( t ) + ")"
}
}
object LatexLabel {
private val cache = mutable.Map[( String, Font ), TeXIcon]()
def clearCache() = this.synchronized( cache.clear() )
def apply( font: Font, latexText: String ): LatexLabel = apply( font, latexText, null )
def apply( font: Font, latexText: String, fo: FormulaOccurrence ): LatexLabel = {
val key = ( latexText, font )
this.synchronized( {
val icon = cache.getOrElseUpdate( key, {
val formula = try {
new TeXFormula( latexText )
} catch {
case e: Exception =>
throw new Exception( "Could not create formula " + latexText + ": " + e.getMessage, e )
}
val myicon = formula.createTeXIcon( TeXConstants.STYLE_DISPLAY, font.getSize )
val myimage = new BufferedImage( myicon.getIconWidth, myicon.getIconHeight, BufferedImage.TYPE_INT_ARGB )
val g2 = myimage.createGraphics()
g2.setColor( Color.white )
g2.fillRect( 0, 0, myicon.getIconWidth, myicon.getIconHeight )
myicon.paintIcon( null, g2, 0, 0 )
myicon
} )
new LatexLabel( font, latexText, icon, fo )
} )
}
}
class LatexLabel( val ft: Font, val latexText: String, val myicon: TeXIcon, fo: FormulaOccurrence )
extends Label( "", myicon, Alignment.Center ) {
background = Color.white
foreground = Color.black
font = ft
opaque = true
yLayoutAlignment = 0.5
if ( latexText == "," ) {
border = Swing.EmptyBorder( font.getSize / 5, 2, 0, font.getSize / 5 )
icon = null
text = latexText
}
if ( latexText == "\\\\vdash" ) border = Swing.EmptyBorder( font.getSize / 6 )
listenTo( mouse.moves, mouse.clicks, ProofToolPublisher )
reactions += {
case e: MouseEntered => foreground = Color.blue
case e: MouseExited => foreground = Color.black
case e: MouseClicked if e.peer.getButton == MouseEvent.BUTTON3 && e.clicks == 2 =>
val d = new Dialog {
resizable = false
peer.setUndecorated( true )
contents = new TextField( latexText ) {
editable = false
border = Swing.EmptyBorder( 7 )
tooltip = "Select text and right-click to copy."
font = font.deriveFont( Font.PLAIN, 14 )
listenTo( mouse.clicks )
reactions += {
case e: MouseClicked if e.peer.getButton == MouseEvent.BUTTON3 => copy()
}
}
// modal = true
reactions += {
case e: WindowDeactivated if e.source == this => dispose()
}
}
d.location = locationOnScreen
d.open()
case ChangeFormulaColor( set, color, reset ) =>
if ( set.contains( fo ) ) background = color
else if ( reset ) background = Color.white
}
}
 | gisellemnr/gapt | src/main/scala/at/logic/gapt/prooftool/DrawSequent.scala | Scala | gpl-3.0 | 14,902
package com.rasterfoundry.datamodel
import io.circe._
import cats.syntax.either._
sealed abstract class BandDataType(val repr: String) {
override def toString = repr
}
object BandDataType {
case object Diverging extends BandDataType("DIVERGING")
case object Sequential extends BandDataType("SEQUENTIAL")
case object Categorical extends BandDataType("CATEGORICAL")
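  // Note: an unrecognized string falls through the match below and throws a MatchError, which
  // the decoder further down converts into a decoding failure.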
def fromString(s: String): BandDataType = s.toUpperCase match {
case "DIVERGING" => Diverging
case "SEQUENTIAL" => Sequential
case "CATEGORICAL" => Categorical
}
implicit val bandDataTypeEncoder: Encoder[BandDataType] =
Encoder.encodeString.contramap[BandDataType](_.toString)
implicit val bandDataTypeDecoder: Decoder[BandDataType] =
Decoder.decodeString.emap { str =>
Either.catchNonFatal(fromString(str)).leftMap(_ => "BandDataType")
}
}
| aaronxsu/raster-foundry | app-backend/datamodel/src/main/scala/BandDataType.scala | Scala | apache-2.0 | 860 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import java.io._
import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
import com.esotericsoftware.kryo.io.{Input, Output}
import org.apache.spark.{SparkConf, SparkEnv, SparkException}
import org.apache.spark.internal.config.MEMORY_OFFHEAP_ENABLED
import org.apache.spark.memory._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical.BroadcastMode
import org.apache.spark.sql.types.LongType
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.map.BytesToBytesMap
import org.apache.spark.util.{KnownSizeEstimation, Utils}
/**
* Interface for a hashed relation by some key. Use [[HashedRelation.apply]] to create a concrete
* object.
*/
private[execution] sealed trait HashedRelation extends KnownSizeEstimation {
/**
* Returns matched rows.
*
   * Returns null if there are no matched rows.
*/
def get(key: InternalRow): Iterator[InternalRow]
/**
* Returns matched rows for a key that has only one column with LongType.
*
   * Returns null if there are no matched rows.
*/
def get(key: Long): Iterator[InternalRow] = {
throw new UnsupportedOperationException
}
/**
* Returns the matched single row.
*/
def getValue(key: InternalRow): InternalRow
/**
   * Returns the matched single row for a key that has only one column of LongType.
*/
def getValue(key: Long): InternalRow = {
throw new UnsupportedOperationException
}
/**
* Returns true iff all the keys are unique.
*/
def keyIsUnique: Boolean
/**
* Returns a read-only copy of this, to be safely used in current thread.
*/
def asReadOnlyCopy(): HashedRelation
/**
* Release any used resources.
*/
def close(): Unit
}
private[execution] object HashedRelation {
/**
* Create a HashedRelation from an Iterator of InternalRow.
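   *
   * A minimal usage sketch (assuming `rows: Iterator[InternalRow]`, `keyExprs: Seq[Expression]`
   * and a probe row `keyRow: InternalRow` are already in scope):
   * {{{
   *   val relation = HashedRelation(rows, keyExprs)
   *   val matches = relation.get(keyRow) // null if there are no matching rows
   * }}}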
*/
def apply(
input: Iterator[InternalRow],
key: Seq[Expression],
sizeEstimate: Int = 64,
taskMemoryManager: TaskMemoryManager = null): HashedRelation = {
val mm = Option(taskMemoryManager).getOrElse {
new TaskMemoryManager(
new StaticMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue,
1),
0)
}
if (key.length == 1 && key.head.dataType == LongType) {
LongHashedRelation(input, key, sizeEstimate, mm)
} else {
UnsafeHashedRelation(input, key, sizeEstimate, mm)
}
}
}
/**
 * A HashedRelation for UnsafeRow, which is backed by a BytesToBytesMap.
*
* It's serialized in the following format:
* [number of keys]
* [size of key] [size of value] [key bytes] [bytes for value]
*/
private[joins] class UnsafeHashedRelation(
private var numFields: Int,
private var binaryMap: BytesToBytesMap)
extends HashedRelation with Externalizable with KryoSerializable {
private[joins] def this() = this(0, null) // Needed for serialization
override def keyIsUnique: Boolean = binaryMap.numKeys() == binaryMap.numValues()
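  // Each read-only copy shares the underlying binaryMap but gets its own resultRow buffer, so
  // copies handed to different threads do not clobber each other's probe results.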
override def asReadOnlyCopy(): UnsafeHashedRelation = {
new UnsafeHashedRelation(numFields, binaryMap)
}
override def estimatedSize: Long = binaryMap.getTotalMemoryConsumption
// re-used in get()/getValue()
var resultRow = new UnsafeRow(numFields)
override def get(key: InternalRow): Iterator[InternalRow] = {
val unsafeKey = key.asInstanceOf[UnsafeRow]
val map = binaryMap // avoid the compiler error
val loc = new map.Location // this could be allocated in stack
binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset,
unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode())
if (loc.isDefined) {
new Iterator[UnsafeRow] {
private var _hasNext = true
override def hasNext: Boolean = _hasNext
override def next(): UnsafeRow = {
resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength)
_hasNext = loc.nextValue()
resultRow
}
}
} else {
null
}
}
def getValue(key: InternalRow): InternalRow = {
val unsafeKey = key.asInstanceOf[UnsafeRow]
val map = binaryMap // avoid the compiler error
val loc = new map.Location // this could be allocated in stack
binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset,
unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode())
if (loc.isDefined) {
resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength)
resultRow
} else {
null
}
}
override def close(): Unit = {
binaryMap.free()
}
override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
write(out.writeInt, out.writeLong, out.write)
}
override def write(kryo: Kryo, out: Output): Unit = Utils.tryOrIOException {
write(out.writeInt, out.writeLong, out.write)
}
private def write(
writeInt: (Int) => Unit,
writeLong: (Long) => Unit,
writeBuffer: (Array[Byte], Int, Int) => Unit) : Unit = {
writeInt(numFields)
// TODO: move these into BytesToBytesMap
writeLong(binaryMap.numKeys())
writeLong(binaryMap.numValues())
var buffer = new Array[Byte](64)
def write(base: Object, offset: Long, length: Int): Unit = {
if (buffer.length < length) {
buffer = new Array[Byte](length)
}
Platform.copyMemory(base, offset, buffer, Platform.BYTE_ARRAY_OFFSET, length)
writeBuffer(buffer, 0, length)
}
val iter = binaryMap.iterator()
while (iter.hasNext) {
val loc = iter.next()
// [key size] [values size] [key bytes] [value bytes]
writeInt(loc.getKeyLength)
writeInt(loc.getValueLength)
write(loc.getKeyBase, loc.getKeyOffset, loc.getKeyLength)
write(loc.getValueBase, loc.getValueOffset, loc.getValueLength)
}
}
override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
read(() => in.readInt(), () => in.readLong(), in.readFully)
}
private def read(
readInt: () => Int,
readLong: () => Long,
readBuffer: (Array[Byte], Int, Int) => Unit): Unit = {
numFields = readInt()
resultRow = new UnsafeRow(numFields)
val nKeys = readLong()
val nValues = readLong()
// This is used in Broadcast, shared by multiple tasks, so we use on-heap memory
// TODO(josh): This needs to be revisited before we merge this patch; making this change now
// so that tests compile:
val taskMemoryManager = new TaskMemoryManager(
new StaticMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue,
1),
0)
val pageSizeBytes = Option(SparkEnv.get).map(_.memoryManager.pageSizeBytes)
.getOrElse(new SparkConf().getSizeAsBytes("spark.buffer.pageSize", "16m"))
// TODO(josh): We won't need this dummy memory manager after future refactorings; revisit
// during code review
binaryMap = new BytesToBytesMap(
taskMemoryManager,
(nKeys * 1.5 + 1).toInt, // reduce hash collision
pageSizeBytes)
var i = 0
var keyBuffer = new Array[Byte](1024)
var valuesBuffer = new Array[Byte](1024)
while (i < nValues) {
val keySize = readInt()
val valuesSize = readInt()
if (keySize > keyBuffer.length) {
keyBuffer = new Array[Byte](keySize)
}
readBuffer(keyBuffer, 0, keySize)
if (valuesSize > valuesBuffer.length) {
valuesBuffer = new Array[Byte](valuesSize)
}
readBuffer(valuesBuffer, 0, valuesSize)
val loc = binaryMap.lookup(keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize)
      val putSucceeded = loc.append(keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize,
        valuesBuffer, Platform.BYTE_ARRAY_OFFSET, valuesSize)
      if (!putSucceeded) {
binaryMap.free()
throw new IOException("Could not allocate memory to grow BytesToBytesMap")
}
i += 1
}
}
override def read(kryo: Kryo, in: Input): Unit = Utils.tryOrIOException {
read(() => in.readInt(), () => in.readLong(), in.readBytes)
}
}
private[joins] object UnsafeHashedRelation {
def apply(
input: Iterator[InternalRow],
key: Seq[Expression],
sizeEstimate: Int,
taskMemoryManager: TaskMemoryManager): HashedRelation = {
val pageSizeBytes = Option(SparkEnv.get).map(_.memoryManager.pageSizeBytes)
.getOrElse(new SparkConf().getSizeAsBytes("spark.buffer.pageSize", "16m"))
val binaryMap = new BytesToBytesMap(
taskMemoryManager,
      // Only 70% of the slots can be used before growing; more capacity helps to reduce collisions
(sizeEstimate * 1.5 + 1).toInt,
pageSizeBytes)
// Create a mapping of buildKeys -> rows
val keyGenerator = UnsafeProjection.create(key)
var numFields = 0
while (input.hasNext) {
val row = input.next().asInstanceOf[UnsafeRow]
numFields = row.numFields()
val key = keyGenerator(row)
if (!key.anyNull) {
val loc = binaryMap.lookup(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes)
val success = loc.append(
key.getBaseObject, key.getBaseOffset, key.getSizeInBytes,
row.getBaseObject, row.getBaseOffset, row.getSizeInBytes)
if (!success) {
binaryMap.free()
// scalastyle:off throwerror
          throw new SparkOutOfMemoryError("There is not enough memory to build the hash map")
// scalastyle:on throwerror
}
}
}
new UnsafeHashedRelation(numFields, binaryMap)
}
}
/**
 * An append-only hash map mapping Long keys to UnsafeRows.
*
* The underlying bytes of all values (UnsafeRows) are packed together as a single byte array
* (`page`) in this format:
*
 * [bytes of row1][address1][bytes of row2][address2] ...
 *
 * address1 (8 bytes) packs the offset and size of the next value for the same key as row1; any
 * key can have multiple values. The address stored after the last value of every key is 0.
*
* The keys and addresses of their values could be stored in two modes:
*
* 1) sparse mode: the keys and addresses are stored in `array` as:
*
* [key1][address1][key2][address2]...[]
*
 * address1 (Long) packs the offset (in `page`) and size of the value for key1. The slot of key1
 * is determined by a multiplicative hash of the key (see firstSlot below); collisions are
 * resolved by probing forward two array slots at a time, since each entry occupies two longs.
*
* 2) dense mode: all the addresses are packed into a single array of long, as:
*
* [address1] [address2] ...
*
 * address1 (Long) packs the offset (in `page`) and size of the value for key1; its position in
 * the array is determined by `key1 - minKey`.
*
 * The map is created in sparse mode; key-value pairs can then be appended to it. Once appending
 * is finished, the caller can call optimize() to try to turn the map into dense mode, which is
 * faster to probe.
*
* see http://java-performance.info/implementing-world-fastest-java-int-to-int-hash-map/
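 *
 * As a concrete sketch of the address encoding used below (see toAddress/toOffset/toSize): a
 * value of 24 bytes whose data starts 40 bytes past Platform.LONG_ARRAY_OFFSET in `page` is
 * stored as the single long (40L << 28) | 24, and a stored address of 0 terminates a key's
 * value chain.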
*/
private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, capacity: Int)
extends MemoryConsumer(mm) with Externalizable with KryoSerializable {
// Whether the keys are stored in dense mode or not.
private var isDense = false
// The minimum key
private var minKey = Long.MaxValue
// The maximum key
private var maxKey = Long.MinValue
// The array to store the key and offset of UnsafeRow in the page.
//
  // Sparse mode: [key1] [offset1 | size1] [key2] [offset2 | size2] ...
// Dense mode: [offset1 | size1] [offset2 | size2]
private var array: Array[Long] = null
private var mask: Int = 0
// The page to store all bytes of UnsafeRow and the pointer to next rows.
// [row1][pointer1] [row2][pointer2]
private var page: Array[Long] = null
// Current write cursor in the page.
private var cursor: Long = Platform.LONG_ARRAY_OFFSET
// The number of bits for size in address
private val SIZE_BITS = 28
private val SIZE_MASK = 0xfffffff
// The total number of values of all keys.
private var numValues = 0L
// The number of unique keys.
private var numKeys = 0L
// needed by serializer
def this() = {
this(
new TaskMemoryManager(
new StaticMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue,
1),
0),
0)
}
private def ensureAcquireMemory(size: Long): Unit = {
// do not support spilling
val got = acquireMemory(size)
if (got < size) {
freeMemory(got)
throw new SparkException(s"Can't acquire $size bytes memory to build hash relation, " +
s"got $got bytes")
}
}
private def init(): Unit = {
if (mm != null) {
      require(capacity < 512000000, "Cannot broadcast more than 512 million rows")
var n = 1
while (n < capacity) n *= 2
ensureAcquireMemory(n * 2L * 8 + (1 << 20))
array = new Array[Long](n * 2)
mask = n * 2 - 2
page = new Array[Long](1 << 17) // 1M bytes
}
}
init()
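  // Spilling is not supported by this map, so always report that 0 bytes were freed.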
def spill(size: Long, trigger: MemoryConsumer): Long = 0L
/**
* Returns whether all the keys are unique.
*/
def keyIsUnique: Boolean = numKeys == numValues
/**
* Returns total memory consumption.
*/
def getTotalMemoryConsumption: Long = array.length * 8L + page.length * 8L
/**
   * Returns the first array slot to probe for the given key (sparse mode).
*/
private def firstSlot(key: Long): Int = {
val h = key * 0x9E3779B9L
(h ^ (h >> 32)).toInt & mask
}
/**
* Returns the next probe in the array.
*/
private def nextSlot(pos: Int): Int = (pos + 2) & mask
private[this] def toAddress(offset: Long, size: Int): Long = {
((offset - Platform.LONG_ARRAY_OFFSET) << SIZE_BITS) | size
}
private[this] def toOffset(address: Long): Long = {
(address >>> SIZE_BITS) + Platform.LONG_ARRAY_OFFSET
}
private[this] def toSize(address: Long): Int = {
(address & SIZE_MASK).toInt
}
private def getRow(address: Long, resultRow: UnsafeRow): UnsafeRow = {
resultRow.pointTo(page, toOffset(address), toSize(address))
resultRow
}
/**
* Returns the single UnsafeRow for given key, or null if not found.
*/
def getValue(key: Long, resultRow: UnsafeRow): UnsafeRow = {
if (isDense) {
if (key >= minKey && key <= maxKey) {
val value = array((key - minKey).toInt)
if (value > 0) {
return getRow(value, resultRow)
}
}
} else {
var pos = firstSlot(key)
while (array(pos + 1) != 0) {
if (array(pos) == key) {
return getRow(array(pos + 1), resultRow)
}
pos = nextSlot(pos)
}
}
null
}
/**
* Returns an iterator of UnsafeRow for multiple linked values.
*/
private def valueIter(address: Long, resultRow: UnsafeRow): Iterator[UnsafeRow] = {
new Iterator[UnsafeRow] {
var addr = address
override def hasNext: Boolean = addr != 0
override def next(): UnsafeRow = {
val offset = toOffset(addr)
val size = toSize(addr)
resultRow.pointTo(page, offset, size)
addr = Platform.getLong(page, offset + size)
resultRow
}
}
}
/**
* Returns an iterator for all the values for the given key, or null if no value found.
*/
def get(key: Long, resultRow: UnsafeRow): Iterator[UnsafeRow] = {
if (isDense) {
if (key >= minKey && key <= maxKey) {
val value = array((key - minKey).toInt)
if (value > 0) {
return valueIter(value, resultRow)
}
}
} else {
var pos = firstSlot(key)
while (array(pos + 1) != 0) {
if (array(pos) == key) {
return valueIter(array(pos + 1), resultRow)
}
pos = nextSlot(pos)
}
}
null
}
/**
* Appends the key and row into this map.
*/
def append(key: Long, row: UnsafeRow): Unit = {
val sizeInBytes = row.getSizeInBytes
if (sizeInBytes >= (1 << SIZE_BITS)) {
throw new UnsupportedOperationException("Does not support row that is larger than 256M")
}
if (key < minKey) {
minKey = key
}
if (key > maxKey) {
maxKey = key
}
grow(row.getSizeInBytes)
// copy the bytes of UnsafeRow
val offset = cursor
Platform.copyMemory(row.getBaseObject, row.getBaseOffset, page, cursor, row.getSizeInBytes)
cursor += row.getSizeInBytes
Platform.putLong(page, cursor, 0)
cursor += 8
numValues += 1
updateIndex(key, toAddress(offset, row.getSizeInBytes))
}
/**
* Update the address in array for given key.
*/
private def updateIndex(key: Long, address: Long): Unit = {
var pos = firstSlot(key)
assert(numKeys < array.length / 2)
while (array(pos) != key && array(pos + 1) != 0) {
pos = nextSlot(pos)
}
if (array(pos + 1) == 0) {
// this is the first value for this key, put the address in array.
array(pos) = key
array(pos + 1) = address
numKeys += 1
if (numKeys * 4 > array.length) {
// reach half of the capacity
if (array.length < (1 << 30)) {
// Cannot allocate an array with 2G elements
growArray()
} else if (numKeys > array.length / 2 * 0.75) {
// The fill ratio should be less than 0.75
throw new UnsupportedOperationException(
"Cannot build HashedRelation with more than 1/3 billions unique keys")
}
}
} else {
// there are some values for this key, put the address in the front of them.
val pointer = toOffset(address) + toSize(address)
Platform.putLong(page, pointer, array(pos + 1))
array(pos + 1) = address
}
}
private def grow(inputRowSize: Int): Unit = {
    // There are 8 bytes for the pointer to the next value
val neededNumWords = (cursor - Platform.LONG_ARRAY_OFFSET + 8 + inputRowSize + 7) / 8
if (neededNumWords > page.length) {
if (neededNumWords > (1 << 30)) {
throw new UnsupportedOperationException(
"Can not build a HashedRelation that is larger than 8G")
}
val newNumWords = math.max(neededNumWords, math.min(page.length * 2, 1 << 30))
ensureAcquireMemory(newNumWords * 8L)
val newPage = new Array[Long](newNumWords.toInt)
Platform.copyMemory(page, Platform.LONG_ARRAY_OFFSET, newPage, Platform.LONG_ARRAY_OFFSET,
cursor - Platform.LONG_ARRAY_OFFSET)
val used = page.length
page = newPage
freeMemory(used * 8L)
}
}
private def growArray(): Unit = {
var old_array = array
val n = array.length
numKeys = 0
ensureAcquireMemory(n * 2 * 8L)
array = new Array[Long](n * 2)
mask = n * 2 - 2
var i = 0
while (i < old_array.length) {
if (old_array(i + 1) > 0) {
updateIndex(old_array(i), old_array(i + 1))
}
i += 2
}
old_array = null // release the reference to old array
freeMemory(n * 8L)
}
/**
* Try to turn the map into dense mode, which is faster to probe.
*/
def optimize(): Unit = {
val range = maxKey - minKey
// Convert to dense mode if it does not require more memory or could fit within L1 cache
// SPARK-16740: Make sure range doesn't overflow if minKey has a large negative value
if (range >= 0 && (range < array.length || range < 1024)) {
try {
ensureAcquireMemory((range + 1) * 8L)
} catch {
case e: SparkException =>
          // there is not enough memory to convert
return
}
val denseArray = new Array[Long]((range + 1).toInt)
var i = 0
while (i < array.length) {
if (array(i + 1) > 0) {
val idx = (array(i) - minKey).toInt
denseArray(idx) = array(i + 1)
}
i += 2
}
val old_length = array.length
array = denseArray
isDense = true
freeMemory(old_length * 8L)
}
}
/**
* Free all the memory acquired by this map.
*/
def free(): Unit = {
if (page != null) {
freeMemory(page.length * 8L)
page = null
}
if (array != null) {
freeMemory(array.length * 8L)
array = null
}
}
private def writeLongArray(
writeBuffer: (Array[Byte], Int, Int) => Unit,
arr: Array[Long],
len: Int): Unit = {
val buffer = new Array[Byte](4 << 10)
var offset: Long = Platform.LONG_ARRAY_OFFSET
val end = len * 8L + Platform.LONG_ARRAY_OFFSET
while (offset < end) {
val size = Math.min(buffer.length, end - offset)
Platform.copyMemory(arr, offset, buffer, Platform.BYTE_ARRAY_OFFSET, size)
writeBuffer(buffer, 0, size.toInt)
offset += size
}
}
private def write(
writeBoolean: (Boolean) => Unit,
writeLong: (Long) => Unit,
writeBuffer: (Array[Byte], Int, Int) => Unit): Unit = {
writeBoolean(isDense)
writeLong(minKey)
writeLong(maxKey)
writeLong(numKeys)
writeLong(numValues)
writeLong(array.length)
writeLongArray(writeBuffer, array, array.length)
val used = ((cursor - Platform.LONG_ARRAY_OFFSET) / 8).toInt
writeLong(used)
writeLongArray(writeBuffer, page, used)
}
override def writeExternal(output: ObjectOutput): Unit = {
write(output.writeBoolean, output.writeLong, output.write)
}
override def write(kryo: Kryo, out: Output): Unit = {
write(out.writeBoolean, out.writeLong, out.write)
}
private def readLongArray(
readBuffer: (Array[Byte], Int, Int) => Unit,
length: Int): Array[Long] = {
val array = new Array[Long](length)
val buffer = new Array[Byte](4 << 10)
var offset: Long = Platform.LONG_ARRAY_OFFSET
val end = length * 8L + Platform.LONG_ARRAY_OFFSET
while (offset < end) {
val size = Math.min(buffer.length, end - offset)
readBuffer(buffer, 0, size.toInt)
Platform.copyMemory(buffer, Platform.BYTE_ARRAY_OFFSET, array, offset, size)
offset += size
}
array
}
private def read(
readBoolean: () => Boolean,
readLong: () => Long,
readBuffer: (Array[Byte], Int, Int) => Unit): Unit = {
isDense = readBoolean()
minKey = readLong()
maxKey = readLong()
numKeys = readLong()
numValues = readLong()
val length = readLong().toInt
mask = length - 2
array = readLongArray(readBuffer, length)
val pageLength = readLong().toInt
page = readLongArray(readBuffer, pageLength)
// Restore cursor variable to make this map able to be serialized again on executors.
cursor = pageLength * 8 + Platform.LONG_ARRAY_OFFSET
}
override def readExternal(in: ObjectInput): Unit = {
read(() => in.readBoolean(), () => in.readLong(), in.readFully)
}
override def read(kryo: Kryo, in: Input): Unit = {
read(() => in.readBoolean(), () => in.readLong(), in.readBytes)
}
}
private[joins] class LongHashedRelation(
private var nFields: Int,
private var map: LongToUnsafeRowMap) extends HashedRelation with Externalizable {
private var resultRow: UnsafeRow = new UnsafeRow(nFields)
// Needed for serialization (it is public to make Java serialization work)
def this() = this(0, null)
override def asReadOnlyCopy(): LongHashedRelation = new LongHashedRelation(nFields, map)
override def estimatedSize: Long = map.getTotalMemoryConsumption
override def get(key: InternalRow): Iterator[InternalRow] = {
if (key.isNullAt(0)) {
null
} else {
get(key.getLong(0))
}
}
override def getValue(key: InternalRow): InternalRow = {
if (key.isNullAt(0)) {
null
} else {
getValue(key.getLong(0))
}
}
override def get(key: Long): Iterator[InternalRow] = map.get(key, resultRow)
override def getValue(key: Long): InternalRow = map.getValue(key, resultRow)
override def keyIsUnique: Boolean = map.keyIsUnique
override def close(): Unit = {
map.free()
}
override def writeExternal(out: ObjectOutput): Unit = {
out.writeInt(nFields)
out.writeObject(map)
}
override def readExternal(in: ObjectInput): Unit = {
nFields = in.readInt()
resultRow = new UnsafeRow(nFields)
map = in.readObject().asInstanceOf[LongToUnsafeRowMap]
}
}
/**
* Create hashed relation with key that is long.
*/
private[joins] object LongHashedRelation {
def apply(
input: Iterator[InternalRow],
key: Seq[Expression],
sizeEstimate: Int,
taskMemoryManager: TaskMemoryManager): LongHashedRelation = {
val map = new LongToUnsafeRowMap(taskMemoryManager, sizeEstimate)
val keyGenerator = UnsafeProjection.create(key)
// Create a mapping of key -> rows
var numFields = 0
while (input.hasNext) {
val unsafeRow = input.next().asInstanceOf[UnsafeRow]
numFields = unsafeRow.numFields()
val rowKey = keyGenerator(unsafeRow)
if (!rowKey.isNullAt(0)) {
val key = rowKey.getLong(0)
map.append(key, unsafeRow)
}
}
map.optimize()
new LongHashedRelation(numFields, map)
}
}
/** The HashedRelationBroadcastMode requires that rows are broadcasted as a HashedRelation. */
private[execution] case class HashedRelationBroadcastMode(key: Seq[Expression])
extends BroadcastMode {
override def transform(rows: Array[InternalRow]): HashedRelation = {
transform(rows.iterator, Some(rows.length))
}
override def transform(
rows: Iterator[InternalRow],
sizeHint: Option[Long]): HashedRelation = {
sizeHint match {
case Some(numRows) =>
HashedRelation(rows, canonicalized.key, numRows.toInt)
case None =>
HashedRelation(rows, canonicalized.key)
}
}
override lazy val canonicalized: HashedRelationBroadcastMode = {
this.copy(key = key.map(_.canonicalized))
}
}
| guoxiaolongzte/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala | Scala | apache-2.0 | 26,710 |
package ch.uzh.ifi.pdeboer.pplib.hcomp
/**
* Created by pdeboer on 26/11/14.
*/
//TODO implement me
class CrowdWorker(val id: String) extends HCompPortalAdapter {
override def processQuery(query: HCompQuery, properties: HCompQueryProperties): Option[HCompAnswer] = ???
override def getDefaultPortalKey: String = ???
override def cancelQuery(query: HCompQuery): Unit = ???
}
| uzh/PPLib | src/main/scala/ch/uzh/ifi/pdeboer/pplib/hcomp/CrowdWorker.scala | Scala | mit | 382 |
package com.wix.mysql.config
import java.util.TimeZone
import java.util.concurrent.TimeUnit
import com.wix.mysql.config.Charset.{LATIN1, defaults}
import com.wix.mysql.config.MysqldConfig.aMysqldConfig
import com.wix.mysql.distribution.Version._
import org.specs2.mutable.SpecWithJUnit
import scala.collection.JavaConverters._
class MysqldConfigTest extends SpecWithJUnit {
"MysqldConfig" should {
"build with defaults" in {
val mysqldConfig = aMysqldConfig(v5_6_latest).build()
mysqldConfig.getPort mustEqual 3310
mysqldConfig.getVersion mustEqual v5_6_latest
mysqldConfig.getCharset mustEqual defaults()
mysqldConfig.getUsername mustEqual "auser"
mysqldConfig.getPassword mustEqual "sa"
mysqldConfig.getTimeZone mustEqual TimeZone.getTimeZone("UTC")
mysqldConfig.getTimeout(TimeUnit.SECONDS) mustEqual 30
}
"accept custom port, user, charset, timezone" in {
val mysqldConfig = aMysqldConfig(v5_6_latest)
.withPort(1111)
.withCharset(LATIN1)
.withUser("otheruser", "otherpassword")
.withTimeZone("Europe/Vilnius")
.withTimeout(20, TimeUnit.SECONDS)
.build()
mysqldConfig.getPort mustEqual 1111
mysqldConfig.getCharset mustEqual LATIN1
mysqldConfig.getUsername mustEqual "otheruser"
mysqldConfig.getPassword mustEqual "otherpassword"
mysqldConfig.getTimeZone mustEqual TimeZone.getTimeZone("Europe/Vilnius")
mysqldConfig.getTimeout(TimeUnit.MILLISECONDS) mustEqual 20000
}
"accept custom system variables" in {
val mysqldConfig = aMysqldConfig(v5_6_latest)
.withServerVariable("some-int", 123)
.withServerVariable("some-string", "one")
.withServerVariable("some-boolean", false)
.build
mysqldConfig.getServerVariables.asScala.map(_.toCommandLineArgument) mustEqual
Seq("--some-int=123", "--some-string=one", "--some-boolean=false")
}
"accept free port" in {
val mysqldConfig = aMysqldConfig(v5_6_latest)
.withFreePort()
.build()
mysqldConfig.getPort mustNotEqual 3310
}
"fail if building with user 'root'" in {
aMysqldConfig(v5_6_latest)
.withUser("root", "doesnotmatter")
.build() must throwA[IllegalArgumentException](message = "Usage of username 'root' is forbidden")
}
}
} | wix/wix-embedded-mysql | wix-embedded-mysql/src/test/scala/com/wix/mysql/config/MysqldConfigTest.scala | Scala | bsd-3-clause | 2,375 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers
import com.mohiva.play.silhouette.impl.providers.oauth1.TwitterProvider
import com.mohiva.play.silhouette.impl.providers.oauth2.{ GoogleProvider, FacebookProvider }
import com.mohiva.play.silhouette.impl.providers.openid.YahooProvider
import org.specs2.mock.Mockito
import org.specs2.specification.Scope
import play.api.test.PlaySpecification
/**
* Test case for the [[com.mohiva.play.silhouette.impl.providers.SocialProviderRegistry]] class.
*/
class SocialProviderRegistrySpec extends PlaySpecification with Mockito {
"The `get` method" should {
"return a provider by its type" in new Context {
registry.get[GoogleProvider] must beSome(providers(1))
}
"return None if no provider for the given type exists" in new Context {
registry.get[YahooProvider] must beNone
}
"return a provider by its ID" in new Context {
registry.get(GoogleProvider.ID) must beSome(providers(1))
}
"return None if no provider for the given ID exists" in new Context {
registry.get(YahooProvider.ID) must beNone
}
}
"The `getSeq` method" should {
"return a list of providers by it's sub type" in new Context {
val list = registry.getSeq[OAuth2Provider]
list(0).id must be equalTo providers(0).id
list(1).id must be equalTo providers(1).id
}
}
/**
* The context.
*/
trait Context extends Scope {
/**
* Some social providers.
*/
val providers = {
val facebook = mock[FacebookProvider]
facebook.id returns FacebookProvider.ID
val google = mock[GoogleProvider]
google.id returns GoogleProvider.ID
val twitter = mock[TwitterProvider]
twitter.id returns TwitterProvider.ID
Seq(
facebook,
google,
twitter
)
}
/**
* The registry to test.
*/
val registry = SocialProviderRegistry(providers)
}
}
| rfranco/play-silhouette | silhouette/test/com/mohiva/play/silhouette/impl/providers/SocialProviderRegistrySpec.scala | Scala | apache-2.0 | 2,571 |
/*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.krasserm.ases
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.stream.testkit.{TestPublisher, TestSubscriber}
import akka.testkit.TestKit
import org.scalatest.{BeforeAndAfterAll, Suite}
import scala.collection.immutable.Seq
trait StreamSpec extends BeforeAndAfterAll { this: TestKit with Suite =>
implicit val materializer = ActorMaterializer()
val emitterId = "emitter"
override def afterAll(): Unit = {
materializer.shutdown()
TestKit.shutdownActorSystem(system)
super.afterAll()
}
def probes[I, O, M](flow: Flow[I, O, M]): (TestPublisher.Probe[I], TestSubscriber.Probe[O]) =
TestSource.probe[I].viaMat(flow)(Keep.left).toMat(TestSink.probe[O])(Keep.both).run()
def durables[A](emitted: Seq[Emitted[A]], offset: Int = 0): Seq[Durable[A]] =
emitted.zipWithIndex.map { case (e, i) => e.durable(i + offset) }
}
| krasserm/akka-stream-eventsourcing | src/test/scala/com/github/krasserm/ases/StreamSpec.scala | Scala | apache-2.0 | 1,584 |
package formless
package http4s
import org.http4s.server.blaze._
object Server {
val builder = BlazeBuilder.mountService(Service.view)
def serve() = builder.run.awaitShutdown()
}
| underscoreio/formless | http4s/src/main/scala/formless/http4s/Server.scala | Scala | apache-2.0 | 186 |
package com.pauldoo.euler.puzzle
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class Puzzle1Test extends PuzzleTest {
def puzzle = Puzzle1;
def expectedAnswer = 233168;
} | pauldoo/projecteuler | test/com/pauldoo/euler/puzzle/Puzzle1Test.scala | Scala | isc | 231 |
package camel
import org.apache.camel.scala.dsl.builder.{RouteBuilderSupport, RouteBuilder}
import org.slf4j.LoggerFactory
import org.apache.camel.Exchange
import java.sql.Timestamp
import java.text.SimpleDateFormat
import scala.xml.Elem
import scala.util.{Try,Success,Failure}
/**
* Created by boris on 12/7/13.
*/
class FileRouterBuilder extends RouteBuilder with RouteBuilderSupport{
import FileRouterBuilder._
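  // Route 1: poll data/incoming for score files (a matching ".ready" marker signals that the
  // file is complete), parse the three-line payload (match id, time, score), persist the score
  // and forward an XML update to the Play queue.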
"file:data/incoming?doneFileName=${file:name}.ready" ==> {
log("log:got a new file")
convertBodyTo(classOf[String])
process{ e:Exchange =>
val lines = e.in.asInstanceOf[String].split("\\n").filterNot(_.trim().isEmpty).toList
lines match {
case mtch::timeStr::scoreStr::Nil => {
parseMsg(mtch,timeStr,scoreStr) match {
case Some((id,time,score:(Int,Int))) => {
DBModel.updateMatchScore(id,score,time)
val m = e.getIn()
m.setHeader("matchid",id)
val tstr = sdf.format(time)
val body = <update match={id.toString} time={tstr}>
<home>{score._1.toString}</home>
<guest>{score._2.toString}</guest>
</update>
e.in = body.toString()
}
case _ => {
logger.error(s"Got wrong match message:\\n $lines")
//send to error queue
}
}
}
case _ => {
logger.error(s"Got wrong match message:\\n $lines")
//send to error queue
}
}
}
choice{
when( _.header("matchid") != null){
to("jms:queue:updateToPlay")
}
otherwise {
log(s"Error while receiving message")
}
}
}
//This is the route from play; save to DB send back to play
"jms:queue:updateFromPlay" ==> {
log("Got message ${in.body}")
to("direct:updateScoreInDB")
process{ e:Exchange =>
println("inside")
}
choice{
when(_.getIn.getHeader("success",classOf[Boolean]) == true){
to("jms:queue:updateToPlay")
}
otherwise{
log("Processing message failed: ${in.body}")
}
}
}
"direct:updateScoreInDB" ==> {
process{ e:Exchange =>
val body = e.getIn.getBody(classOf[Elem])
FileRouterBuilder.parseMsgFromXML(body).map{ case (matchid,time,(home,guest)) =>
DBModel.updateMatchScore(matchid,(home,guest),time)
e.getIn.setHeader("success",true)
}
}
}
}
object FileRouterBuilder{
val logger = LoggerFactory.getLogger(classOf[FileRouterBuilder])
val sdf = new SimpleDateFormat("HH:mm:ss")
def parseMsgFromXML(body:Elem):Option[(Long,Timestamp,(Int,Int))] = {
Try({
val matchid = (body \\ "@match").text.toLong
val timeStr = (body \\ "@time").text
val home = (body \\ "home").text.toInt
val guest = (body \\ "guest").text.toInt
val time = new Timestamp(sdf.parse(timeStr).getTime)
(matchid,time,(home,guest))
}).toOption
}
def parseMsg(matchIdStr:String,timeStr:String,scoreStr:String):Option[(Long,Timestamp,(Int,Int))] = {
def parseId(matchId:String) = Try{ matchId.toLong }
def parseScore(matchId:String) = Try{
scoreStr.split(":").toList match {
case home::guest::Nil => (home.toInt,guest.toInt)
case _ => throw new IllegalArgumentException("Wrong format")
}
}
def parseTime(timeString:String) = Try{
new Timestamp(sdf.parse(timeStr).getTime)
}
val r = for{ id <- parseId(matchIdStr)
score <- parseScore(scoreStr)
time <- parseTime(timeStr)
}
yield (id,time,score)
r.toOption
}
} | bs76/camel-scala | camel-scala-router/src/main/scala/camel/router.scala | Scala | apache-2.0 | 3,723 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 Helge Holzmann
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package de.l3s.web2warc.utils
import java.io.{InputStream, OutputStream, PrintWriter}
import java.net.{Socket, URI}
import org.apache.commons.io.IOUtils
import org.apache.http.ProtocolVersion
import org.apache.http.client.methods.HttpGet
import scala.collection.immutable.ListMap
import scala.util.Try
object SimpleHttpReader {
val DefaultPort = 80
def defaultHeaders = ListMap[String, String](
"Accept" -> "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
)
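  /**
   * Issues a raw HTTP/1.0 GET over a plain socket and returns the complete response
   * (status line, headers and body) as a byte array; the server closing the connection
   * marks the end of the response.
   */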
def get(url: String, headers: Map[String, String] = defaultHeaders): Array[Byte] = {
val uri = new URI(url)
val get = new HttpGet(uri)
get.setProtocolVersion(new ProtocolVersion("HTTP", 1, 0))
val uriPort = uri.getPort
val requestPort = if (uriPort < 0) DefaultPort else uriPort
var in: InputStream = null
var request: PrintWriter = null
var out: OutputStream = null
var socket: Socket = null
try {
socket = new Socket(uri.getHost, requestPort)
out = socket.getOutputStream
request = new PrintWriter(out)
request.println(get.getRequestLine)
request.println("Host: " + uri.getHost)
for ((k,v) <- headers) request.println(s"$k: $v")
request.println("")
request.flush()
in = socket.getInputStream
IOUtils.toByteArray(in)
} finally {
if (request != null) Try {request.close()}
if (out != null) Try {out.close()}
if (in != null) Try{in.close()}
if (socket != null) Try{socket.close()}
}
}
}
| helgeho/Web2Warc | src/main/scala/de/l3s/web2warc/utils/SimpleHttpReader.scala | Scala | mit | 2,676 |
package de.berlin.arzt.math
object Complex {
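  // A complex number a + b*i is represented as a plain pair of doubles (a = real part,
  // b = imaginary part); each helper takes the components explicitly instead of a wrapper type.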
def squaredAbs(a: Double, b: Double): Double = a * a + b * b
def im(a: Double, b: Double): Double = b
def re(a: Double, b: Double): Double = a
def prodRe(a1: Double, b1: Double, a2: Double, b2: Double): Double = a1 * a2 - b1 * b2
def prodIm(a1: Double, b1: Double, a2: Double, b2: Double): Double = a1 * b2 + b1 * a2
def plusRe(a1: Double, b1: Double, a2: Double, b2: Double): Double = a1 + a2
def plusIm(a1: Double, b1: Double, a2: Double, b2: Double): Double = b1 + b2
def squareRe(a: Double, b: Double): Double = prodRe(a, b, a, b)
def squareIm(a: Double, b: Double): Double = prodIm(a, b, a, b)
}
| arzt/type-neo | src/main/scala/de/berlin/arzt/math/Complex.scala | Scala | gpl-2.0 | 675 |
import java.io.File
import sbt._
import sbt.Keys._
object Benchmarks {
val libToTest = SettingKey[ModuleID]("libToTest")
val benchOutput = SettingKey[File]("benchOutput")
val produceBench = TaskKey[File]("produceBench")
val runBench = TaskKey[Unit]("runBench")
val enableScalacProfiler = SettingKey[Boolean]("enableScalacProfiler")
def settings = Seq(
// TODO separation between deps and benchmark
libraryDependencies := Seq(libToTest.value.withSources(), "org.scala-lang" % "scala-compiler" % scalaVersion.value),
benchOutput := file(".") / "benchOut" / scalaVersion.value,
enableScalacProfiler := false,
createBenchImpl,
runBenchImpl
)
def createBenchImpl = produceBench := {
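    // Assemble a self-contained benchmark directory: the unpacked sources of the library under
    // test, the Scala and classpath jars, the packaged benchmark jar, and a run.sh launcher.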
val dest = benchOutput.value.getAbsoluteFile
Option(dest.listFiles()).foreach(_.foreach(IO.delete))
// TODO add proper support for scala versions mangling
val libToTest = Benchmarks.libToTest.value.withName(Benchmarks.libToTest.value.name)
def isLibToTest(m: ModuleReport) =
m.module.organization == libToTest.organization && m.module.name == libToTest.name
def isScalaDep(m: ModuleReport) = m.module.organization == "org.scala-lang"
def compileConfig(u: UpdateReport) = u.configurations.find(_.configuration.name == "compile").get
println(compileConfig(updateClassifiers.value).modules.map(_.module))
val sourceJar =
compileConfig(updateClassifiers.value).modules.find(isLibToTest).get.artifacts.collectFirst {
case (artifact, file) if artifact.classifier == Some("sources") =>
file
}.get
IO.unzip(sourceJar, dest / "sources")
def jarsIn(moduleReport: ModuleReport) = moduleReport.artifacts.map(_._2)
val (scalaDeps, cpDeps) = compileConfig(update.value).modules.filterNot(isLibToTest).partition(isScalaDep)
val destBechJar = dest / "bench.jar"
IO.copyFile(Keys.`package`.in(Compile).value, destBechJar)
val scalaJarsMapping = scalaDeps.flatMap(jarsIn).map(d => (d, dest / "scalaJars" / d.getName))
scalaJarsMapping.foreach{ case (origin, dest) => IO.copyFile(origin, dest) }
// TODO add support for libs that declare scala compiler as dep
val scalaLib = scalaDeps.filter(_.module.name == "scala-library")
(cpDeps ++ scalaLib).flatMap(jarsIn).foreach(d => IO.copyFile(d, dest / "cpJars" / d.getName))
def relativize(file: File): File =
dest.toPath().relativize(file.toPath.toAbsolutePath).toFile
val appClasspath = (scalaJarsMapping.map(_._2) ++ Seq(destBechJar)).map(relativize)
// TODO add more scripts (run bench M times etc.)
val scriptLines = Seq(
"#!/bin/bash",
"cd `dirname $0`",
s"java -cp ${appClasspath.mkString(File.pathSeparator)} benchmarks.Main $$@"
)
val bashScriptFile = dest / "run.sh"
IO.write(bashScriptFile, scriptLines.mkString("\n"))
bashScriptFile.setExecutable(true)
if (enableScalacProfiler.value){
IO.write(dest / "scalac.opts", Seq(
"-Yprofile-enabled",
s"-Yprofile-destination ${relativize(dest / "output" / "profile.txt")}"
).mkString("\n"))
}
streams.value.log.success(s"Benchmark was created in ${dest.toPath}")
// TODO add code generation for params java options and other things
bashScriptFile.getAbsoluteFile
}
def runBenchImpl = runBench := {
val script = produceBench.value.getAbsolutePath.toString
streams.value.log.success(s"Running benchmark from $script")
import scala.sys.process._
script.!
}
}
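// --- Illustrative wiring sketch, not part of the original file ---
// This object is an sbt build helper, so it is consumed from build.sbt rather than from
// application code. The library coordinates below are made up; only libToTest and the two
// tasks defined above come from this file:
//
//   lazy val bench = (project in file("."))
//     .settings(Benchmarks.settings: _*)
//     .settings(Benchmarks.libToTest := "org.typelevel" %% "cats-core" % "2.0.0")
//
// After that, `sbt produceBench` lays out benchOut/<scalaVersion>/ with run.sh, and
// `sbt runBench` builds the benchmark and immediately executes that script.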
| rorygraves/perf_tester | light/project/Benchmarks.scala | Scala | apache-2.0 | 3,506 |
/**
* Copyright (c) 2013, Regents of the University of California
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer. Redistributions in binary
* form must reproduce the above copyright notice, this list of conditions and the
* following disclaimer in the documentation and/or other materials provided with
* the distribution. Neither the name of the University of California, Berkeley
* nor the names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission. THIS
* SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ClusterSchedulingSimulation
import scala.collection.mutable.HashMap
import scala.collection.mutable.ListBuffer
import ClusterSimulationProtos._
import java.io._
/**
* An experiment represents a series of runs of a simulator,
* across ranges of parameters. Exactly one of {L, C, Lambda}
* can be swept over per experiment, i.e. only one of
* avgJobInterarrivalTimeRange, constantThinkTimeRange, and
* perTaskThinkTimeRange can have size greater than one in a
* single Experiment instance.
*/
class Experiment(
name: String,
// Workloads setup.
workloadToSweepOver: String,
avgJobInterarrivalTimeRange: Option[Seq[Double]] = None,
workloadDescs: Seq[WorkloadDesc],
// Schedulers setup.
schedulerWorkloadsToSweepOver: Map[String, Seq[String]],
constantThinkTimeRange: Seq[Double],
perTaskThinkTimeRange: Seq[Double],
blackListPercentRange: Seq[Double],
// Workload -> scheduler mapping setup.
schedulerWorkloadMap: Map[String, Seq[String]],
// Simulator setup.
simulatorDesc: ClusterSimulatorDesc,
logging: Boolean = false,
outputDirectory: String = "experiment_results",
// Map from workloadName -> max % of cellState this prefill workload
// can account for. Any prefill workload generator with workloadName
// that is not contained in any of these maps will have no prefill
// generated for this experiment, and any with name that is in multiple
// of these maps will use the first limit that actually kicks in.
prefillCpuLimits: Map[String, Double] = Map(),
prefillMemLimits: Map[String, Double] = Map(),
// Default simulations to 10 minute timeout.
simulationTimeout: Double = 60.0*10.0) extends Runnable {
prefillCpuLimits.values.foreach(l => assert(l >= 0.0 && l <= 1.0))
prefillMemLimits.values.foreach(l => assert(l >= 0.0 && l <= 1.0))
var parametersSweepingOver = 0
avgJobInterarrivalTimeRange.foreach{opt: Seq[Double] => {
if (opt.length > 1) {
parametersSweepingOver += 1
}
}}
if (constantThinkTimeRange.length > 1) {parametersSweepingOver += 1}
if (perTaskThinkTimeRange.length > 1) {parametersSweepingOver += 1}
// assert(parametersSweepingOver <= 1)
override
def toString = name
def run() {
// Create the output directory if it doesn't exist.
(new File(outputDirectory)).mkdirs()
val output =
new java.io.FileOutputStream("%s/%s-%.0f.protobuf"
.format(outputDirectory,
name,
simulatorDesc.runTime))
val experimentResultSet = ExperimentResultSet.newBuilder()
// Parameter sweep over workloadDescs
workloadDescs.foreach(workloadDesc => {
println("\\nSet workloadDesc = %s %s"
.format(workloadDesc.cell, workloadDesc.assignmentPolicy))
// Save Experiment level stats into protobuf results.
val experimentEnv = ExperimentResultSet.ExperimentEnv.newBuilder()
experimentEnv.setCellName(workloadDesc.cell)
experimentEnv.setWorkloadSplitType(workloadDesc.assignmentPolicy)
experimentEnv.setIsPrefilled(
workloadDesc.prefillWorkloadGenerators.length > 0)
experimentEnv.setRunTime(simulatorDesc.runTime)
// Generate preFill workloads. The simulator doesn't modify
// these workloads like it does the workloads that are played during
// the simulation.
var prefillWorkloads = List[Workload]()
workloadDesc.prefillWorkloadGenerators
.filter(wlGen => {
prefillCpuLimits.contains(wlGen.workloadName) ||
prefillMemLimits.contains(wlGen.workloadName)
}).foreach(wlGen => {
val cpusMaxOpt = prefillCpuLimits.get(wlGen.workloadName).map(i => {
i * workloadDesc.cellStateDesc.numMachines *
workloadDesc.cellStateDesc.cpusPerMachine
})
val memMaxOpt = prefillMemLimits.get(wlGen.workloadName).map(i => {
i * workloadDesc.cellStateDesc.numMachines *
workloadDesc.cellStateDesc.memPerMachine
})
println(("Creating a new prefill workload from " +
"%s with maxCPU %s and maxMem %s")
.format(wlGen.workloadName, cpusMaxOpt, memMaxOpt))
val newWorkload = wlGen.newWorkload(simulatorDesc.runTime,
maxCpus = cpusMaxOpt,
maxMem = memMaxOpt)
for(job <- newWorkload.getJobs) {
assert(job.submitted == 0.0)
}
prefillWorkloads ::= newWorkload
})
// Parameter sweep over lambda.
// If we have a range for lambda, loop over it, else
// we just loop over a list holding a single element: None
val jobInterarrivalRange = avgJobInterarrivalTimeRange match {
case Some(paramsRange) => paramsRange.map(Some(_))
case None => List(None)
}
println("\\nSet up avgJobInterarrivalTimeRange: %s\\n"
.format(jobInterarrivalRange))
jobInterarrivalRange.foreach(avgJobInterarrivalTime => {
if (avgJobInterarrivalTime.isEmpty) {
println("Since we're not in a labmda sweep, not overwriting lambda.")
} else {
println("Curr avgJobInterarrivalTime: %s\\n"
.format(avgJobInterarrivalTime))
}
// Set up a list of workloads
var commonWorkloadSet = ListBuffer[Workload]()
var newAvgJobInterarrivalTime: Option[Double] = None
workloadDesc.workloadGenerators.foreach(workloadGenerator => {
if (workloadToSweepOver.equals(
workloadGenerator.workloadName)) {
// Only update the workload interarrival time if this is the
// workload we are supposed to sweep over. If this is not a
// lambda parameter sweep then updatedAvgJobInterarrivalTime
// will remain None after this line is executed.
newAvgJobInterarrivalTime = avgJobInterarrivalTime
}
println("Generating new Workload %s for window %f seconds long."
.format(workloadGenerator.workloadName, simulatorDesc.runTime))
val newWorkload =
workloadGenerator
.newWorkload(timeWindow = simulatorDesc.runTime,
updatedAvgJobInterarrivalTime = newAvgJobInterarrivalTime)
commonWorkloadSet.append(newWorkload)
})
// Parameter sweep over L.
perTaskThinkTimeRange.foreach(perTaskThinkTime => {
println("\\nSet perTaskThinkTime = %f".format(perTaskThinkTime))
// Parameter sweep over C.
constantThinkTimeRange.foreach(constantThinkTime => {
println("\\nSet constantThinkTime = %f".format(constantThinkTime))
// Parameter sweep over BlackListPercent (of cellstate).
blackListPercentRange.foreach(blackListPercent => {
println("\\nSet blackListPercent = %f".format(blackListPercent))
// Make a copy of the workloads that this run of the simulator
// will modify by using them to track statistics.
val workloads = ListBuffer[Workload]()
commonWorkloadSet.foreach(workload => {
workloads.append(workload.copy)
})
// Set up and run the simulator.
val simulator =
simulatorDesc.newSimulator(constantThinkTime,
perTaskThinkTime,
blackListPercent,
schedulerWorkloadsToSweepOver,
schedulerWorkloadMap,
workloadDesc.cellStateDesc,
workloads,
prefillWorkloads,
logging)
println("Running simulation with run().")
val success: Boolean = simulator.run(Some(simulatorDesc.runTime),
Some(simulationTimeout))
if (success) {
// Simulation did not time out, so record stats.
/**
* Capture statistics into a protocolbuffer.
*/
val experimentResult =
ExperimentResultSet.ExperimentEnv.ExperimentResult.newBuilder()
experimentResult.setCellStateAvgCpuUtilization(
simulator.avgCpuUtilization / simulator.cellState.totalCpus)
experimentResult.setCellStateAvgMemUtilization(
simulator.avgMemUtilization / simulator.cellState.totalMem)
experimentResult.setCellStateAvgCpuLocked(
simulator.avgCpuLocked / simulator.cellState.totalCpus)
experimentResult.setCellStateAvgMemLocked(
simulator.avgMemLocked / simulator.cellState.totalMem)
// Save repeated stats about workloads.
workloads.foreach(workload => {
val workloadStats = ExperimentResultSet.
ExperimentEnv.
ExperimentResult.
WorkloadStats.newBuilder()
workloadStats.setWorkloadName(workload.name)
workloadStats.setNumJobs(workload.numJobs)
workloadStats.setNumJobsScheduled(
workload.getJobs.filter(_.numSchedulingAttempts > 0).length)
workloadStats.setJobThinkTimes90Percentile(
workload.jobUsefulThinkTimesPercentile(0.9))
workloadStats.setAvgJobQueueTimesTillFirstScheduled(
workload.avgJobQueueTimeTillFirstScheduled)
workloadStats.setAvgJobQueueTimesTillFullyScheduled(
workload.avgJobQueueTimeTillFullyScheduled)
workloadStats.setJobQueueTimeTillFirstScheduled90Percentile(
workload.jobQueueTimeTillFirstScheduledPercentile(0.9))
workloadStats.setJobQueueTimeTillFullyScheduled90Percentile(
workload.jobQueueTimeTillFullyScheduledPercentile(0.9))
workloadStats.setNumSchedulingAttempts90Percentile(
workload.numSchedulingAttemptsPercentile(0.9))
workloadStats.setNumSchedulingAttempts99Percentile(
workload.numSchedulingAttemptsPercentile(0.99))
workloadStats.setNumTaskSchedulingAttempts90Percentile(
workload.numTaskSchedulingAttemptsPercentile(0.9))
workloadStats.setNumTaskSchedulingAttempts99Percentile(
workload.numTaskSchedulingAttemptsPercentile(0.99))
experimentResult.addWorkloadStats(workloadStats)
})
// Record workload specific details about the parameter sweeps.
experimentResult.setSweepWorkload(workloadToSweepOver)
experimentResult.setAvgJobInterarrivalTime(
avgJobInterarrivalTime.getOrElse(
workloads.filter(_.name == workloadToSweepOver)
.head.avgJobInterarrivalTime))
// Save repeated stats about schedulers.
simulator.schedulers.values.foreach(scheduler => {
val schedulerStats =
ExperimentResultSet.
ExperimentEnv.
ExperimentResult.
SchedulerStats.newBuilder()
schedulerStats.setSchedulerName(scheduler.name)
schedulerStats.setUsefulBusyTime(
scheduler.totalUsefulTimeScheduling)
schedulerStats.setWastedBusyTime(
scheduler.totalWastedTimeScheduling)
// Per scheduler metrics bucketed by day.
// Use floor since days are zero-indexed. For example, if the
// simulator only runs for 1/2 day, we should only have one
// bucket (day 0), so our range should be 0 to 0. In this example
// we would get floor(runTime / 86400) = floor(0.5) = 0.
val daysRan = math.floor(simulatorDesc.runTime/86400.0).toInt
println("Computing daily stats for days 0 through %d."
.format(daysRan))
(0 to daysRan).foreach {
day: Int => {
val perDayStats =
ExperimentResultSet.
ExperimentEnv.
ExperimentResult.
SchedulerStats.
PerDayStats.newBuilder()
perDayStats.setDayNum(day)
// Busy and wasted time bucketed by day.
perDayStats.setUsefulBusyTime(
scheduler.dailyUsefulTimeScheduling.getOrElse(day, 0.0))
println(("Writing dailyUsefulScheduling(day = %d) = %f for " +
"scheduler %s")
.format(day,
scheduler
.dailyUsefulTimeScheduling
.getOrElse(day, 0.0),
scheduler.name))
perDayStats.setWastedBusyTime(
scheduler.dailyWastedTimeScheduling.getOrElse(day, 0.0))
// Counters bucketed by day.
perDayStats.setNumSuccessfulTransactions(
scheduler.dailySuccessTransactions.getOrElse[Int](day, 0))
perDayStats.setNumFailedTransactions(
scheduler.dailyFailedTransactions.getOrElse[Int](day, 0))
schedulerStats.addPerDayStats(perDayStats)
}}
assert(scheduler.perWorkloadUsefulTimeScheduling.size ==
scheduler.perWorkloadWastedTimeScheduling.size,
"the maps held by Scheduler to track per workload " +
"useful and wasted time should be the same size " +
"(Scheduler.addJob() should ensure this).")
scheduler.perWorkloadUsefulTimeScheduling.foreach{
case (workloadName, workloadUsefulBusyTime) => {
val perWorkloadBusyTime =
ExperimentResultSet.
ExperimentEnv.
ExperimentResult.
SchedulerStats.
PerWorkloadBusyTime.newBuilder()
perWorkloadBusyTime.setWorkloadName(workloadName)
perWorkloadBusyTime.setUsefulBusyTime(workloadUsefulBusyTime)
perWorkloadBusyTime.setWastedBusyTime(
scheduler.perWorkloadWastedTimeScheduling(workloadName))
schedulerStats.addPerWorkloadBusyTime(perWorkloadBusyTime)
}}
// Counts of sched-level job transaction successes, failures,
// and retries.
schedulerStats.setNumSuccessfulTransactions(
scheduler.numSuccessfulTransactions)
schedulerStats.setNumFailedTransactions(
scheduler.numFailedTransactions)
schedulerStats.setNumNoResourcesFoundSchedulingAttempts(
scheduler.numNoResourcesFoundSchedulingAttempts)
schedulerStats.setNumRetriedTransactions(
scheduler.numRetriedTransactions)
schedulerStats.setNumJobsTimedOutScheduling(
scheduler.numJobsTimedOutScheduling)
// Counts of task transaction successes and failures.
schedulerStats.setNumSuccessfulTaskTransactions(
scheduler.numSuccessfulTaskTransactions)
schedulerStats.setNumFailedTaskTransactions(
scheduler.numFailedTaskTransactions)
schedulerStats.setIsMultiPath(scheduler.isMultiPath)
schedulerStats.setNumJobsLeftInQueue(scheduler.jobQueueSize)
schedulerStats.setFailedFindVictimAttempts(
scheduler.failedFindVictimAttempts)
experimentResult.addSchedulerStats(schedulerStats)
})
// Record scheduler specific details about the parameter sweeps.
schedulerWorkloadsToSweepOver
.foreach{case (schedName, workloadNames) => {
workloadNames.foreach(workloadName => {
val schedulerWorkload =
ExperimentResultSet.
ExperimentEnv.
ExperimentResult.
SchedulerWorkload.newBuilder()
schedulerWorkload.setSchedulerName(schedName)
schedulerWorkload.setWorkloadName(workloadName)
experimentResult.addSweepSchedulerWorkload(schedulerWorkload)
})
}}
experimentResult.setConstantThinkTime(constantThinkTime)
experimentResult.setPerTaskThinkTime(perTaskThinkTime)
// Save our results as a protocol buffer.
experimentEnv.addExperimentResult(experimentResult.build())
/**
* TODO(andyk): Once protocol buffer support is finished,
* remove this.
*/
// Create a sorted list of schedulers and workloads to compute
// a lot of the stats below, so that we can be sure
// which column is which when we print the stats.
val sortedSchedulers = simulator
.schedulers.values.toList.sortWith(_.name < _.name)
val sortedWorkloads = workloads.toList.sortWith(_.name < _.name)
// Sorted names of workloads.
var workloadNames = sortedWorkloads.map(_.name).mkString(" ")
// Count the jobs in each workload.
var numJobs = sortedWorkloads.map(_.numJobs).mkString(" ")
// Count the jobs in each workload that were actually scheduled.
val numJobsScheduled = sortedWorkloads.map(workload => {
workload.getJobs.filter(_.numSchedulingAttempts > 0).length
}).mkString(" ")
// Sorted names of Schedulers.
val schedNames = sortedSchedulers.map(_.name).mkString(" ")
// Calculate per scheduler successful, failed, retried
// transaction conflict rates.
val schedSuccessfulTransactions = sortedSchedulers.map(sched => {
sched.numSuccessfulTransactions
}).mkString(" ")
val schedFailedTransactions = sortedSchedulers.map(sched => {
sched.numFailedTransactions
}).mkString(" ")
val schedNoResourcesFoundSchedAttempt = sortedSchedulers.map(sched => {
sched.numNoResourcesFoundSchedulingAttempts
}).mkString(" ")
val schedRetriedTransactions = sortedSchedulers.map(sched => {
sched.numRetriedTransactions
}).mkString(" ")
// Calculate per scheduler task transaction and conflict rates
val schedSuccessfulTaskTransactions = sortedSchedulers.map(sched => {
sched.numSuccessfulTaskTransactions
}).mkString(" ")
val schedFailedTaskTransactions = sortedSchedulers.map(sched => {
sched.numFailedTaskTransactions
}).mkString(" ")
val schedNumJobsTimedOutScheduling = sortedSchedulers.map(sched => {
sched.numJobsTimedOutScheduling
}).mkString(" ")
// Calculate per scheduler aggregate (useful + wasted) busy time.
val schedBusyTimes = sortedSchedulers.map(sched => {
println(("calculating busy time for sched %s as " +
"(%f + %f) / %f = %f.")
.format(sched.name,
sched.totalUsefulTimeScheduling,
sched.totalWastedTimeScheduling,
simulator.currentTime,
(sched.totalUsefulTimeScheduling +
sched.totalWastedTimeScheduling) /
simulator.currentTime))
(sched.totalUsefulTimeScheduling +
sched.totalWastedTimeScheduling) / simulator.currentTime
}).mkString(" ")
// Calculate per scheduler useful busy time.
val schedUsefulBusyTimes = sortedSchedulers.map(sched => {
sched.totalUsefulTimeScheduling / simulator.currentTime
}).mkString(" ")
// Calculate per scheduler wasted busy time.
val schedWastedBusyTimes = sortedSchedulers.map(sched => {
sched.totalWastedTimeScheduling / simulator.currentTime
}).mkString(" ")
// Calculate per-scheduler per-workload useful + wasted busy time.
val perWorkloadSchedBusyTimes = sortedSchedulers.map(sched => {
// Sort by workload name.
val sortedSchedulingTimes =
sched.perWorkloadUsefulTimeScheduling.toList.sortWith(_._1<_._1)
sortedSchedulingTimes.map(nameTimePair => {
(nameTimePair._2 +
sched.perWorkloadWastedTimeScheduling(nameTimePair._1)) /
simulator.currentTime
}).mkString(" ")
}).mkString(" ")
// Calculate 90%tile per-workload time-scheduling for
// scheduled jobs.
// sortedWorkloads is a ListBuffer[Workload]
// Workload.jobs is a ListBuffer[Job].
val jobThinkTimes90Percentile = sortedWorkloads.map(workload => {
workload.jobUsefulThinkTimesPercentile(0.9)
}).mkString(" ")
// Calculate the average time jobs spent in the scheduler's queue before
// their first task was scheduled.
val avgJobQueueTimesTillFirstScheduled = sortedWorkloads.map(workload => {
workload.avgJobQueueTimeTillFirstScheduled
}).mkString(" ")
// Calculate the average time jobs spent in the scheduler's queue before
// their final task was scheduled.
val avgJobQueueTimesTillFullyScheduled = sortedWorkloads.map(workload => {
workload.avgJobQueueTimeTillFullyScheduled
}).mkString(" ")
// Calculate the 90%tile per-workload jobQueueTime*-s for
// scheduled jobs.
val jobQueueTimeTillFirstScheduled90Percentile =
sortedWorkloads.map(workload => {
workload.jobQueueTimeTillFirstScheduledPercentile(0.9)
}).mkString(" ")
val jobQueueTimeTillFullyScheduled90Percentile =
sortedWorkloads.map(workload => {
workload.jobQueueTimeTillFullyScheduledPercentile(0.9)
}).mkString(" ")
val numSchedulingAttempts90Percentile =
sortedWorkloads.map(workload => {
workload.numSchedulingAttemptsPercentile(0.9)
}).mkString(" ")
val numSchedulingAttempts99Percentile =
sortedWorkloads.map(workload => {
workload.numSchedulingAttemptsPercentile(0.99)
}).mkString(" ")
val numSchedulingAttemptsMax =
sortedWorkloads.map(workload => {
workload.getJobs.map(_.numSchedulingAttempts).max
}).mkString(" ")
val numTaskSchedulingAttempts90Percentile =
sortedWorkloads.map(workload => {
workload.numTaskSchedulingAttemptsPercentile(0.9)
}).mkString(" ")
val numTaskSchedulingAttempts99Percentile =
sortedWorkloads.map(workload => {
workload.numTaskSchedulingAttemptsPercentile(0.99)
}).mkString(" ")
val numTaskSchedulingAttemptsMax =
sortedWorkloads.map(workload => {
workload.getJobs.map(_.numTaskSchedulingAttempts).max
}).mkString(" ")
// Per-scheduler stats.
val schedulerIsMultiPaths = sortedSchedulers.map(sched => {
if (sched.isMultiPath) "1"
else "0"
}).mkString(" ")
val schedulerJobQueueSizes =
sortedSchedulers.map(_.jobQueueSize).mkString(" ")
val prettyLine = ("cell: %s \\n" +
"assignment policy: %s \\n" +
"runtime: %f \\n" +
"avg cpu util: %f \\n" +
"avg mem util: %f \\n" +
"num workloads %d \\n" +
"workload names: %s \\n" +
"numjobs: %s \\n" +
"num jobs scheduled: %s \\n" +
"perWorkloadSchedBusyTimes: %s \\n" +
"jobThinkTimes90Percentile: %s \\n" +
"avgJobQueueTimesTillFirstScheduled: %s \\n" +
"avgJobQueueTimesTillFullyScheduled: %s \\n" +
"jobQueueTimeTillFirstScheduled90Percentile: %s \\n" +
"jobQueueTimeTillFullyScheduled90Percentile: %s \\n" +
"numSchedulingAttempts90Percentile: %s \\n" +
"numSchedulingAttempts99Percentile: %s \\n" +
"numSchedulingAttemptsMax: %s \\n" +
"numTaskSchedulingAttempts90Percentile: %s \\n" +
"numTaskSchedulingAttempts99Percentile: %s \\n" +
"numTaskSchedulingAttemptsMax: %s \\n" +
"simulator.schedulers.size: %d \\n" +
"schedNames: %s \\n" +
"schedBusyTimes: %s \\n" +
"schedUsefulBusyTimes: %s \\n" +
"schedWastedBusyTimes: %s \\n" +
"schedSuccessfulTransactions: %s \\n" +
"schedFailedTransactions: %s \\n" +
"schedNoResorucesFoundSchedAttempt: %s \\n" +
"schedRetriedTransactions: %s \\n" +
"schedSuccessfulTaskTransactions: %s \\n" +
"schedFailedTaskTransactions: %s \\n" +
"schedNumJobsTimedOutScheduling: %s \\n" +
"schedulerIsMultiPaths: %s \\n" +
"schedulerNumJobsLeftInQueue: %s \\n" +
"workloadToSweepOver: %s \\n" +
"avgJobInterarrivalTime: %f \\n" +
"constantThinkTime: %f \\n" +
"perTaskThinkTime %f").format(
workloadDesc.cell, // %s
workloadDesc.assignmentPolicy, // %s
simulatorDesc.runTime, // %f
simulator.avgCpuUtilization /
simulator.cellState.totalCpus, // %f
simulator.avgMemUtilization /
simulator.cellState.totalMem, // %f
workloads.length, // %d
workloadNames, // %s
numJobs, // %s
numJobsScheduled, // %s
perWorkloadSchedBusyTimes, // %s
jobThinkTimes90Percentile, // %s
avgJobQueueTimesTillFirstScheduled, // %s
avgJobQueueTimesTillFullyScheduled, // %s
jobQueueTimeTillFirstScheduled90Percentile, // %s
jobQueueTimeTillFullyScheduled90Percentile, // %s
numSchedulingAttempts90Percentile, // %s
numSchedulingAttempts99Percentile, // %s
numSchedulingAttemptsMax, // %s
numTaskSchedulingAttempts90Percentile, // %s
numTaskSchedulingAttempts99Percentile, // %s
numTaskSchedulingAttemptsMax, // %s
simulator.schedulers.size, // %d
schedNames, // %s
schedBusyTimes, // %s
schedUsefulBusyTimes, // %s
schedWastedBusyTimes, // %s
schedSuccessfulTransactions, // %s
schedFailedTransactions, // %s
schedNoResourcesFoundSchedAttempt, // %s
schedRetriedTransactions, // %s
schedSuccessfulTaskTransactions, // %s
schedFailedTaskTransactions, // %s
schedNumJobsTimedOutScheduling, // %s
schedulerIsMultiPaths, // %s
schedulerJobQueueSizes,
workloadToSweepOver, // %s
avgJobInterarrivalTime.getOrElse(
workloads.filter(_.name == workloadToSweepOver) // %f
.head.avgJobInterarrivalTime),
constantThinkTime, // %f
perTaskThinkTime) // %f
println(prettyLine + "\\n")
} else { // if (success)
println("Simulation timed out.")
}
}) // blackListPercent
}) // C
}) // L
}) // lambda
experimentResultSet.addExperimentEnv(experimentEnv)
}) // WorkloadDescs
experimentResultSet.build().writeTo(output)
output.close()
}
}
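// --- Illustrative sketch, not part of the original file ---
// Experiment extends Runnable, so a driver constructs one and runs it inline or on its own
// thread. The workload and simulator descriptions below are hypothetical placeholders that
// would be defined elsewhere in the project:
//
//   val exp = new Experiment(
//     name = "c-sweep",
//     workloadToSweepOver = "Batch",
//     workloadDescs = myWorkloadDescs,
//     schedulerWorkloadsToSweepOver = Map("Monolithic" -> Seq("Batch")),
//     constantThinkTimeRange = Seq(0.01, 0.1, 1.0),
//     perTaskThinkTimeRange = Seq(0.005),
//     blackListPercentRange = Seq(0.0),
//     schedulerWorkloadMap = Map("Monolithic" -> Seq("Batch", "Service")),
//     simulatorDesc = mySimulatorDesc)
//   new Thread(exp).start()  // results land in experiment_results/<name>-<runTime>.protobuf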
| google/cluster-scheduler-simulator | src/main/scala/ExperimentRunner.scala | Scala | bsd-3-clause | 33,666 |
/*
* ******************************************************************************
* * Copyright (C) 2013 Christopher Harris (Itszuvalex)
* * [email protected]
* *
* * This program is free software; you can redistribute it and/or
* * modify it under the terms of the GNU General Public License
* * as published by the Free Software Foundation; either version 2
* * of the License, or (at your option) any later version.
* *
* * This program is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* * GNU General Public License for more details.
* *
* * You should have received a copy of the GNU General Public License
* * along with this program; if not, write to the Free Software
* * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *****************************************************************************
*/
package com.itszuvalex.femtocraft.power.blocks
import java.util.Random
import com.itszuvalex.femtocraft.Femtocraft
import com.itszuvalex.femtocraft.power.tiles.TileEntityFemtoCable
import com.itszuvalex.femtocraft.proxy.ProxyClient
import com.itszuvalex.femtocraft.render.RenderUtils
import cpw.mods.fml.relauncher.{Side, SideOnly}
import net.minecraft.block.Block
import net.minecraft.block.material.Material
import net.minecraft.client.renderer.texture.IIconRegister
import net.minecraft.tileentity.TileEntity
import net.minecraft.world.World
class BlockFemtoCable() extends BlockMicroCable() {
setCreativeTab(Femtocraft.femtocraftTab)
setBlockName("blockFemtoCable")
setHardness(1.0f)
setStepSound(Block.soundTypeMetal)
setBlockBounds()
setTickRandomly(true)
override def setBlockBounds() {
this.minX = 4.0D / 16.0D; this.minY = 4.0D / 16.0D; this.minZ = 4.0D / 16.0D
this.maxX = 12.0D / 16.0D; this.maxY = 12.0D / 16.0D; this.maxZ = 12.0D / 16.0D
}
override def randomDisplayTick(par1World: World, x: Int, y: Int, z: Int, par5Random: Random) {
val spawnX: Double = x + getBlockBoundsMinX + par5Random.nextFloat * (getBlockBoundsMaxX - getBlockBoundsMinX)
val spawnY: Double = y + getBlockBoundsMinY + par5Random.nextFloat * (getBlockBoundsMaxY - getBlockBoundsMinY)
val spawnZ: Double = z + getBlockBoundsMinZ + par5Random.nextFloat * (getBlockBoundsMaxZ - getBlockBoundsMinZ)
RenderUtils.spawnParticle(par1World, RenderUtils.FEMTO_POWER_PARTICLE, spawnX, spawnY, spawnZ)
}
override def getRenderType = ProxyClient.femtoCableRenderID
override def createNewTileEntity(world: World, metadata: Int): TileEntity = new TileEntityFemtoCable
@SideOnly(Side.CLIENT) override def registerBlockIcons(par1IconRegister: IIconRegister) {
this.blockIcon = par1IconRegister.registerIcon(Femtocraft.ID.toLowerCase + ":" + "femtoCableCoil")
coreBorder = par1IconRegister.registerIcon(Femtocraft.ID.toLowerCase + ":" + "femtoCableCoreBorder")
connector = par1IconRegister.registerIcon(Femtocraft.ID.toLowerCase + ":" + "femtoCableConnector")
coil = par1IconRegister.registerIcon(Femtocraft.ID.toLowerCase + ":" + "femtoCableCoil")
coilEdge = par1IconRegister.registerIcon(Femtocraft.ID.toLowerCase + ":" + "femtoCableCoilEdge")
border = par1IconRegister.registerIcon(Femtocraft.ID.toLowerCase + ":" + "femtoCableBorder")
}
}
| Itszuvalex/Femtocraft-alpha-1 | src/main/java/com/itszuvalex/femtocraft/power/blocks/BlockFemtoCable.scala | Scala | gpl-2.0 | 3,409 |
package com.goyeau.kubernetes.client.operation
import cats.effect.Async
import cats.syntax.either._
import com.goyeau.kubernetes.client.util.Uris.addLabels
import com.goyeau.kubernetes.client.{KubeConfig, WatchEvent}
import fs2.Stream
import io.circe.jawn.CirceSupportParser
import io.circe.{Decoder, Json}
import org.typelevel.jawn.fs2._
import org.http4s.Method._
import org.http4s._
import org.http4s.client.Client
import org.typelevel.jawn.Facade
private[client] trait Watchable[F[_], Resource] {
protected def httpClient: Client[F]
implicit protected val F: Async[F]
protected def config: KubeConfig
protected def resourceUri: Uri
protected def watchResourceUri: Uri = resourceUri
implicit protected def resourceDecoder: Decoder[Resource]
implicit val parserFacade: Facade[Json] = new CirceSupportParser(None, false).facade
def watch(labels: Map[String, String] = Map.empty): Stream[F, Either[String, WatchEvent[Resource]]] = {
val uri = addLabels(labels, config.server.resolve(watchResourceUri))
val req = Request[F](GET, uri.withQueryParam("watch", "1")).withOptionalAuthorization(config.authorization)
jsonStream(req).map(_.as[WatchEvent[Resource]].leftMap(_.getMessage))
}
private def jsonStream(req: Request[F]): Stream[F, Json] =
httpClient.stream(req).flatMap(_.body.chunks.parseJsonStream)
}
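// --- Illustrative sketch, not part of the original file ---
// A concrete resource API that mixes in Watchable exposes watch() as an fs2 stream of
// decoded events. The client and resource names below are hypothetical; only the watch()
// signature comes from this trait:
//
//   client.pods.namespace("default")
//     .watch(labels = Map("app" -> "web"))  // Stream[F, Either[String, WatchEvent[Pod]]]
//     .evalMap(event => Sync[F].delay(println(event)))
//     .compile
//     .drain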
| joan38/kubernetes-client | kubernetes-client/src/com/goyeau/kubernetes/client/operation/Watchable.scala | Scala | apache-2.0 | 1,346 |
//: ----------------------------------------------------------------------------
//: Copyright (C) 2014 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package remotely
import org.scalatest.matchers.{Matcher,MatchResult}
import org.scalatest.{FlatSpec,Matchers,BeforeAndAfterAll}
import scalaz.concurrent.Task
import scalaz.stream.Process
import remotely.transport.netty.{NettyServer, NettyTransport}
import scala.concurrent.duration.DurationInt
import java.util.concurrent.Executors
class UberSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
behavior of "permutations"
it should "work" in {
val input: Process[Task,Int] = Process.emitAll(Vector(1,2,3))
val permuted : IndexedSeq[Process[Task,Int]] = Endpoint.permutations(input).runLog.run
permuted.size should be (6)
val all = permuted.map(_.runLog.run).toSet
all should be(Set(IndexedSeq(1,2,3), IndexedSeq(1,3,2), IndexedSeq(2,1,3), IndexedSeq(2,3,1), IndexedSeq(3,1,2), IndexedSeq(3,2,1)))
}
behavior of "isEmpty"
it should "work" in {
val empty: Process[Task,Int] = Process.halt
Endpoint.isEmpty(empty).run should be (true)
val notEmpty: Process[Task,Int] = Process.emit(1)
Endpoint.isEmpty(notEmpty).run should be (false)
val alsoNot: Process[Task,Int] = Process.eval(Task.now(1))
Endpoint.isEmpty(alsoNot).run should be (false)
}
behavior of "transpose"
it should "work" in {
val input = IndexedSeq(IndexedSeq("a", "b", "c"),IndexedSeq("q", "w", "e"), IndexedSeq("1", "2", "3"))
val inputStream: Process[Task,Process[Task,String]] = Process.emitAll(input.map(Process.emitAll(_)))
val transposed: IndexedSeq[IndexedSeq[String]] = Endpoint.transpose(inputStream).runLog.run.map(_.runLog.run)
transposed should be (input.transpose)
}
val addr1 = new java.net.InetSocketAddress("localhost", 9000)
val addr2 = new java.net.InetSocketAddress("localhost", 9009)
val server1 = new CountServer
val server2 = new CountServer
val shutdown1 = NettyServer.serve(addr1, server1.environment).run
val shutdown2 = NettyServer.serve(addr2, server2.environment).run
override def afterAll() {
shutdown1.run
shutdown2.run
}
val endpoint1 = (NettyTransport.single(addr1) map Endpoint.single).run
val endpoint2 = (NettyTransport.single(addr2) map Endpoint.single).run
def endpoints: Process[Nothing,Endpoint] = Process.emitAll(List(endpoint1, endpoint2)) ++ endpoints
val endpointUber = Endpoint.uber(1 second, 10 seconds, 10, endpoints)
behavior of "uber"
ignore should "work" in { // this seems to hang
import Response.Context
import Remote.implicits._
import codecs._
val call = evaluate(endpointUber, Monitoring.empty)(CountClient.ping(1))
val i: Int = call.apply(Context.empty).run
val j: Int = call.apply(Context.empty).run
j should be (2)
}
}
| lech-glowiak/remotely | core/src/test/scala/UberSpec.scala | Scala | apache-2.0 | 3,512 |
package workload.rampups
import io.gatling.core.Predef._
/**
* Created by ebour.
*/
class AtOnceRampup extends workload.rampups.Rampup {
val userCount = System.getProperty("userCount", "1").toInt
override def getRampup: io.gatling.core.controller.inject.InjectionStep = {
atOnceUsers(userCount)
}
override def getDescription: String = {
userCount+" users are injected at once"
}
}
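// --- Illustrative sketch, not part of the original file ---
// A Gatling simulation could select this rampup and size it with -DuserCount=50, e.g.
//
//   val rampup: Rampup = new AtOnceRampup
//   setUp(myScenario.inject(rampup.getRampup)).protocols(httpProtocol)
//
// where myScenario and httpProtocol stand in for the simulation's own definitions.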
| ebour/mytoolbox | gatling/src/test/scala/workload/rampups/AtOnceRampup.scala | Scala | mit | 407 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.Test
class PartitionableSinkTest extends TableTestBase {
private val util = batchTestUtil()
util.addTableSource[(Long, Long, Long)]("MyTable", 'a, 'b, 'c)
createTable("sink", shuffleBy = false)
private def createTable(name: String, shuffleBy: Boolean): Unit = {
util.tableEnv.executeSql(
s"""
|create table $name (
| a bigint,
| b bigint,
| c bigint
|) partitioned by (b, c) with (
| 'connector' = 'filesystem',
| 'path' = '/non',
| ${if (shuffleBy) "'sink.shuffle-by-partition.enable'='true'," else ""}
| 'format' = 'testcsv'
|)
|""".stripMargin)
}
@Test
def testStatic(): Unit = {
util.verifyPlanInsert("INSERT INTO sink PARTITION (b=1, c=1) SELECT a FROM MyTable")
}
@Test
def testDynamic(): Unit = {
util.verifyPlanInsert("INSERT INTO sink SELECT a, b, c FROM MyTable")
}
@Test
def testDynamicShuffleBy(): Unit = {
createTable("sinkShuffleBy", shuffleBy = true)
util.verifyPlanInsert("INSERT INTO sinkShuffleBy SELECT a, b, c FROM MyTable")
}
@Test
def testPartial(): Unit = {
util.verifyPlanInsert("INSERT INTO sink PARTITION (b=1) SELECT a, c FROM MyTable")
}
@Test(expected = classOf[ValidationException])
def testWrongStatic(): Unit = {
util.verifyPlanInsert("INSERT INTO sink PARTITION (a=1) SELECT b, c FROM MyTable")
}
@Test(expected = classOf[ValidationException])
def testWrongFields(): Unit = {
util.verifyPlanInsert("INSERT INTO sink PARTITION (b=1) SELECT a, b, c FROM MyTable")
}
@Test
def testStaticWithValues(): Unit = {
thrown.expect(classOf[ValidationException])
thrown.expectMessage(
"INSERT INTO <table> PARTITION statement only support SELECT clause for now," +
" 'VALUES ROW(5)' is not supported yet")
util.verifyPlanInsert("INSERT INTO sink PARTITION (b=1, c=1) VALUES (5)")
}
}
| GJL/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/batch/sql/PartitionableSinkTest.scala | Scala | apache-2.0 | 2,949 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.plan.optimize.program.{BatchOptimizeContext, FlinkChainedProgram, FlinkHepRuleSetProgramBuilder, HEP_RULES_EXECUTION_TYPE}
import org.apache.flink.table.planner.utils.TableTestBase
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.calcite.tools.RuleSets
import org.junit.{Before, Test}
/**
* Tests for [[SimplifyFilterConditionRule]].
*/
class SimplifyFilterConditionRuleTest extends TableTestBase {
private val util = batchTestUtil()
@Before
def setup(): Unit = {
val programs = new FlinkChainedProgram[BatchOptimizeContext]()
programs.addLast(
"FilterSimplifyExpressions",
FlinkHepRuleSetProgramBuilder.newBuilder
.setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE)
.setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
.add(RuleSets.ofList(SimplifyFilterConditionRule.EXTENDED))
.build()
)
util.replaceBatchProgram(programs)
util.addTableSource[(Int, Long, String)]("x", 'a, 'b, 'c)
util.addTableSource[(Int, Long, String)]("y", 'd, 'e, 'f)
util.addTableSource[(Int, Long, String)]("z", 'i, 'j, 'k)
}
@Test
def testSimpleCondition(): Unit = {
util.verifyRelPlan(
"SELECT * FROM x WHERE (a = 1 AND b = 2) OR (NOT(a <> 1) AND c = 3) AND true")
}
@Test
def testSimplifyConditionInSubQuery1(): Unit = {
val sqlQuery = "SELECT * FROM x WHERE EXISTS " +
"(SELECT * FROM y WHERE (d = 1 AND e = 2) OR (NOT (d <> 1) AND e = 3)) AND true"
util.verifyRelPlan(sqlQuery)
}
@Test
def testSimplifyConditionInSubQuery2(): Unit = {
val sqlQuery = "SELECT * FROM x WHERE (a = 1 AND b = 2) OR (NOT (a <> 1) AND b = 3) " +
"AND true AND EXISTS (SELECT * FROM y WHERE d > 10)"
util.verifyRelPlan(sqlQuery)
}
@Test
def testSimplifyConditionInSubQuery3(): Unit = {
val sqlQuery = "SELECT * FROM x WHERE EXISTS " +
"(SELECT * FROM y WHERE d IN " +
"(SELECT i FROM z WHERE (i = 1 AND j = 2) OR (NOT (i <> 1) AND j = 3) AND true) AND e > 10)"
util.verifyRelPlan(sqlQuery)
}
@Test
def testComplexCondition1(): Unit = {
val sqlQuery = "SELECT * FROM x WHERE " +
"(a = 1 AND b = 2) OR (NOT(a <> 1) AND c = 3) AND true AND EXISTS " +
"(SELECT * FROM y WHERE x.a = y.d AND 2=2 AND " +
"(SELECT count(*) FROM z WHERE i = 5 AND j = 6) > 0)"
util.verifyRelPlan(sqlQuery)
}
@Test
def testComplexCondition2(): Unit = {
val sqlQuery = "SELECT * FROM x WHERE " +
"(a = 1 AND b = 2) OR (NOT(a <> 1) AND c = 3) AND true AND EXISTS " +
"(SELECT * FROM y WHERE x.a = y.d AND " +
"(SELECT count(*) FROM z WHERE (i = 5 AND j = 6) OR (NOT (i <> 5) AND j = 7) AND true) > 0)"
util.verifyRelPlan(sqlQuery)
}
@Test
def testComplexCondition3(): Unit = {
val sqlQuery = "SELECT * FROM x WHERE " +
"(a = 1 AND b = 2) OR (NOT(a <> 1) AND c = 3) AND true AND EXISTS " +
"(SELECT * FROM y WHERE x.a = y.d AND 2=2 AND " +
"(SELECT count(*) FROM z WHERE (i = 5 AND j = 6) OR (NOT (i <> 5) AND j = 7) AND true) > 0)"
util.verifyRelPlan(sqlQuery)
}
}
| tillrohrmann/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/SimplifyFilterConditionRuleTest.scala | Scala | apache-2.0 | 4,066 |
package ir.web.bing
import java.net.URL
import ir.Query
import ir.web
/**
* <pre>
* Created on 6/1/15.
* </pre>
* @param searchedPages searched pages
* @param query query
* @param startIndex starting index
* @param currentUrl current URL
* @param nextUrl next URL
* @author K.Sakamoto
*/
class BingSearchedPageList(override val searchedPages: Array[web.SearchedPage],
override val query: Query,
override val startIndex: Int,
override val currentUrl: URL,
override val nextUrl: URL) extends web.SearchedPageList(searchedPages, query, startIndex, currentUrl, nextUrl) | ktr-skmt/FelisCatusZero | src/main/scala/ir/web/bing/BingSearchedPageList.scala | Scala | apache-2.0 | 686 |
/*
* Copyright 2013 TeamNexus
*
* TeamNexus Licenses this file to you under the MIT License (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://opensource.org/licenses/mit-license.php
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License
*/
package com.nexus.data.json
class JsonNumber(private final val string: String) extends JsonValue {
if(this.string == null) throw new NullPointerException("Input may not be null")
override def toString: String = string
private [json] def write(writer: JsonWriter) = writer.write(string)
override def isNumber = true
override def asInt = string.toInt
override def asLong = string.toLong
override def asFloat = string.toFloat
override def asDouble = string.toDouble
override def hashCode = string.hashCode
override def equals(obj: Any): Boolean =
if(obj == null) false
else if(this.getClass ne obj.getClass) false
else this.string == obj.toString
} | nailed/nailed-legacy | src/main/scala/com/nexus/data/json/JsonNumber.scala | Scala | unlicense | 1,287 |
package com.typesafe.sbt.packager.linux
import sbt._
trait LinuxMappingDSL {
/** DSL for packaging files into .deb */
def packageMapping(files: (File, String)*) = LinuxPackageMapping(files)
/**
* @param dir - use some directory, e.g. target.value
* @param files - target paths (typically empty directories) to create in the package; each is mapped from `dir`
*/
def packageTemplateMapping(files: String*)(dir: File = new File(sys.props("java.io.tmpdir"))) =
LinuxPackageMapping(files map ((dir, _)))
// TODO can the packager.MappingsHelper be used here?
/**
* @see #mapDirectoryAndContents
* @param dirs - directories to map
*/
def packageDirectoryAndContentsMapping(dirs: (File, String)*) =
LinuxPackageMapping(mapDirectoryAndContents(dirs: _*))
/**
* This method includes files and directories.
*
* @param dirs - directories to map
*/
def mapDirectoryAndContents(dirs: (File, String)*): Seq[(File, String)] =
for {
(src, dest) <- dirs
path <- (src ***).get
} yield path -> path.toString.replaceFirst(src.toString, dest)
}
object Mapper extends LinuxMappingDSL
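// --- Illustrative sketch, not part of the original file ---
// With this DSL in scope (for example via `import Mapper._`), a config file and an empty
// log directory could be mapped roughly as below; the paths and the withPerms call are
// illustrative assumptions, not taken from this file:
//
//   linuxPackageMappings += packageMapping(
//     (sourceDirectory.value / "main" / "conf" / "app.conf") -> "/etc/myapp/app.conf"
//   ).withPerms("0644")
//
//   linuxPackageMappings += packageTemplateMapping("/var/log/myapp")(target.value)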
| kodemaniak/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/linux/LinuxMappingDSL.scala | Scala | bsd-2-clause | 1,058 |
package instagram.api.yaml
import play.api.mvc.{Action, Controller}
import play.api.data.validation.Constraint
import de.zalando.play.controllers._
import PlayBodyParsing._
import PlayValidations._
import scala.math.BigInt
import scala.math.BigDecimal
// ----- constraints and wrapper validations -----
class MediaIdOptConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class MediaIdOptValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new MediaIdOptConstraints(instance))
}
class UsersUser_idFollowsGetUser_idConstraints(override val instance: BigDecimal) extends ValidationBase[BigDecimal] {
override def constraints: Seq[Constraint[BigDecimal]] =
Seq()
}
class UsersUser_idFollowsGetUser_idValidator(instance: BigDecimal) extends RecursiveValidator {
override val validators = Seq(new UsersUser_idFollowsGetUser_idConstraints(instance))
}
class TagsTag_nameMediaRecentGetTag_nameConstraints(override val instance: String) extends ValidationBase[String] {
override def constraints: Seq[Constraint[String]] =
Seq()
}
class TagsTag_nameMediaRecentGetTag_nameValidator(instance: String) extends RecursiveValidator {
override val validators = Seq(new TagsTag_nameMediaRecentGetTag_nameConstraints(instance))
}
class LocationsLocation_idGetLocation_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class LocationsLocation_idGetLocation_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new LocationsLocation_idGetLocation_idConstraints(instance))
}
class LocationLatitudeOptConstraints(override val instance: BigDecimal) extends ValidationBase[BigDecimal] {
override def constraints: Seq[Constraint[BigDecimal]] =
Seq()
}
class LocationLatitudeOptValidator(instance: BigDecimal) extends RecursiveValidator {
override val validators = Seq(new LocationLatitudeOptConstraints(instance))
}
class MediaMedia_idCommentsDeleteMedia_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class MediaMedia_idCommentsDeleteMedia_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new MediaMedia_idCommentsDeleteMedia_idConstraints(instance))
}
class MediaFilterOptConstraints(override val instance: String) extends ValidationBase[String] {
override def constraints: Seq[Constraint[String]] =
Seq()
}
class MediaFilterOptValidator(instance: String) extends RecursiveValidator {
override val validators = Seq(new MediaFilterOptConstraints(instance))
}
class MediaMedia_idLikesGetMedia_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class MediaMedia_idLikesGetMedia_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new MediaMedia_idLikesGetMedia_idConstraints(instance))
}
class TagsTag_nameGetTag_nameConstraints(override val instance: String) extends ValidationBase[String] {
override def constraints: Seq[Constraint[String]] =
Seq()
}
class TagsTag_nameGetTag_nameValidator(instance: String) extends RecursiveValidator {
override val validators = Seq(new TagsTag_nameGetTag_nameConstraints(instance))
}
class MediaMedia_idLikesDeleteMedia_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class MediaMedia_idLikesDeleteMedia_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new MediaMedia_idLikesDeleteMedia_idConstraints(instance))
}
class MediaMedia_idCommentsGetMedia_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class MediaMedia_idCommentsGetMedia_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new MediaMedia_idCommentsGetMedia_idConstraints(instance))
}
class MediaSearchGetDISTANCEConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq(max(BigInt("5000"), false))
}
class MediaSearchGetDISTANCEValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new MediaSearchGetDISTANCEConstraints(instance))
}
class LocationsLocation_idMediaRecentGetLocation_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class LocationsLocation_idMediaRecentGetLocation_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new LocationsLocation_idMediaRecentGetLocation_idConstraints(instance))
}
class UsersUser_idFollowed_byGetUser_idConstraints(override val instance: BigDecimal) extends ValidationBase[BigDecimal] {
override def constraints: Seq[Constraint[BigDecimal]] =
Seq()
}
class UsersUser_idFollowed_byGetUser_idValidator(instance: BigDecimal) extends RecursiveValidator {
override val validators = Seq(new UsersUser_idFollowed_byGetUser_idConstraints(instance))
}
class MediaMedia_idGetMedia_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class MediaMedia_idGetMedia_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new MediaMedia_idGetMedia_idConstraints(instance))
}
class MediaMedia_idCommentsPostMedia_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class MediaMedia_idCommentsPostMedia_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new MediaMedia_idCommentsPostMedia_idConstraints(instance))
}
class MediaShortcodeGetShortcodeConstraints(override val instance: String) extends ValidationBase[String] {
override def constraints: Seq[Constraint[String]] =
Seq()
}
class MediaShortcodeGetShortcodeValidator(instance: String) extends RecursiveValidator {
override val validators = Seq(new MediaShortcodeGetShortcodeConstraints(instance))
}
class UsersUser_idGetUser_idConstraints(override val instance: BigDecimal) extends ValidationBase[BigDecimal] {
override def constraints: Seq[Constraint[BigDecimal]] =
Seq()
}
class UsersUser_idGetUser_idValidator(instance: BigDecimal) extends RecursiveValidator {
override val validators = Seq(new UsersUser_idGetUser_idConstraints(instance))
}
class GeographiesGeo_idMediaRecentGetGeo_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class GeographiesGeo_idMediaRecentGetGeo_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new GeographiesGeo_idMediaRecentGetGeo_idConstraints(instance))
}
class MediaMedia_idLikesPostMedia_idConstraints(override val instance: BigInt) extends ValidationBase[BigInt] {
override def constraints: Seq[Constraint[BigInt]] =
Seq()
}
class MediaMedia_idLikesPostMedia_idValidator(instance: BigInt) extends RecursiveValidator {
override val validators = Seq(new MediaMedia_idLikesPostMedia_idConstraints(instance))
}
class UsersSearchGetQConstraints(override val instance: String) extends ValidationBase[String] {
override def constraints: Seq[Constraint[String]] =
Seq()
}
class UsersSearchGetQValidator(instance: String) extends RecursiveValidator {
override val validators = Seq(new UsersSearchGetQConstraints(instance))
}
class UsersUser_idRelationshipPostUser_idConstraints(override val instance: BigDecimal) extends ValidationBase[BigDecimal] {
override def constraints: Seq[Constraint[BigDecimal]] =
Seq()
}
class UsersUser_idRelationshipPostUser_idValidator(instance: BigDecimal) extends RecursiveValidator {
override val validators = Seq(new UsersUser_idRelationshipPostUser_idConstraints(instance))
}
class UsersUser_idMediaRecentGetUser_idConstraints(override val instance: BigDecimal) extends ValidationBase[BigDecimal] {
override def constraints: Seq[Constraint[BigDecimal]] =
Seq()
}
class UsersUser_idMediaRecentGetUser_idValidator(instance: BigDecimal) extends RecursiveValidator {
override val validators = Seq(new UsersUser_idMediaRecentGetUser_idConstraints(instance))
}
// ----- complex type validators -----
// ----- option delegating validators -----
class MediaIdValidator(instance: MediaId) extends RecursiveValidator {
override val validators = instance.toSeq.map { new MediaIdOptValidator(_) }
}
class LocationLatitudeValidator(instance: LocationLatitude) extends RecursiveValidator {
override val validators = instance.toSeq.map { new LocationLatitudeOptValidator(_) }
}
class MediaFilterValidator(instance: MediaFilter) extends RecursiveValidator {
override val validators = instance.toSeq.map { new MediaFilterOptValidator(_) }
}
// ----- array delegating validators -----
// ----- catch all simple validators -----
// ----- call validations -----
class UsersUser_idGetValidator(user_id: BigDecimal) extends RecursiveValidator {
override val validators = Seq(
new UsersUser_idGetUser_idValidator(user_id)
)
}
class UsersUser_idFollowed_byGetValidator(user_id: BigDecimal) extends RecursiveValidator {
override val validators = Seq(
new UsersUser_idFollowed_byGetUser_idValidator(user_id)
)
}
class MediaMedia_idLikesGetValidator(media_id: BigInt) extends RecursiveValidator {
override val validators = Seq(
new MediaMedia_idLikesGetMedia_idValidator(media_id)
)
}
class LocationsSearchGetValidator(foursquare_v2_id: MediaId, facebook_places_id: MediaId, distance: MediaId, lat: LocationLatitude, foursquare_id: MediaId, lng: LocationLatitude) extends RecursiveValidator {
override val validators = Seq(
new MediaIdValidator(foursquare_v2_id),
new MediaIdValidator(facebook_places_id),
new MediaIdValidator(distance),
new LocationLatitudeValidator(lat),
new MediaIdValidator(foursquare_id),
new LocationLatitudeValidator(lng)
)
}
class MediaMedia_idCommentsDeleteValidator(media_id: BigInt) extends RecursiveValidator {
override val validators = Seq(
new MediaMedia_idCommentsDeleteMedia_idValidator(media_id)
)
}
class UsersSelfMediaLikedGetValidator(count: MediaId, max_like_id: MediaId) extends RecursiveValidator {
override val validators = Seq(
new MediaIdValidator(count),
new MediaIdValidator(max_like_id)
)
}
class TagsSearchGetValidator(q: MediaFilter) extends RecursiveValidator {
override val validators = Seq(
new MediaFilterValidator(q)
)
}
class MediaMedia_idCommentsGetValidator(media_id: BigInt) extends RecursiveValidator {
override val validators = Seq(
new MediaMedia_idCommentsGetMedia_idValidator(media_id)
)
}
class MediaMedia_idLikesDeleteValidator(media_id: BigInt) extends RecursiveValidator {
override val validators = Seq(
new MediaMedia_idLikesDeleteMedia_idValidator(media_id)
)
}
class MediaMedia_idGetValidator(media_id: BigInt) extends RecursiveValidator {
override val validators = Seq(
new MediaMedia_idGetMedia_idValidator(media_id)
)
}
class MediaShortcodeGetValidator(shortcode: String) extends RecursiveValidator {
override val validators = Seq(
new MediaShortcodeGetShortcodeValidator(shortcode)
)
}
class UsersSearchGetValidator(q: String, count: MediaFilter) extends RecursiveValidator {
override val validators = Seq(
new UsersSearchGetQValidator(q),
new MediaFilterValidator(count)
)
}
class MediaMedia_idCommentsPostValidator(media_id: BigInt, tEXT: LocationLatitude) extends RecursiveValidator {
override val validators = Seq(
new MediaMedia_idCommentsPostMedia_idValidator(media_id),
new LocationLatitudeValidator(tEXT)
)
}
class MediaMedia_idLikesPostValidator(media_id: BigInt) extends RecursiveValidator {
override val validators = Seq(
new MediaMedia_idLikesPostMedia_idValidator(media_id)
)
}
class UsersUser_idRelationshipPostValidator(user_id: BigDecimal, action: UsersUser_idRelationshipPostAction) extends RecursiveValidator {
override val validators = Seq(
new UsersUser_idRelationshipPostUser_idValidator(user_id),
new UsersUser_idRelationshipPostActionValidator(action)
)
}
class TagsTag_nameGetValidator(tag_name: String) extends RecursiveValidator {
override val validators = Seq(
new TagsTag_nameGetTag_nameValidator(tag_name)
)
}
class LocationsLocation_idGetValidator(location_id: BigInt) extends RecursiveValidator {
override val validators = Seq(
new LocationsLocation_idGetLocation_idValidator(location_id)
)
}
class LocationsLocation_idMediaRecentGetValidator(location_id: BigInt, max_timestamp: MediaId, min_timestamp: MediaId, min_id: MediaFilter, max_id: MediaFilter) extends RecursiveValidator {
override val validators = Seq(
new LocationsLocation_idMediaRecentGetLocation_idValidator(location_id),
new MediaIdValidator(max_timestamp),
new MediaIdValidator(min_timestamp),
new MediaFilterValidator(min_id),
new MediaFilterValidator(max_id)
)
}
class MediaSearchGetValidator(mAX_TIMESTAMP: MediaId, dISTANCE: BigInt, lNG: LocationLatitude, mIN_TIMESTAMP: MediaId, lAT: LocationLatitude) extends RecursiveValidator {
override val validators = Seq(
new MediaIdValidator(mAX_TIMESTAMP),
new MediaSearchGetDISTANCEValidator(dISTANCE),
new LocationLatitudeValidator(lNG),
new MediaIdValidator(mIN_TIMESTAMP),
new LocationLatitudeValidator(lAT)
)
}
class TagsTag_nameMediaRecentGetValidator(tag_name: String) extends RecursiveValidator {
override val validators = Seq(
new TagsTag_nameMediaRecentGetTag_nameValidator(tag_name)
)
}
class UsersUser_idFollowsGetValidator(user_id: BigDecimal) extends RecursiveValidator {
override val validators = Seq(
new UsersUser_idFollowsGetUser_idValidator(user_id)
)
}
class UsersUser_idMediaRecentGetValidator(user_id: BigDecimal, max_timestamp: MediaId, min_id: MediaFilter, min_timestamp: MediaId, max_id: MediaFilter, count: MediaId) extends RecursiveValidator {
override val validators = Seq(
new UsersUser_idMediaRecentGetUser_idValidator(user_id),
new MediaIdValidator(max_timestamp),
new MediaFilterValidator(min_id),
new MediaIdValidator(min_timestamp),
new MediaFilterValidator(max_id),
new MediaIdValidator(count)
)
}
class UsersSelfFeedGetValidator(count: MediaId, max_id: MediaId, min_id: MediaId) extends RecursiveValidator {
override val validators = Seq(
new MediaIdValidator(count),
new MediaIdValidator(max_id),
new MediaIdValidator(min_id)
)
}
class GeographiesGeo_idMediaRecentGetValidator(geo_id: BigInt, count: MediaId, min_id: MediaId) extends RecursiveValidator {
override val validators = Seq(
new GeographiesGeo_idMediaRecentGetGeo_idValidator(geo_id),
new MediaIdValidator(count),
new MediaIdValidator(min_id)
)
}
| zalando/play-swagger | play-scala-generator/src/test/resources/expected_results/validation/instagram_api_yaml.scala | Scala | mit | 15,965 |
/*
* Copyright 2009-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package actor {
import common._
trait ILAExecute {
def execute(f: () => Unit): Unit
def shutdown(): Unit
}
object LAScheduler {
@volatile
var onSameThread = false
/**
* Set this variable to the number of threads to allocate in the thread pool
*/
@volatile var threadPoolSize = 16 // issue 194
@volatile var maxThreadPoolSize = threadPoolSize * 25
@volatile
var createExecutor: () => ILAExecute = () => {
new ILAExecute {
import _root_.java.util.concurrent._
private val es = // Executors.newFixedThreadPool(threadPoolSize)
new ThreadPoolExecutor(threadPoolSize,
maxThreadPoolSize,
60,
TimeUnit.SECONDS,
new LinkedBlockingQueue)
def execute(f: () => Unit): Unit =
es.execute(new Runnable{def run() {f()}})
def shutdown(): Unit = {
es.shutdown()
}
}
}
@volatile
var exec: ILAExecute = _
def execute(f: () => Unit) {
synchronized {
if (exec eq null) {
exec = createExecutor()
}
exec.execute(f)
}
}
def shutdown() {
synchronized {
if (exec ne null) {
exec.shutdown()
}
exec = null
}
}
}
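// Hedged usage sketch (not part of the original Lift sources): LAScheduler builds its
// ThreadPoolExecutor lazily on the first execute() call, so any pool tuning has to
// happen before the first message is sent. The sizes below are illustrative
// assumptions, not Lift-recommended defaults.
private[actor] object LASchedulerTuningExample {
  def tuneBeforeFirstUse(): Unit = {
    LAScheduler.threadPoolSize = 32
    LAScheduler.maxThreadPoolSize = 32 * 25
    LAScheduler.execute(() => println("runs on the tuned pool"))
  }
}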
trait SpecializedLiftActor[T] extends SimpleActor[T] {
@volatile private[this] var processing = false
private[this] val baseMailbox: MailboxItem = new SpecialMailbox
@volatile private[this] var msgList: List[T] = Nil
@volatile private[this] var priorityMsgList: List[T] = Nil
@volatile private[this] var startCnt = 0
private class MailboxItem(val item: T) {
var next: MailboxItem = _
var prev: MailboxItem = _
/*
def find(f: MailboxItem => Boolean): Box[MailboxItem] =
if (f(this)) Full(this) else next.find(f)
*/
def remove() {
val newPrev = prev
prev.next = next
next.prev = prev
}
def insertAfter(newItem: MailboxItem): MailboxItem = {
next.prev = newItem
newItem.prev = this
newItem.next = this.next
next = newItem
newItem
}
def insertBefore(newItem: MailboxItem): MailboxItem = {
prev.next = newItem
newItem.prev = this.prev
newItem.next = this
prev = newItem
newItem
}
}
private class SpecialMailbox extends MailboxItem(null.asInstanceOf[T]) {
// override def find(f: MailboxItem => Boolean): Box[MailboxItem] = Empty
next = this
prev = this
}
private def findMailboxItem(start: MailboxItem, f: MailboxItem => Boolean): Box[MailboxItem] =
start match {
case x: SpecialMailbox => Empty
case x if f(x) => Full(x)
case x => findMailboxItem(x.next, f)
}
def !(msg: T): Unit = {
val toDo: () => Unit = baseMailbox.synchronized {
msgList ::= msg
if (!processing) {
if (LAScheduler.onSameThread) {
processing = true
() => processMailbox(true)
} else {
if (startCnt == 0) {
startCnt += 1
() => LAScheduler.execute(() => processMailbox(false))
} else
() => {}
}
}
else () => {}
}
toDo()
}
/**
 * This method inserts the message at the head of the mailbox.
 * It's protected because this functionality may or may not want
 * to be exposed.
*/
protected def insertMsgAtHeadOfQueue_!(msg: T): Unit = {
val toDo: () => Unit = baseMailbox.synchronized {
this.priorityMsgList ::= msg
if (!processing) {
if (LAScheduler.onSameThread) {
processing = true
() => processMailbox(true)
} else {
if (startCnt == 0) {
startCnt += 1
() => LAScheduler.execute(() => processMailbox(false))
} else
() => {}
}
}
else () => {}
}
toDo()
}
private def processMailbox(ignoreProcessing: Boolean) {
around {
proc2(ignoreProcessing)
}
}
/**
* A list of LoanWrappers that will be executed around the evaluation of mailboxes
*/
protected def aroundLoans: List[CommonLoanWrapper] = Nil
/**
* You can wrap calls around the evaluation of the mailbox. This allows you to set up
* the environment
*/
protected def around[R](f: => R): R = aroundLoans match {
case Nil => f
case xs => CommonLoanWrapper(xs)(f)
}
private def proc2(ignoreProcessing: Boolean) {
var clearProcessing = true
baseMailbox.synchronized {
if (!ignoreProcessing && processing) return
processing = true
if (startCnt > 0) startCnt = 0
}
val eh = exceptionHandler
def putListIntoMB(): Unit = {
if (!priorityMsgList.isEmpty) {
priorityMsgList.foldRight(baseMailbox)((msg, mb) => mb.insertAfter(new MailboxItem(msg)))
priorityMsgList = Nil
}
if (!msgList.isEmpty) {
msgList.foldLeft(baseMailbox)((mb, msg) => mb.insertBefore(new MailboxItem(msg)))
msgList = Nil
}
}
try {
while (true) {
baseMailbox.synchronized {
putListIntoMB()
}
var keepOnDoingHighPriory = true
while (keepOnDoingHighPriory) {
val hiPriPfBox = highPriorityReceive
if (hiPriPfBox.isDefined) {
val hiPriPf = hiPriPfBox.open_!
findMailboxItem(baseMailbox.next, mb => testTranslate(hiPriPf.isDefinedAt)(mb.item)) match {
case Full(mb) =>
mb.remove()
try {
execTranslate(hiPriPf)(mb.item)
} catch {
case e: Exception => if (eh.isDefinedAt(e)) eh(e)
}
case _ =>
baseMailbox.synchronized {
if (msgList.isEmpty) {
keepOnDoingHighPriory = false
}
else {
putListIntoMB()
}
}
          }
        }
        else { keepOnDoingHighPriory = false }
}
val pf = messageHandler
findMailboxItem(baseMailbox.next, mb => testTranslate(pf.isDefinedAt)(mb.item)) match {
case Full(mb) =>
mb.remove()
try {
execTranslate(pf)(mb.item)
} catch {
case e: Exception => if (eh.isDefinedAt(e)) eh(e)
}
case _ =>
baseMailbox.synchronized {
if (msgList.isEmpty) {
processing = false
clearProcessing = false
return
}
else {
putListIntoMB()
}
}
}
}
} catch {
case e =>
if (eh.isDefinedAt(e)) eh(e)
throw e
} finally {
if (clearProcessing) {
baseMailbox.synchronized {
processing = false
}
}
}
}
protected def testTranslate(f: T => Boolean)(v: T): Boolean = f(v)
protected def execTranslate(f: T => Unit)(v: T): Unit = f(v)
protected def messageHandler: PartialFunction[T, Unit]
protected def highPriorityReceive: Box[PartialFunction[T, Unit]] = Empty
protected def exceptionHandler: PartialFunction[Throwable, Unit] = {
case e => ActorLogger.error("Actor threw an exception", e)
}
}
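// Hedged example (not part of the original sources): the smallest useful subclass of
// SpecializedLiftActor only has to supply messageHandler; the mailbox machinery above
// handles scheduling, ordering and exception routing.
private[actor] class CountingActorExample extends SpecializedLiftActor[Int] {
  @volatile private[this] var total = 0
  override protected def messageHandler: PartialFunction[Int, Unit] = {
    case n => total += n
  }
  def current: Int = total
}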
object ActorLogger extends Logger {
}
private final case class MsgWithResp(msg: Any, future: LAFuture[Any])
trait LiftActor extends SpecializedLiftActor[Any]
with GenericActor[Any]
with ForwardableActor[Any, Any] {
@volatile
private[this] var responseFuture: LAFuture[Any] = null
protected final def forwardMessageTo(msg: Any, forwardTo: TypedActor[Any, Any]) {
if (null ne responseFuture) {
forwardTo match {
case la: LiftActor => la ! MsgWithResp(msg, responseFuture)
case other =>
reply(other !? msg)
}
} else forwardTo ! msg
}
def !<(msg: Any): LAFuture[Any] = {
val future = new LAFuture[Any]
this ! MsgWithResp(msg, future)
future
}
def !?(msg: Any): Any = {
val future = new LAFuture[Any]
this ! MsgWithResp(msg, future)
future.get
}
/**
* Compatible with Scala Actors' !? method
*/
def !?(timeout: Long, message: Any): Box[Any] =
this !! (message, timeout)
def !!(msg: Any, timeout: Long): Box[Any] = {
val future = new LAFuture[Any]
this ! MsgWithResp(msg, future)
future.get(timeout)
}
def !!(msg: Any): Box[Any] = {
val future = new LAFuture[Any]
this ! MsgWithResp(msg, future)
Full(future.get)
}
override protected def testTranslate(f: Any => Boolean)(v: Any) = v match {
case MsgWithResp(msg, _) => f(msg)
case v => f(v)
}
override protected def execTranslate(f: Any => Unit)(v: Any) = v match {
case MsgWithResp(msg, future) =>
responseFuture = future
try {
f(msg)
} finally {
responseFuture = null
}
case v => f(v)
}
protected def reply(v: Any) {
if (null ne responseFuture) {
responseFuture.satisfy(v)
}
}
}
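// Hedged example (not part of the original sources): reply() satisfies the LAFuture
// that !? and !! created above, so a synchronous ask looks like
//   val answer = (new PingActorExample) !? "ping"   // yields "pong"
private[actor] class PingActorExample extends LiftActor {
  override protected def messageHandler: PartialFunction[Any, Unit] = {
    case "ping" => reply("pong")
  }
}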
}
}
| wsaccaco/lift | framework/lift-base/lift-actor/src/main/scala/net/liftweb/actor/LiftActor.scala | Scala | apache-2.0 | 9,823 |
package com.coiney.akka.rabbit.protocol
sealed trait RabbitResponse
case class Success(request: RabbitRequest, result: Option[Any] = None) extends RabbitResponse
case class Failure(request: RabbitRequest, cause: Throwable) extends RabbitResponse
case class DisconnectedError(request: RabbitRequest) extends RabbitResponse
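// Hedged usage sketch (not part of the original file): consumers usually collapse a
// RabbitResponse with a single pattern match; RabbitRequest lives elsewhere in this
// protocol package.
object RabbitResponseExample {
  def describe(response: RabbitResponse): String = response match {
    case Success(_, result)   => s"succeeded with $result"
    case Failure(_, cause)    => s"failed: ${cause.getMessage}"
    case DisconnectedError(_) => "dropped: not connected"
  }
}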
| Coiney/akka-rabbit | akka-rabbit-core/src/main/scala/com/coiney/akka/rabbit/protocol/RabbitResponseProtocol.scala | Scala | bsd-3-clause | 324 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras.nn
import com.intel.analytics.bigdl.keras.KerasBaseSpec
import com.intel.analytics.bigdl.dllib.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.dllib.nn.internal.{AveragePooling3D, Sequential => KSequential}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import scala.util.Random
class AveragePooling3DSpec extends KerasBaseSpec {
"AveragePooling3D" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[3, 12, 12, 12])
|input = np.random.random([2, 3, 12, 12, 12])
|output_tensor = AveragePooling3D(dim_ordering="th")(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val layer = AveragePooling3D[Float](inputShape = Shape(3, 12, 12, 12))
seq.add(layer)
seq.getOutputShape().toSingle().toArray should be (Array(-1, 3, 6, 6, 6))
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode)
}
}
class AveragePooling3DSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val layer = AveragePooling3D[Float](inputShape = Shape(3, 12, 12, 12))
layer.build(Shape(2, 3, 12, 12, 12))
val input = Tensor[Float](2, 3, 12, 12, 12).apply1(_ => Random.nextFloat())
runSerializationTest(layer, input)
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/keras/nn/AveragePooling3DSpec.scala | Scala | apache-2.0 | 2,149 |
/*
* Copyright 2016 Nikolay Smelik
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalabot.common.web
import akka.actor.{Actor, ActorLogging, ActorSystem, PoisonPill}
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
/**
* Created by Nikolay.Smelik on 7/6/2016.
*/
class Webhook(host: String, port: Int) extends Actor with ActorLogging {
implicit val system: ActorSystem = context.system
implicit val executionContext: ExecutionContext = context.system.dispatcher
implicit val materializer = ActorMaterializer()
private[this] var routesMap: Map[String, Route] = Map.empty
private[this] var binder: Future[ServerBinding] = _
private[this] var isWorking: Boolean = false
def startListening(routes: Route): Unit = {
binder = Http().bindAndHandle(routes, host, port)
binder.onFailure {
case ex: Exception => ex.printStackTrace()
}
if (!isWorking) log.info(s"Webhook start on $host:$port")
isWorking = true
}
def restartListening(routes: Route) = {
val futureResult = binder flatMap(_.unbind())
Await.result(futureResult, 5 seconds)
startListening(routes)
}
override def receive: Receive = {
case AddRoute(id, route) =>
routesMap += (id -> route)
val newRoutes = routesMap.values.foldLeft(reject.asInstanceOf[Route])({
(accum, route) => accum ~ route
})
if (isWorking) restartListening(newRoutes) else startListening(newRoutes)
case StartWebhook => if (isWorking) restartListening(reject) else startListening(reject)
case StopWebhook =>
binder.flatMap(_.unbind()).onComplete(_ => {
log.info("Webhook stopped")
routesMap = Map.empty
isWorking = false
})
case PoisonPill => binder.flatMap(_.unbind())
}
}
case class AddRoute(id: String, route: Route)
case object StartWebhook
case object StopWebhook
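/**
 * Hedged usage sketch (not part of the original file): a bot registers its callback
 * route by messaging the webhook actor. The host, port and path below are
 * illustrative assumptions only.
 */
object WebhookUsageExample {
  import akka.actor.Props

  def register(system: ActorSystem): Unit = {
    val webhook = system.actorOf(Props(new Webhook("localhost", 8080)), "webhook")
    webhook ! StartWebhook
    webhook ! AddRoute("telegram", path("telegram") {
      complete("registered")
    })
  }
}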
| kerzok/ScalaBot | BotApi/src/main/scala/scalabot/common/web/Webhook.scala | Scala | apache-2.0 | 2,619 |
package slick.migration.ast
import scala.slick.ast.FieldSymbol
/**
* Internal lightweight data structure, containing
* information for schema manipulation about an index
* @param table The Slick table object
* @param name The name of the index
* @param unique Whether the column can contain duplicates
* @param columns The columns that this index applies to, as `scala.slick.ast.FieldSymbol`
*/
private[migration] case class IndexInfo(table: TableInfo, name: String, unique: Boolean = false, columns: Seq[FieldSymbol] = Seq())
| itryapitsin/slick-migration | core/src/main/scala/slick/migration/ast/IndexInfo.scala | Scala | apache-2.0 | 536 |
package com.twitter.finatra.kafkastreams.config
import com.twitter.conversions.DurationOps._
import com.twitter.conversions.StorageUnitOps._
import java.util
import org.apache.kafka.common.config.TopicConfig.{
CLEANUP_POLICY_COMPACT,
CLEANUP_POLICY_CONFIG,
DELETE_RETENTION_MS_CONFIG,
SEGMENT_BYTES_CONFIG
}
import scala.collection.JavaConverters._
import scala.collection.mutable
object DefaultTopicConfig {
/**
* Default changelog topic configs generally suitable for non-windowed use cases using FinatraTransformer.
* We explicitly do not enable cleanup-policy: compact,delete
* because we'd rather rely on FinatraTransformer PersistentTimers to handle expiration/deletes
   * (which gives us more control over when and how expirations can occur).
*/
def FinatraChangelogConfig: util.Map[String, String] = mutable
.Map(
CLEANUP_POLICY_CONFIG -> CLEANUP_POLICY_COMPACT,
SEGMENT_BYTES_CONFIG -> 100.megabytes.inBytes.toString,
DELETE_RETENTION_MS_CONFIG -> 5.minutes.inMillis.toString //configure delete retention such that standby replicas have 5 minutes to read deletes
).asJava
}
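/**
 * Hedged usage sketch (not part of the original object): the config map above is
 * typically attached to a persistent store's changelog. The store name, serdes and
 * the exact Kafka Streams API version are assumptions for illustration only.
 */
object DefaultTopicConfigExample {
  import org.apache.kafka.common.serialization.Serdes
  import org.apache.kafka.streams.state.Stores

  // Builds a persistent key-value store whose changelog topic uses the Finatra defaults.
  val exampleStoreBuilder = Stores
    .keyValueStoreBuilder(Stores.persistentKeyValueStore("example-store"), Serdes.String(), Serdes.Long())
    .withLoggingEnabled(DefaultTopicConfig.FinatraChangelogConfig)
}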
| twitter/finatra | kafka-streams/kafka-streams/src/main/scala/com/twitter/finatra/kafkastreams/config/DefaultTopicConfig.scala | Scala | apache-2.0 | 1,138 |
/*-
* #%L
* Core runtime for OOXOO
* %%
* Copyright (C) 2006 - 2017 Open Design Flow
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* #L%
*/
package com.idyria.osi.ooxoo.core.buffers.datatypes.hash
import com.idyria.osi.ooxoo.core.buffers.datatypes.XSDStringBuffer
import org.odfi.tea.hash.HashUtils
import scala.language.implicitConversions
/**
 * This buffer does not hash while streaming in or out; it only hashes when the
 * value is set directly (i.e. not during stream-in).
*
*/
class SHA256StringBuffer extends XSDStringBuffer {
override def set(data:String) = {
super.set(HashUtils.hashBytesAsHex(data.getBytes, "SHA-256"))
}
override def equals(comp:String) = {
this.data == comp ||
this.data == HashUtils.hashBytesAsHex(comp.getBytes, "SHA-256")
}
}
object SHA256StringBuffer {
implicit def convertFromStringToSHA256Buffer(str:String) : SHA256StringBuffer = {
var b = new SHA256StringBuffer
b.set(str)
b
}
}
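/**
 * Hedged usage sketch (not part of the original file): set() stores the SHA-256 hex
 * digest of the clear text, and equals() accepts either the digest or the clear text.
 */
object SHA256StringBufferExample {
  def demo(): Boolean = {
    val buffer = new SHA256StringBuffer
    buffer.set("secret")
    buffer.equals("secret") // true: the candidate is re-hashed before comparison
  }
}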
| richnou/ooxoo-core | ooxoo-core/src/main/scala/com/idyria/osi/ooxoo/core/buffers/datatypes/hash/SHA256String.scala | Scala | agpl-3.0 | 1,574 |
package japgolly.scalajs
import japgolly.scalajs.react.util.DefaultEffects._
package object react
extends japgolly.scalajs.react.internal.CoreGeneralF[Sync]
with japgolly.scalajs.react.internal.ReactCallbackExtensions
with japgolly.scalajs.react.callback.Exports
with japgolly.scalajs.react.ReactCats
| japgolly/scalajs-react | coreBundleCBIO/src/main/scala/japgolly/scalajs/react/package.scala | Scala | apache-2.0 | 320 |
package chapter1
import org.scalatest.{FreeSpec, Matchers}
class Question4Spec extends FreeSpec with Matchers {
import Question4._
"Should be replaced" in {
replace("Mr John Smith ".toCharArray) shouldBe "Mr%20John%20Smith"
replace("Mr JohnSmith ".toCharArray) shouldBe "Mr%20%20JohnSmith"
replace(" Mr JohnSmith ".toCharArray) shouldBe "%20Mr%20JohnSmith"
}
}
| alexandrnikitin/algorithm-sandbox | scala/src/test/scala/chapter1/Question4Spec.scala | Scala | mit | 391 |
package com.v_standard.vsp.compiler
import com.typesafe.scalalogging.slf4j.Logging
import com.v_standard.vsp.script.ScriptDefine
import com.v_standard.utils.ResourceUtil
import com.v_standard.utils.ResourceUtil.using
import java.io.File
/**
 * Token trait.
 */
trait Token {
/** String representation of the token. */
protected val tokenStr = new StringBuilder
/**
 * Append a character.
 *
 * @param ch character to append
 */
def +=(ch: Char) = tokenStr.append(ch)
/**
 * Append a string.
 *
 * @param str string to append
 */
def +=(str: String) = tokenStr.append(str)
/**
 * Get the script representation.
 *
 * @return script representation
 */
def toScript: String
}
/**
 * String (literal text) token class.
 */
class StringToken() extends Token {
override def toScript: String = {
val sb = new StringBuilder
tokenStr.foreach {
case '\t' => sb.append("\\t")
case '\r' => sb.append("\\r")
case '\n' => sb.append("\\n")
case '\\' => sb.append("\\\\")
case '"' => sb.append("\\\"")
case c => sb.append(c)
}
"print(\"" + sb.toString + "\");\n"
}
}
/**
 * Print (output) token class.
 */
class PrintToken extends Token {
override def toScript: String = {
"print(" + ScriptDefine.SCRIPT_OBJ_NAME + ".escape((" + tokenStr.toString + ") == null ? \"\" : (" +
tokenStr.toString + ")));\n"
}
}
/**
 * Syntax token class.
 */
class SyntaxToken extends Token {
override def toScript: String = {
tokenStr.toString + "\n"
}
}
/**
 * Include token class.
 */
class IncludeToken(context: ScriptConverterContext) extends Token with Logging {
override def toScript: String = {
if (ScriptDefine.MAX_INCLUDE <= context.deep)
throw new IllegalStateException("Failed to include. count(" + context.deep + ")")
val f = new File(context.config.baseDir.getPath, tokenStr.toString.trim)
logger.debug("Include file: " + f.getAbsolutePath)
using(ResourceUtil.getSource(f)) { r =>
val res = ScriptConverter.convert(r, context.config, context.deep + 1)
if (!res._2) context.textOnly = false
context.includeFiles += f.getCanonicalFile
context.includeFiles ++= res._3
res._1
}
}
}
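/**
 * Hedged example (not part of the original file): a literal fragment becomes a
 * print statement once escaped by StringToken.toScript.
 */
object TokenExample {
  def literalToScript(): String = {
    val token = new StringToken
    token += "Hello\t\"world\"\n"
    token.toScript // yields: print("Hello\t\"world\"\n");
  }
}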
| VanishStandard/vsp | src/main/scala/com/v_standard/vsp/compiler/Token.scala | Scala | bsd-3-clause | 2,255 |
package logful.server
import java.util.concurrent.TimeUnit
import io.gatling.core.Predef._
import logful.server.config.LogFileReqConfig
import scala.concurrent.duration.FiniteDuration
class SmallLogRampUserPerSecSimulation extends Simulation {
val from = 100
val to = 1300
val time = 60
val during = new FiniteDuration(time, TimeUnit.SECONDS)
val second = during.toSeconds
val c = new LogFileReqConfig((0.6 * ((from + to) * time)).toInt)
setUp(c.scn.inject(rampUsersPerSec(from) to to during during).protocols(c httpProtocol))
}
| foxundermoon/gatling-test | src/gatling/scala/logful/server/SmallLogRampUserPerSecSimulation.scala | Scala | mit | 550 |
package scalarules.test
object Exported {
def message: String = {
// terrible, don't do this in real code:
val msg = Class.forName("scalarules.test.Runtime")
.newInstance
.toString
"you all, everybody. " + msg
}
}
| bazelbuild/rules_scala | test_version/version_specific_tests_dir/Exported.scala | Scala | apache-2.0 | 243 |
/*
* Copyright 2017-2018 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package freestyle.free.loggingJVM.log4s
object implicits extends freestyle.tagless.loggingJVM.log4s.Implicits | frees-io/freestyle | modules/logging/jvm/src/main/scala/free/log4s.scala | Scala | apache-2.0 | 739 |
/*
* Copyright 2013-2016 Tsukasa Kitachi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package configs.syntax
import configs.testutil.instance.config._
import configs.testutil.instance.string._
import configs.testutil.instance.symbol._
import configs.{ConfigObject, ConfigValue}
import scala.jdk.CollectionConverters._
import scalaprops.Property.forAll
import scalaprops.Scalaprops
import scalaz.Monoid
import scalaz.syntax.equal._
object RichConfigObjectTest extends Scalaprops {
val + = forAll { (co: ConfigObject, k: Symbol, v: Int) =>
val result = co + (k -> v)
ConfigValue.fromAny(v).exists(_ === result.get(k.name))
}
val - = forAll { (co: ConfigObject, d: Symbol) =>
val k = co.keySet().asScala.headOption.fold(d)(Symbol(_))
(co.isEmpty || co.get(k.name) != null) && {
val result = co - k
result.get(k.name) == null
}
}
val `++ Seq` =
forAll { (co: ConfigObject, kvs: List[(Symbol, Int)], dupSize: Int) =>
val dup = co.asScala.keys.take(dupSize).map(Symbol(_) -> 42).toSeq
val result = co ++ kvs ++ dup
(kvs ++ dup).groupBy(_._1).forall {
case (k, vs) => ConfigValue.fromAny(vs.last._2).exists(_ === result.get(k.name))
case _ => true
}
}
val `++ Map` =
forAll { (co: ConfigObject, kvs: Map[Symbol, Int], dupSize: Int) =>
val dup = co.asScala.keys.take(dupSize).map(Symbol(_) -> 42).toMap
val result = co ++ kvs ++ dup
(kvs ++ dup).forall {
case (k, v) => ConfigValue.fromAny(v).exists(_ === result.get(k.name))
}
}
val `++ ConfigObject` = forAll { (co1: ConfigObject, co2: ConfigObject) =>
val result = co1 ++ co2
co2.asScala.forall {
case (k, v) => result.get(k) === v
}
}
val withComments = forAll { (co: ConfigObject, xs: List[String]) =>
val wc: ConfigObject = co.withComments(xs)
wc.origin().comments() == xs.asJava
}
implicit lazy val configObjectMonoid: Monoid[ConfigObject] =
Monoid.instance(_ ++ _, ConfigObject.empty)
val `++/empty monoid` = scalaprops.scalazlaws.monoid.all[ConfigObject]
}
| kxbmap/configs | core/src/test/scala/configs/syntax/RichConfigObjectTest.scala | Scala | apache-2.0 | 2,612 |
package org.bitcoins.testkit
import akka.actor.ActorSystem
import com.typesafe.config._
import org.bitcoins.dlc.oracle.config.DLCOracleAppConfig
import org.bitcoins.server.BitcoinSAppConfig
import org.bitcoins.testkit.keymanager.KeyManagerTestUtil
import org.bitcoins.testkit.util.FileUtil
import org.bitcoins.testkit.util.TorUtil.torEnabled
import org.bitcoins.testkitcore.Implicits.GeneratorOps
import org.bitcoins.testkitcore.gen.{NumberGenerator, StringGenerators}
import java.nio.file._
import scala.concurrent.ExecutionContext
object BitcoinSTestAppConfig {
  /** Generates a temp directory with the prefix 'bitcoin-s-' */
def tmpDir(): Path = Files.createTempDirectory("bitcoin-s-")
def genWalletNameConf: Config = {
val walletNameOpt = if (NumberGenerator.bool.sampleSome) {
Some(StringGenerators.genNonEmptyString.sampleSome)
} else None
walletNameOpt match {
case Some(walletName) =>
ConfigFactory.parseString(
s"bitcoin-s.wallet.walletName = $walletName"
)
case None => ConfigFactory.empty()
}
}
/** App configuration suitable for test purposes:
*
* 1) Data directory is set to user temp directory
* 2) Logging is turned down to WARN
*/
def getSpvTestConfig(config: Config*)(implicit
system: ActorSystem): BitcoinSAppConfig = {
val overrideConf = ConfigFactory.parseString {
s"""
|bitcoin-s {
| node {
| mode = spv
| }
| wallet {
| allowExternalDLCAddresses = true
| }
| proxy.enabled = $torEnabled
| tor.enabled = $torEnabled
| tor.use-random-ports = false
|}
""".stripMargin
}
BitcoinSAppConfig(tmpDir(), (overrideConf +: config).toVector)
}
def getSpvWithEmbeddedDbTestConfig(
pgUrl: () => Option[String],
config: Vector[Config])(implicit
system: ActorSystem): BitcoinSAppConfig = {
val overrideConf = ConfigFactory
.parseString {
s"""
|bitcoin-s {
| node {
| mode = spv
| }
| proxy.enabled = $torEnabled
| tor.enabled = $torEnabled
| tor.use-random-ports = false
|}
""".stripMargin
}
.withFallback(genWalletNameConf)
BitcoinSAppConfig(
tmpDir(),
(overrideConf +: configWithEmbeddedDb(project = None, pgUrl) +: config))
}
def getNeutrinoTestConfig(config: Config*)(implicit
system: ActorSystem): BitcoinSAppConfig = {
val overrideConf = ConfigFactory.parseString {
s"""
|bitcoin-s {
| node {
| mode = neutrino
| relay = true
| }
|
| proxy.enabled = $torEnabled
| tor.enabled = $torEnabled
| tor.use-random-ports = false
|}
""".stripMargin
}
BitcoinSAppConfig(tmpDir(), (overrideConf +: config).toVector)
}
def getNeutrinoWithEmbeddedDbTestConfig(
pgUrl: () => Option[String],
config: Config*)(implicit system: ActorSystem): BitcoinSAppConfig = {
val overrideConf = ConfigFactory
.parseString {
s"""
|bitcoin-s {
| node {
| mode = neutrino
| relay = true
| }
| proxy.enabled = $torEnabled
| tor.enabled = $torEnabled
| tor.use-random-ports = false
|}
""".stripMargin
}
.withFallback(genWalletNameConf)
BitcoinSAppConfig(
tmpDir(),
(overrideConf +: configWithEmbeddedDb(project = None,
pgUrl) +: config).toVector)
}
def getDLCOracleAppConfig(config: Config*)(implicit
ec: ExecutionContext): DLCOracleAppConfig = {
val overrideConf = KeyManagerTestUtil.aesPasswordOpt match {
case Some(value) =>
ConfigFactory.parseString {
s"""
|bitcoin-s.oracle.aesPassword = $value
""".stripMargin
}
case None =>
ConfigFactory.empty()
}
DLCOracleAppConfig(tmpDir(), (overrideConf +: config).toVector)
}
def getDLCOracleWithEmbeddedDbTestConfig(
pgUrl: () => Option[String],
config: Config*)(implicit ec: ExecutionContext): DLCOracleAppConfig = {
val overrideConf = KeyManagerTestUtil.aesPasswordOpt match {
case Some(value) =>
ConfigFactory.parseString {
s"""
|bitcoin-s.oracle.aesPassword = $value
""".stripMargin
}
case None =>
ConfigFactory.empty()
}
DLCOracleAppConfig(
tmpDir(),
(overrideConf +: configWithEmbeddedDb(project = None,
pgUrl) +: config).toVector)
}
sealed trait ProjectType
object ProjectType {
case object Wallet extends ProjectType
case object Node extends ProjectType
case object Chain extends ProjectType
case object Oracle extends ProjectType
case object DLC extends ProjectType
case object Test extends ProjectType
val all = List(Wallet, Node, Chain, Oracle, DLC, Test)
}
/** Generates a Typesafe config with DBs set to memory
* databases for the given project (or all, if no
* project is given). This configuration can then be
   * given as an override to other configs.
*/
def configWithEmbeddedDb(
project: Option[ProjectType],
pgUrl: () => Option[String]): Config = {
def pgConfigForProject(project: ProjectType): String = {
val name = project.toString.toLowerCase()
val url = pgUrl().getOrElse(
throw new RuntimeException(s"Cannot get db url for $project"))
val parts = url.split(":")
require(parts.size >= 3 && parts(0) == "jdbc",
s"`$url` must be a valid JDBC URL")
val str = parts(3)
val endOfPortStr = str.indexOf('/')
val (port, _) = str.splitAt(endOfPortStr)
val projectString = project match {
case ProjectType.Wallet => "wallet"
case ProjectType.Chain => "chain"
case ProjectType.Node => "node"
case ProjectType.Oracle => "oracle"
case ProjectType.DLC => "dlc"
case ProjectType.Test => "test"
}
val poolName =
s"bitcoin-s-$projectString-pool-${System.currentTimeMillis()}"
s""" $name.profile = "slick.jdbc.PostgresProfile$$"
| $name.db {
| driverName = postgres
| name = postgres
| url = "$url"
| driver = "org.postgresql.Driver"
| user = "postgres"
| password = "postgres"
| poolName = "$poolName"
| port = $port
| numThreads = 1
| keepAliveConnection = true
| }""".stripMargin
}
def configForProject(project: ProjectType): String = {
if (pgUrl().isDefined)
pgConfigForProject(project)
else
""
}
val confStr = project match {
case None => ProjectType.all.map(configForProject).mkString("\\n")
case Some(p) => configForProject(p)
}
val nestedConfStr = s"""
| bitcoin-s {
| $confStr
| }
|""".stripMargin
ConfigFactory.parseString(nestedConfStr)
}
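  /** Hedged example (not part of the original object): when no Postgres URL is
   * available, configForProject above returns the empty string, so the generated
   * block is empty and tests fall back to the default database config.
   */
  def exampleWalletOnlyEmbeddedDbConfig: Config =
    configWithEmbeddedDb(project = Some(ProjectType.Wallet), pgUrl = () => None)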
def deleteAppConfig(app: BitcoinSAppConfig): Boolean = {
FileUtil.deleteTmpDir(app.walletConf.baseDatadir) &&
FileUtil.deleteTmpDir(app.chainConf.baseDatadir) &&
FileUtil.deleteTmpDir(app.nodeConf.baseDatadir)
}
}
| bitcoin-s/bitcoin-s | testkit/src/main/scala/org/bitcoins/testkit/BitcoinSTestAppConfig.scala | Scala | mit | 7,564 |
/**
* Author:
* Yujian Zhang <yujian{dot}zhang[at]gmail(dot)com>
* Description:
* Check for popCount.
* License:
* GNU General Public License v2
* http://www.gnu.org/licenses/gpl-2.0.html
* Copyright (C) 2010-2012 Yujian Zhang
*/
import org.scalatest.junit.JUnit3Suite
import org.scalatest.prop.Checkers
import org.scalacheck.Prop._
import org.scalacheck.Arbitrary._
import net.whily.unichess.engine.PopCount._
class PopCountSuite extends JUnit3Suite with Checkers {
def testPopCount() {
def naivePopCount(x: Long): Long = {
var count = 0L
var y = x
for (i ← 0 until 64) {
count = count + (y & 1L)
y = y >> 1
}
count
}
assert(popCount(-1L) === 64)
check((x: Long) ⇒ popCount(x) == naivePopCount(x))
}
}
| whily/unichess | src/test/scala/net/whily/unichess/engine/PopCountCheck.scala | Scala | gpl-2.0 | 798 |
package week04
object session {
//Peano numbers
abstract class Nat {
def isZero: Boolean
def predecessor: Nat
def successor: Nat = new Succ(this)
def +(that: Nat): Nat
def -(that: Nat): Nat
def numerical: Integer
override def toString() = numerical.toString()
}
object Zero extends Nat {
def isZero: Boolean = true
def predecessor: Nat = throw new Error("negative number")
def +(that: Nat): Nat = that
def -(that: Nat): Nat = if (that.isZero) Zero else throw new Error("negative number")
def numerical: Integer = 0
}
class Succ(n: Nat) extends Nat {
def isZero: Boolean = false
def predecessor: Nat = n
def +(that: Nat): Nat = new Succ(n + that)
def -(that: Nat): Nat = if (that.isZero) this else n - that.predecessor
def numerical: Integer = n.numerical + 1
};import org.scalaide.worksheet.runtime.library.WorksheetSupport._; def main(args: Array[String])=$execute{;$skip(906); val res$0 =
Zero.successor + Zero.successor.successor.successor;System.out.println("""res0: week04.session.Nat = """ + $show(res$0));$skip(12); val res$1 =
List();System.out.println("""res1: List[Nothing] = """ + $show(res$1));$skip(14); val res$2 =
List(1,"2");System.out.println("""res2: List[Any] = """ + $show(res$2));$skip(12); val res$3 =
List(1,2);System.out.println("""res3: List[Int] = """ + $show(res$3));$skip(16); val res$4 =
List("1","2");System.out.println("""res4: List[String] = """ + $show(res$4));$skip(25); val res$5 =
1 :: 2 :: List(3,4);System.out.println("""res5: List[Int] = """ + $show(res$5));$skip(26); val res$6 =
List(1,2) ::: List(3,4);System.out.println("""res6: List[Int] = """ + $show(res$6));$skip(21); val res$7 =
List().::(1).::(2);System.out.println("""res7: List[Int] = """ + $show(res$7));$skip(11); val res$8 =
1 :: Nil;System.out.println("""res8: List[Int] = """ + $show(res$8))}
}
| panga/progfun-assignments | progfun/.worksheet/src/week04.session.scala | Scala | mit | 1,921 |
package com.geeksville.dapi
import akka.actor.Actor
import akka.actor.ActorLogging
import com.geeksville.mavlink.TimestampedMessage
import com.geeksville.dapi.model.Vehicle
import akka.actor.Props
import com.geeksville.mavlink.LogBinaryMavlink
import akka.actor.ActorRef
import akka.actor.PoisonPill
import java.io.File
import com.geeksville.flight.VehicleModel
import com.geeksville.akka.InstrumentedActor
import com.geeksville.mavlink.SendYoungest
import org.mavlink.messages.MAVLinkMessage
import com.geeksville.dapi.model.Mission
import scala.concurrent.blocking
import java.io.BufferedInputStream
import java.io.FileInputStream
import java.util.UUID
import com.geeksville.dapi.model.MissionSummary
import com.geeksville.mavlink.TimestampedMessage
import com.geeksville.util.Throttled
import com.geeksville.akka.NamedActorClient
import akka.actor.ActorRefFactory
import com.geeksville.akka.MockAkka
import org.mavlink.messages.ardupilotmega.msg_heartbeat
import scala.concurrent.duration._
import com.github.aselab.activerecord.dsl._
import com.geeksville.mavlink.FlushNowMessage
import java.sql.Timestamp
import com.geeksville.apiproxy.APIConstants
import scala.collection.mutable.HashSet
import com.geeksville.mavlink.MavlinkUtils
/// Sent to the LiveVehicleActor when a GCS connects to the server
/// @param wantsControl if true this GCS wants to control a vehicle which is already connected to the LiveVehicleActor
case class GCSConnected(wantsControl: Boolean)
case class GCSDisconnected()
/// Sent from LiveVehicleActor to GCSActor when we need to tell the GCS to hang up (probably because the vehicle just called in through a different GCS)
case object VehicleDisconnected
// We would like the live vehicle to reply with an Option[Array[Byte]] of tlog bytes
case object GetTLogMessage
/**
 * An actor that represents a connection to a live vehicle. GCSAdapters use this object to store mavlink received from the vehicle,
 * and publishes from this object can cause GCSAdapters to send messages to the vehicle (from the web).
*
* Supported message types:
* TimestampedMessage - used to add to the running log/new data received from the vehicle
* GCSConnected - sent by the GCSActor when the vehicle first connects
* GCSDisconnected - sent by the GCSActor when the vehicle disconnects
*/
class LiveVehicleActor(val vehicle: Vehicle, canAcceptCommands: Boolean)
extends VehicleModel(maxUpdatePeriod = 5000) with ActorLogging {
import LiveVehicleActor._
import context._
/// Our LogBinaryMavlink actor
private var tloggerOpt: Option[ActorRef] = None
/// We reserve a tlog ID at mission start - but don't use it until mission end
private var tlogId: Option[UUID] = None
private var myTlogFile: Option[File] = None
/// The mission we are creating
private var missionOpt: Option[Mission] = None
/// This is the GCS providing the connection to our vehicle
private var gcsActor: Option[ActorRef] = None
/// This is the set of GCSes that are trying to _control_ our vehicle
private val controllingGCSes = HashSet[ActorRef]()
private case object SendUpdateTickMsg
// We periodically send mission updates to any interested subscriber (mainly so SpaceSupervisor can
// stay up to date)
val updateTickInterval = 60 seconds
val updateTickSender = system.scheduler.schedule(updateTickInterval,
updateTickInterval,
self,
SendUpdateTickMsg)
// Since we are on a server, we don't want to inadvertently spam the vehicle
this.listenOnly = !canAcceptCommands
// autoWaypointDownload = false
autoParameterDownload = false
maxStreamRate = Some(1) // Tell vehicle to stream at 1Hz
override def toString = s"LiveVehicle: $vehicle"
/**
* We always claim to be a ground controller (FIXME, find a better way to pick a number)
* 255 is mission planner
* 253 is andropilot
*/
override def systemId = 252
override def postStop() {
updateTickSender.cancel()
super.postStop()
}
override def onReceive = mReceive.orElse(super.onReceive)
private def mReceive: InstrumentedActor.Receiver = {
case GCSConnected(wantsControl) =>
log.debug(s"GCS connected (GCS=$sender) wantsControl=$wantsControl")
if (!wantsControl) {
// It is possible for a GCS to drop a connection and then callback into a 'live'
// vehicle instance. In that case, we just mark that gcs as our new owner
gcsActor.foreach { old =>
log.warning(s"Vehicle reconnection, hanging up on old GCS $old")
stopMission()
old ! VehicleDisconnected
}
gcsActor = Some(sender)
} else {
log.debug(s"WebController $sender connected")
controllingGCSes.add(sender)
}
case GCSDisconnected() =>
// Vehicle should only be connected through one gcs actor at a time
if (Some(sender) == gcsActor) {
log.debug("GCS to vehicle disconnected")
gcsActor = None
stopMission() // In case client forgot
// FIXME - should I kill myself? - FIXME - need to use supervisors to do reference counting
// Do this someplace else?
controllingGCSes.foreach(_ ! VehicleDisconnected)
controllingGCSes.clear()
self ! PoisonPill
} else {
log.debug("WebController disconnected")
// Confirm that the sender really was a controller
assert(controllingGCSes.remove(sender))
}
case msg: StartMissionMsg =>
log.debug(s"Handling $msg")
startMission(msg)
case msg: StopMissionMsg =>
log.debug(s"Handling $msg")
// Update user preferences on keeping this tlog at all
missionOpt.foreach(_.keep = msg.keep)
stopMission(msg.notes)
case SendUpdateTickMsg =>
sendMissionUpdate()
case GetTLogMessage =>
sender ! getTlogBytes()
// Handle messages inbound from one of our connected GCSlinks
case msg: TimestampedMessage =>
val isFromVehicle = sender == gcsActor.get
//log.debug(s"Received ${MavlinkUtils.toString(msg.msg)} fromVehicle=$isFromVehicle")
// Forward msgs from vehicle to any GCSes who are trying to control it and vis a versa
val forwardTo: Iterable[ActorRef] = if (isFromVehicle)
controllingGCSes
else
gcsActor
if (!forwardTo.isEmpty) {
// log.debug(s"Forwarding ${MavlinkUtils.toString(msg.msg)} to $forwardTo")
forwardTo.foreach(_ ! SendMavlinkToGCS(msg.msg))
}
// Log to the file
tloggerOpt.foreach { _ ! msg }
// Let our vehicle model update current time
super.onReceive(msg)
// Update our live model (be careful here to not requeue the message, but rather handle it in this same callback
// to preserve order
if (receive.isDefinedAt(msg.msg))
receive(msg.msg)
}
/**
* Called when we want to send messages to the vehicle
*
* m must be a SendYoungest or a MAVLinkMessage
*/
override protected def handlePacket(m: Any) {
val msg = m match {
case x: MAVLinkMessage => x
case SendYoungest(x) => x
}
assert(msg != null)
// Some messages we never want to send to the client
val isBlacklist = msg match {
case x: msg_heartbeat if x.sysId == systemId => // Our embedded model is sending this - no need to waste bandwidth
true
case _ =>
false
}
if (!isBlacklist) {
log.debug(s"handlePacket: forwarding ${MavlinkUtils.toString(msg)} to vehicle")
if (listenOnly)
throw new Exception(s"$vehicle can not accept $msg")
else
gcsActor.foreach(_ ! SendMavlinkToGCS(msg))
}
}
/**
* We modify the actor to copy to S3 after the file is closed
*/
private class TlogToS3Actor(filename: File, mission: Mission) extends LogBinaryMavlink(filename, deleteIfBoring = false, wantImprovedFilename = false) {
override protected def onFileClose() {
if (mission.keep) {
log.debug(s"Copying to s3: $tlogId")
// Copy to S3
val src = new BufferedInputStream(new FileInputStream(file), 8192)
Mission.putBytes(tlogId.get.toString, src, file.length(), APIConstants.flogMimeType)
} else
log.warning("Mission marked as no-keep - not copying to S3")
tlogId = None
file.delete()
}
}
def getTlogBytes(): Option[Array[Byte]] = {
try {
myTlogFile.map { finalfile =>
// FIXME - super skanky - we use knowledge on where the temp file is stored
val file = new File(finalfile.getCanonicalPath() + ".tmp")
log.info(s"Reading working tlog from $file")
// Tell our tlogger to write to disk
// FIXME - wait for a reply
tloggerOpt.foreach { _ ! FlushNowMessage }
log.info(s"Returning tlog bytes to live actor client")
com.geeksville.util.Using.using(new FileInputStream(file)) { source =>
val byteArray = new Array[Byte](file.length.toInt)
source.read(byteArray)
byteArray
}
}
} catch {
case ex: Exception =>
log.error(s"Failed getting tlog due to $ex", ex)
None
}
}
def summary = MissionSummary(startTime.map { t => new Timestamp(TimestampedMessage.usecsToMsecs(t)) },
currentTime.map { t => new Timestamp(TimestampedMessage.usecsToMsecs(t)) },
maxAltitude, maxGroundSpeed, maxAirSpeed, -1, flightDuration, endPosition.map(_.lat), endPosition.map(_.lon), parameters.size,
softwareVersion = buildVersion, softwareGit = buildGit)
private def sendMissionUpdate() {
// we write updates to DB and are careful to reuse old summary ids
// if this becomes expensive we could remove db writes
// We only send updates when we have an active mission
missionOpt.foreach { m =>
updateDBSummary()
vehicle.updateFromMission(this)
//log.debug(s"Generating mission update (starttime=${m.summary.startTime}, curtime=$currentTime, loc=$endPosition): $m")
publishEvent(MissionUpdate(m))
}
}
/// Update our summary record in the DB
private def updateDBSummary() {
missionOpt.foreach { m =>
val ns = summary
val s: MissionSummary = m.summary
// Super yucky copies of summary updates
s.startTime = ns.startTime
s.endTime = ns.endTime
s.maxAlt = ns.maxAlt
s.maxGroundSpeed = ns.maxGroundSpeed
s.maxAirSpeed = ns.maxAirSpeed
s.maxG = ns.maxG
s.flightDuration = ns.flightDuration
s.latitude = ns.latitude
s.longitude = ns.longitude
s.softwareVersion = ns.softwareVersion
s.softwareGit = ns.softwareGit
// Don't copy text - it will be genned as needed
// s.text = ns.text
s.text = s.createText()
s.save()
}
}
private def startMission(msg: StartMissionMsg) = blocking {
// The following can fail if the client sends multiple start msgs
assert(!tlogId.isDefined)
tlogId = Some(UUID.randomUUID())
log.debug(s"Starting tlog $tlogId")
val f = LogBinaryMavlink.getFilename() // FIXME - create in temp directory instead
myTlogFile = Some(f)
val m = Mission.create(vehicle)
missionOpt = Some(m)
startTime = None
m.notes = msg.notes
tloggerOpt = Some(context.actorOf(Props(new TlogToS3Actor(f, m)), "tlogger"))
// Pull privacy from vehicle if not specified
var viewPriv = msg.viewPrivacy.getOrElse(AccessCode.DEFAULT).id
if (viewPriv == AccessCode.DEFAULT_VALUE)
viewPriv = vehicle.viewPrivacy
m.viewPrivacy = viewPriv
m.keep = msg.keep
m.isLive = true
m.save()
// Add the initial summary record
val s = summary
s.create
s.mission := m
s.save()
// Find the space controller for our location
val space = SpaceSupervisor.find()
eventStream.subscribe(space, (x: Any) => true) // HUGE FIXME - we should subscribe only to the messages we care about
publishEvent(MissionStart(m))
log.debug(s"wrote Mission: $m")
}
private def stopMission(notes: Option[String] = None) {
log.debug("Stopping mission")
// Close the tlog and upload to s3
tloggerOpt.foreach { a =>
a ! PoisonPill
tloggerOpt = None
}
myTlogFile = None
missionOpt.foreach { m =>
blocking {
m.isLive = false
m.tlogId = tlogId.map(_.toString)
updateDBSummary()
vehicle.updateFromMission(this)
publishEvent(MissionStop(m))
val interesting = m.isInteresting
if (m.keep && interesting) {
log.debug("Saving mission")
m.save()
} else {
if (!interesting)
log.warning("mission ended up boring, deleting")
else
log.warning("No-keep mission, deleting")
m.delete()
}
}
missionOpt = None
tlogId = None
}
}
}
object LiveVehicleActor {
private implicit val context: ActorRefFactory = MockAkka.system
private val actors = new NamedActorClient("live")
private val msgLogThrottle = new Throttled(5000)
/**
* Find the supervisor responsible for a particular vehicle
*/
def findOrCreate(vehicle: Vehicle, canAcceptCommands: Boolean) = actors.getOrCreate(vehicle.uuid.toString, Props(new LiveVehicleActor(vehicle, canAcceptCommands)))
def find(vehicle: Vehicle) = actors.get(vehicle.uuid.toString)
}
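/**
 * Hedged usage sketch (not part of the original file): a GCS adapter resolves the
 * per-vehicle actor through the companion object above and announces itself with
 * GCSConnected; wantsControl = false marks it as the vehicle link rather than a web
 * controller. This object exists purely for illustration.
 */
private object LiveVehicleActorUsageExample {
  def connect(vehicle: Vehicle, gcs: ActorRef): Unit = {
    val live = LiveVehicleActor.findOrCreate(vehicle, canAcceptCommands = true)
    live.tell(GCSConnected(wantsControl = false), gcs)
  }
}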
| dronekit/dronekit-server | src/main/scala/com/geeksville/dapi/LiveVehicleActor.scala | Scala | gpl-3.0 | 13,330 |
import sbt._
import org.scalajs.sbtplugin.cross.CrossType
/**
* ScalaJS CrossType that removed the project name from
* the default cross-build directory structure,
* creating the following layout:
*
* ```
* - root/
* - shared / src / {main,test} / scala
* - jvm / src / {main,test} / scala
* - js / src / {main,test} / scala
* ```
*/
object DoodleCrossType extends CrossType {
def projectDir(crossBase: File, projectType: String): File =
(crossBase / ".." / projectType).getCanonicalFile
def sharedSrcDir(projectBase: File, conf: String): Option[File] =
Some((projectBase / ".." / "shared" / "src" / conf / "scala").getCanonicalFile)
}
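// Hedged usage note (not part of the original file): in the build definition this is
// typically wired up as
//   lazy val doodle = crossProject.crossType(DoodleCrossType).in(file("."))
// which produces the shared/jvm/js layout documented above.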
| Angeldude/doodle | project/DoodleCrossType.scala | Scala | apache-2.0 | 681 |
package fm.common.test.classutil
trait TestTrait {
def foo: String
} | frugalmechanic/fm-common | jvm/src/test/scala/fm/common/test/classutil/TestTrait.scala | Scala | apache-2.0 | 71 |
package controllers
import play.api.mvc._
import play.api.libs
import play.api.libs.json._
import models._
// if one of the methods is not implemented, just set it = TODO
// e.g. def detail = TODO
abstract class tControllerCRUD[V] extends Controller with Secured{
implicit val myJsonFormat:play.api.libs.json.OFormat[V]
val myForm:play.api.data.Form[V]
def listing:play.api.mvc.EssentialAction
// todo: go beyond simple definition
/* = IsAuthenticated{user => implicit request =>
gCtrl.singleF.bindFromRequest.fold(
e => BadRequest("error"),
v => Ok("ok")
)
}*/
def detail:play.api.mvc.EssentialAction
def update:play.api.mvc.EssentialAction
def delete:play.api.mvc.EssentialAction
} | Nexysweb/play-helpers | traits/tController.scala | Scala | gpl-2.0 | 715 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.holdenkarau.spark.testing
import scala.math.abs
import scala.util.hashing.MurmurHash3
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.hive._
import org.apache.spark.sql.types.StructType
import org.scalatest.BeforeAndAfterAll
import org.scalatest.FunSuite
/**
* :: Experimental ::
* Base class for testing Spark DataFrames.
*/
trait DataFrameSuiteBase extends FunSuite with BeforeAndAfterAll
with SharedSparkContext {
val maxCount = 10
@transient private var _sqlContext: HiveContext = _
def sqlContext: HiveContext = _sqlContext
override def beforeAll() {
super.beforeAll()
_sqlContext = new HiveContext(sc)
}
override def afterAll() {
super.afterAll()
_sqlContext = null
}
/**
   * Checks whether two [[DataFrame]]s are equal: first compares the schemas and, if they
   * match, then compares the rows.
*/
def equalDataFrames(expected: DataFrame, result: DataFrame) {
equalSchema(expected.schema, result.schema)
expected.rdd.cache()
result.rdd.cache()
val expectedRDD = zipWithIndex(expected.rdd)
val resultRDD = zipWithIndex(result.rdd)
assert(expectedRDD.count() == resultRDD.count())
val unequal = expectedRDD.cogroup(resultRDD).filter{case (idx, (r1, r2)) =>
!(r1.isEmpty || r2.isEmpty) &&
!(r1.head.equals(r2.head) || DataFrameSuiteBase.approxEquals(r1.head, r2.head, 0.0))
}.take(maxCount)
assert(unequal === List())
expected.rdd.unpersist()
result.rdd.unpersist()
}
/**
   * Zips RDDs with precise indexes. This is used so we can join the Rows of two
   * DataFrames together regardless of their source, while still comparing based on
   * the order.
*/
private def zipWithIndex[T](input: RDD[T]): RDD[(Int, T)] = {
val counts = input.mapPartitions{itr => Iterator(itr.size)}.collect()
val countSums = counts.scanLeft(0)(_ + _).zipWithIndex.map{case (x, y) => (y, x)}.toMap
input.mapPartitionsWithIndex{case (idx, itr) => itr.zipWithIndex.map{case (y, i) =>
(i + countSums(idx), y)}
}
}
/**
   * Checks whether two [[DataFrame]]s are approximately equal: verifies that the schemas
   * are the same, and compares inexact (floating point) fields using the tolerance `tol`.
*/
def approxEqualDataFrames(expected: DataFrame, result: DataFrame, tol: Double) {
equalSchema(expected.schema, result.schema)
expected.rdd.cache()
result.rdd.cache()
val expectedRDD = zipWithIndex(expected.rdd)
val resultRDD = zipWithIndex(result.rdd)
val cogrouped = expectedRDD.cogroup(resultRDD)
val unequal = cogrouped.filter{case (idx, (r1, r2)) =>
(r1.isEmpty || r2.isEmpty) || (
!DataFrameSuiteBase.approxEquals(r1.head, r2.head, tol))
}.take(maxCount)
expected.rdd.unpersist()
result.rdd.unpersist()
assert(unequal === List())
}
/**
* Compares the schema
*/
def equalSchema(expected: StructType, result: StructType): Unit = {
assert(expected.treeString === result.treeString)
}
}
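/**
 * Hedged example (not part of the original file): a minimal suite built on the trait
 * above; the column names and rows are illustrative only.
 */
class DataFrameSuiteBaseExample extends DataFrameSuiteBase {
  test("identical frames compare equal") {
    val ctx = sqlContext
    import ctx.implicits._
    val expected = sc.parallelize(Seq((1, "a"), (2, "b"))).toDF("id", "letter")
    val result = sc.parallelize(Seq((1, "a"), (2, "b"))).toDF("id", "letter")
    equalDataFrames(expected, result)
  }
}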
object DataFrameSuiteBase {
/** Approximate equality, based on equals from [[Row]] */
def approxEquals(r1: Row, r2: Row, tol: Double): Boolean = {
if (r1.length != r2.length) {
return false
} else {
var i = 0
val length = r1.length
while (i < length) {
if (r1.isNullAt(i) != r2.isNullAt(i)) {
return false
}
if (!r1.isNullAt(i)) {
val o1 = r1.get(i)
val o2 = r2.get(i)
o1 match {
case b1: Array[Byte] =>
if (!o2.isInstanceOf[Array[Byte]] ||
!java.util.Arrays.equals(b1, o2.asInstanceOf[Array[Byte]])) {
return false
}
case f1: Float if java.lang.Float.isNaN(f1) =>
if (!o2.isInstanceOf[Float] || ! java.lang.Float.isNaN(o2.asInstanceOf[Float])) {
return false
}
case d1: Double if java.lang.Double.isNaN(d1) =>
if (!o2.isInstanceOf[Double] || ! java.lang.Double.isNaN(o2.asInstanceOf[Double])) {
return false
}
case d1: java.math.BigDecimal if o2.isInstanceOf[java.math.BigDecimal] =>
if (d1.compareTo(o2.asInstanceOf[java.math.BigDecimal]) != 0) {
return false
}
case f1: Float if o2.isInstanceOf[Float] =>
if (abs(f1-o2.asInstanceOf[Float]) > tol) {
return false
}
case d1: Double if o2.isInstanceOf[Double] =>
if (abs(d1-o2.asInstanceOf[Double]) > tol) {
return false
}
case _ => if (o1 != o2) {
return false
}
}
}
i += 1
}
}
true
}
}
| jnadler/spark-testing-base | src/main/1.3/scala/com/holdenkarau/spark/testing/DataFrameSuiteBase.scala | Scala | apache-2.0 | 5,568 |
package travelling
import java.util.Properties
import java.io._
import scala.collection.mutable.Map
import scala.collection.JavaConversions._
object Letter {
val letters: Map[String, Letter] = Map()
val DEFAULT_LETTER = new Letter("\u0000", new Polygon(List(Vec(0, 0), Vec(40, 0), Vec(40, 100), Vec(0, 100), Vec(0, 0))))
def exists(character: String) = letters.contains(character)
def apply(character: String) = letters.getOrElse(character, DEFAULT_LETTER)
def parse() {
try {
letters.clear
val properties = new Properties
properties.load(new FileInputStream(Letters.lettersFile))
for (entity <- properties.keys()) {
val character = Entities.unEscape(entity.toString)
val s = properties.get(entity.toString)
val letter = parse(character.toString, s.toString)
letters.put(character.toString, letter)
}
} catch {
case ex: IOException => {
println("Error parsing properties" + ex.getMessage())
}
}
}
def parse(character: String, s: String): Letter = {
val points = s.split(" ").toList.map(Vec(_))
val poly = new Polygon(points)
//new Letter(character, poly.resampledByAmount(100))
new Letter(character, poly)
}
def save() {
try {
val properties = new Properties
for ((c, letter) <- letters) {
val entity = Entities.escape(letter.character(0))
val s = letter.toPropertiesString
properties.put(entity, s)
}
properties.store(new FileOutputStream(Letters.lettersFile), "")
} catch {
case ex: IOException => {
println("Error saving properties" + ex.getMessage())
}
}
}
}
class Letter(val character: String, var shape: Polygon) {
def replacePoint(index: Int, newPoint: Vec) {
var newPoints: List[Vec] = List()
for (i <- 0 until shape.points.size) {
var point = shape.points(i)
if (i == index) point = newPoint
newPoints = newPoints ::: List(point)
}
shape = new Polygon(newPoints)
}
def removePoint(index: Int) {
var newPoints: List[Vec] = List()
for (i <- 0 until shape.points.size) {
var point = shape.points(i)
if (i != index)
newPoints = newPoints ::: List(point)
}
shape = new Polygon(newPoints)
}
def splitSegment(index: Int) {
var newPoints: List[Vec] = List()
for (i <- 0 until shape.points.size) {
val point = shape.points(i)
if (i == index) {
val nextPoint = shape.points(i + 1)
val middlePoint = Vec.linePoint(0.5f, point.x, point.y, nextPoint.x, nextPoint.y)
newPoints = newPoints ::: List(point)
newPoints = newPoints ::: List(middlePoint)
} else {
newPoints = newPoints ::: List(point)
}
}
shape = new Polygon(newPoints)
}
def translate(offset: Vec) {
var newPoints: List[Vec] = List()
for (p <- shape.points) {
newPoints = newPoints ::: List(p + offset)
}
shape = new Polygon(newPoints)
}
def toPropertiesString() = {
val pointLetters = shape.points.map(p => "%.0f,%.0f".format(p.x, p.y))
pointLetters.mkString(" ")
}
override def toString = character
}
| fdb/travelling_letters | src/main/scala/travelling/Letter.scala | Scala | lgpl-3.0 | 3,187 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.spark.hbase.example
import org.apache.spark.SparkContext
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.io.Text
import org.apache.spark.SparkConf
import com.cloudera.spark.hbase.HBaseContext
object HBaseBulkPutExampleFromFile {
def main(args: Array[String]) {
if (args.length == 0) {
System.out.println("HBaseBulkPutExampleFromFile {tableName} {columnFamily} {inputFile}");
return;
}
val tableName = args(0)
val columnFamily = args(1)
val inputFile = args(2)
val sparkConf = new SparkConf().setMaster("local[*]").setAppName("HBaseBulkPutExampleFromFile " +
tableName + " " + columnFamily + " " + inputFile)
val sc = new SparkContext(sparkConf)
var rdd = sc.hadoopFile(
inputFile,
classOf[TextInputFormat],
classOf[LongWritable],
classOf[Text]).map(v => {
System.out.println("reading-" + v._2.toString())
v._2.toString()
})
val conf = HBaseConfiguration.create();
conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
conf.addResource(new Path("/etc/hbase/conf/hdfs-site.xml"));
conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
val hbaseContext = new HBaseContext(sc, conf);
hbaseContext.bulkPut[String](rdd,
tableName,
(putRecord) => {
System.out.println("hbase-" + putRecord)
val put = new Put(Bytes.toBytes("Value- " + putRecord))
put.add(Bytes.toBytes("c"), Bytes.toBytes("1"), Bytes.toBytes(putRecord.length()))
put
},
true);
}
} | jovigb/SparkOnHBase | src/main/scala/com/cloudera/spark/hbase/example/HBaseBulkPutExampleFromFile.scala | Scala | apache-2.0 | 2,700 |
/*
* @author Philip Stutz
*
* Copyright 2011 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.coordinator
import scala.Array.canBuildFrom
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.concurrent.future
import scala.language.postfixOps
import scala.util.Random
import com.signalcollect.Edge
import com.signalcollect.GraphEditor
import com.signalcollect.Vertex
import com.signalcollect.interfaces.AggregationOperation
import com.signalcollect.interfaces.ComplexAggregation
import com.signalcollect.interfaces.EdgeId
import com.signalcollect.interfaces.VertexToWorkerMapper
import com.signalcollect.interfaces.WorkerApi
import com.signalcollect.interfaces.WorkerStatistics
import com.signalcollect.interfaces.WorkerStatistics.apply
import com.signalcollect.interfaces.NodeStatistics
/**
 * Class that allows interacting with all the workers as if they were just one worker.
*/
class DefaultWorkerApi[Id, Signal](
val workers: Array[WorkerApi[Id, Signal]],
val mapper: VertexToWorkerMapper[Id])
extends WorkerApi[Id, Signal] {
protected val random = new Random
override def toString = "DefaultWorkerApi"
protected def futures[G](f: WorkerApi[Id, Signal] => G): Array[Future[G]] = {
workers map (worker => Future { f(worker) })
}
protected def get[G](f: Future[G]): G = Await.result(f, timeout)
protected def get[G](fs: Array[Future[G]]): List[G] = {
val futureOfCollection = Future.sequence(fs.toList)
get(futureOfCollection)
}
protected val timeout = 2 hours
def getIndividualWorkerStatistics: List[WorkerStatistics] = {
get(futures(_.getWorkerStatistics))
}
override def getWorkerStatistics: WorkerStatistics = {
getIndividualWorkerStatistics.fold(WorkerStatistics())(_ + _)
}
// TODO: Move to node.
def getIndividualNodeStatistics: List[NodeStatistics] = {
get(futures(_.getNodeStatistics))
}
override def getNodeStatistics: NodeStatistics = {
getIndividualNodeStatistics.fold(NodeStatistics())(_ + _)
}
override def signalStep = {
val stepResults = get(futures(_.signalStep))
stepResults.forall(_ == true)
}
override def collectStep: Boolean = {
val stepResults = get(futures(_.collectStep))
stepResults.forall(_ == true)
}
override def startComputation {
get(futures(_.startComputation))
}
override def pauseComputation = {
get(futures(_.pauseComputation))
}
override def recalculateScores = {
get(futures(_.recalculateScores))
}
override def recalculateScoresForVertexWithId(vertexId: Id) = {
workers(mapper.getWorkerIdForVertexId(vertexId)).recalculateScoresForVertexWithId(vertexId)
}
override def forVertexWithId[VertexType <: Vertex[Id, _, Id, Signal], ResultType](vertexId: Id, f: VertexType => ResultType): ResultType = {
workers(mapper.getWorkerIdForVertexId(vertexId)).forVertexWithId(vertexId, f)
}
override def foreachVertex(f: (Vertex[Id, _, Id, Signal]) => Unit) {
get(futures(_.foreachVertex(f)))
}
override def foreachVertexWithGraphEditor(f: GraphEditor[Id, Signal] => Vertex[Id, _, Id, Signal] => Unit) {
get(futures(_.foreachVertexWithGraphEditor(f)))
}
override def aggregateOnWorker[WorkerResult](aggregationOperation: ComplexAggregation[WorkerResult, _]): WorkerResult = {
throw new UnsupportedOperationException("DefaultWorkerApi does not support this operation.")
}
override def aggregateAll[WorkerResult, EndResult](aggregationOperation: ComplexAggregation[WorkerResult, EndResult]): EndResult = {
// TODO: Identify and fix bug that appears on large graphs with the TopK aggregator when using futures.
//val aggregateArray = futures(_.aggregateOnWorker(aggregationOperation)) map get
val workerAggregates = get(futures(_.aggregateOnWorker(aggregationOperation)))
aggregationOperation.aggregationOnCoordinator(workerAggregates)
}
override def setSignalThreshold(t: Double) {
get(futures(_.setSignalThreshold(t)))
}
override def setCollectThreshold(t: Double) = {
get(futures(_.setCollectThreshold(t)))
}
override def reset {
get(futures(_.reset))
}
override def initializeIdleDetection {
get(futures(_.initializeIdleDetection))
}
//----------------GraphEditor, BLOCKING variant-------------------------
/**
* Adds `vertex` to the graph.
*
* @note If a vertex with the same id already exists, then this operation will be ignored and NO warning is logged.
*/
override def addVertex(vertex: Vertex[Id, _, Id, Signal]) {
workers(mapper.getWorkerIdForVertexId(vertex.id)).addVertex(vertex)
}
/**
* Adds `edge` to the graph.
*
* @note If no vertex with the required source id is found, then the operation is ignored and a warning is logged.
* @note If an edge with the same id already exists, then this operation will be ignored and NO warning is logged.
*/
override def addEdge(sourceId: Id, edge: Edge[Id]) {
workers(mapper.getWorkerIdForVertexId(sourceId)).addEdge(sourceId, edge)
}
/**
* Processes `signal` on the worker that has the vertex with
* `vertex.id==edgeId.targetId`.
* Blocks until the operation has completed.
*/
override def processSignalWithSourceId(signal: Signal, targetId: Id, sourceId: Id) {
workers(mapper.getWorkerIdForVertexId(targetId)).processSignalWithSourceId(signal, targetId, sourceId)
}
/**
* Processes `signal` on the worker that has the vertex with
* `vertex.id==edgeId.targetId`.
* Blocks until the operation has completed.
*/
override def processSignalWithoutSourceId(signal: Signal, targetId: Id) {
workers(mapper.getWorkerIdForVertexId(targetId)).processSignalWithoutSourceId(signal, targetId)
}
/**
* Removes the vertex with id `vertexId` from the graph.
*
* @note If no vertex with this id is found, then the operation is ignored and a warning is logged.
*/
override def removeVertex(vertexId: Id) {
workers(mapper.getWorkerIdForVertexId(vertexId)).removeVertex(vertexId)
}
/**
* Removes the edge with id `edgeId` from the graph.
*
* @note If no vertex with the required source id is found, then the operation is ignored and a warning is logged.
   * @note If no edge with this id is found, then this operation will be ignored and a warning is logged.
*/
override def removeEdge(edgeId: EdgeId[Id]) {
workers(mapper.getWorkerIdForVertexId(edgeId.sourceId)).removeEdge(edgeId)
}
/**
* Runs a graph loading function on a worker
*/
def modifyGraph(graphModification: GraphEditor[Id, Signal] => Unit, vertexIdHint: Option[Id] = None) {
workers(workerIdForHint(vertexIdHint)).modifyGraph(graphModification)
}
/**
* Loads a graph using the provided iterator of `graphModification` functions.
*
* @note Does not block.
   * @note The vertexIdHint can be used to supply a characteristic vertex ID as a hint about which worker should run
   * the loading function, so that it can exploit locality.
* @note For distributed graph loading use separate calls of this method with vertexIdHints targeting different workers.
*/
def loadGraph(graphModifications: Iterator[GraphEditor[Id, Signal] => Unit], vertexIdHint: Option[Id]) {
workers(workerIdForHint(vertexIdHint)).loadGraph(graphModifications)
}
def snapshot {
get(futures(_.snapshot))
}
def restore {
get(futures(_.restore))
}
def deleteSnapshot = {
get(futures(_.deleteSnapshot))
}
protected def workerIdForHint(vertexIdHint: Option[Id]): Int = {
if (vertexIdHint.isDefined) {
mapper.getWorkerIdForVertexId(vertexIdHint.get)
} else {
random.nextInt(workers.length)
}
}
}
| danihegglin/DynDCO | src/main/scala/com/signalcollect/coordinator/DefaultWorkerApi.scala | Scala | apache-2.0 | 8,408 |
package libref
import leon.lang._
import leon.lang.synthesis.choose
import scala.language.postfixOps
import scala.language.implicitConversions
package object collection {
// implicit def setAsList[A] (set: Set[A]): List[A] = choose {
// (x: List[A]) => set == x.content
// }
case class SetOps[A] (set: Set[A]) {
def toList: List[A] = choose {
(x: List[A]) => set == x.content
}
def size = toList.size
def filter (p: A => Boolean): Set[A] =
toList.filter(p).content
def forall (p: A => Boolean): Boolean =
toList.forall(p)
def exists (p: A => Boolean): Boolean =
toList.exists(p)
}
implicit def extendedSet[A] (set: Set[A]) = SetOps(set)
}
| fmlab-iis/LibRef | collection/package.scala | Scala | gpl-3.0 | 713 |
package filodb.akkabootstrapper
import akka.cluster.Cluster
import com.typesafe.config.ConfigFactory
import org.scalatest.WordSpecLike
class ValidSeedValidatorSpec extends BaseSeedNodeDiscoverySpec(AbstractTestKit.head) {
"Valid WhitelistSeedValidator" must {
"return expected valid seed nodes for valid configuration" in {
val strategy = new WhitelistClusterSeedDiscovery(cluster, settings)
strategy.invalidSeedNodes.isEmpty shouldBe true
strategy.validSeedNodes.size shouldEqual settings.seedsWhitelist.size
strategy.discoverClusterSeeds.size shouldEqual strategy.validSeedNodes.size
}
}
}
class InvalidSeedValidatorSpec extends AbstractTestKit(
ConfigFactory.parseString(
s"""
|akka-bootstrapper.whitelist.seeds = [
| "akka.tcp://[email protected]:0", "akka://test:127.0.0.1:0", "akka.tcp://test@localhost" ]
""".stripMargin).withFallback(AbstractTestKit.rootConfig))
with WordSpecLike {
"Invalid WhitelistSeedValidator" must {
"return expected invalid seed nodes for invalid configuration" in {
val settings = new AkkaBootstrapperSettings(system.settings.config)
val strategy = new WhitelistClusterSeedDiscovery(Cluster(system), settings)
strategy.invalidSeedNodes.size shouldEqual settings.seedsWhitelist.size - 1
strategy.validSeedNodes.size shouldEqual settings.seedsWhitelist.size - 2
strategy.validSeedNodes.contains(strategy.cluster.selfAddress) shouldBe false
intercept[java.net.MalformedURLException](strategy.discoverClusterSeeds)
}
}
}
| velvia/FiloDB | akka-bootstrapper/src/test/scala/filodb/akkabootstrapper/SeedValidatorSpec.scala | Scala | apache-2.0 | 1,564 |
package org.scalaide.core.lexical
import org.junit.Assert._
import org.junit.Test
import org.junit.Before
import org.eclipse.jface.text._
class ScalaDocumentPartitionerTest {
@Test
def no_partition_change(): Unit = {
// 000000000011111111112222222222333333333344444444445
// 012345678901234567890123456789012345678901234567890
check("""/* comment */ "foo" /* comment */""", Replace(start = 5, finish = 7, text = "foo"), expectedNoRegion)
}
@Test
def modify_single_partition(): Unit = {
// 000000000011111111112222222222333333333344444444445
// 012345678901234567890123456789012345678901234567890
check("""/* comment */ "foo" /* comment */""", Insertion(point = 16, text = "XXX"), expectedNoRegion)
check("""/* comment */ "foo" /* comment *//* comment */""", Replace(start = 14, finish = 18, text = "/* */"), expectedRegion(14, 5))
}
@Test
def delete_partition_at_start_and_end_of_file(): Unit = {
// 000000000011111111112222222222333333333344444444445
// 012345678901234567890123456789012345678901234567890
check("""/* comment */ 42""", Deletion(start = 0, finish = 12), expectedRegion(0, 0))
check("""/* comment */ 42""", Deletion(start = 0, finish = 15), expectedRegion(0, 0))
check("""/* comment */ 42""", Deletion(start = 13, finish = 15), expectedRegion(13, 0))
}
private def expectedRegion(offset: Int, length: Int) = new Region(offset, length)
private def expectedNoRegion: IRegion = null
private def check(source: String, replacement: Replacement, expectedRegion: IRegion): Unit = {
val partitioner = ScalaCodePartitioner.documentPartitioner()
val actualRegion = changedPartitionsRegion(partitioner, source, replacement)
assertEquals(expectedRegion, actualRegion)
}
private def changedPartitionsRegion(partitioner: IDocumentPartitioner with IDocumentPartitionerExtension, source: String, replacement: Replacement): IRegion = {
implicit val doc = new Document(source)
partitioner.connect(doc)
val documentEvent = replacement.docEvent
doc.replace(documentEvent.getOffset, documentEvent.getLength, documentEvent.getText)
partitioner.documentChanged2(documentEvent)
}
sealed trait Replacement {
def docEvent(implicit doc: IDocument): DocumentEvent
}
case class Replace(start: Int, finish: Int, text: String) extends Replacement {
def docEvent(implicit doc: IDocument): DocumentEvent = new DocumentEvent(doc, start, finish - start + 1, text)
}
case class Deletion(start: Int, finish: Int) extends Replacement {
def docEvent(implicit doc: IDocument): DocumentEvent = new DocumentEvent(doc, start, finish - start + 1, "")
}
case class Insertion(point: Int, text: String) extends Replacement {
def docEvent(implicit doc: IDocument): DocumentEvent = new DocumentEvent(doc, point, 0, text)
}
}
| Kwestor/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/lexical/ScalaDocumentPartitionerTest.scala | Scala | bsd-3-clause | 2,884 |
/*
* Copyright 2020 University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.mesh
import scalismo.color.RGBA
import scalismo.geometry.{_3D, Point}
/**
* colored mesh with RGBA color per vertex
* @param shape positions
* @param color color of mesh surface, per point
*/
case class VertexColorMesh3D(shape: TriangleMesh3D, color: SurfacePointProperty[RGBA]) {
require(shape.triangulation == color.triangulation)
def transform(trafo: Point[_3D] => Point[_3D]): VertexColorMesh3D = {
val s = shape.transform { trafo }
copy(shape = s)
}
}
| unibas-gravis/scalismo | src/main/scala/scalismo/mesh/VertexColorMesh3D.scala | Scala | apache-2.0 | 1,135 |
package ru.finagram.api
/**
* This object represents an incoming callback query from a callback button in an inline keyboard.
* If the button that originated the query was attached to a message sent by the bot,
 * the field message will be present. If the button was attached to a message sent
 * via the bot (in inline mode), the field inline_message_id will be present.
*
* @param id Unique identifier for this query.
* @param from Sender.
* @param data Data associated with the callback button.
* Be aware that a bad client can send arbitrary data in this field.
* @param message Message with the callback button that originated the query.
* Note that message content and message date will not be available
* if the message is too old.
* @param inlineMessageId Identifier of the message sent via the bot in inline mode,
* that originated the query.
*/
case class CallbackQuery(
id: String,
from: User,
data: String,
message: Option[Message] = None,
inlineMessageId: Option[String] = None
)
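// A small, editor-added usage sketch (not part of the original API): shows how a handler
// might branch on the optional fields of an incoming CallbackQuery. The `query` value is
// assumed to be produced elsewhere by whatever component parses Telegram updates.
object CallbackQueryUsageSketch {
  def describe(query: CallbackQuery): String = query match {
    case CallbackQuery(_, _, data, Some(_), _)        => s"callback '$data' attached to a chat message"
    case CallbackQuery(_, _, data, _, Some(inlineId)) => s"callback '$data' from inline message $inlineId"
    case CallbackQuery(_, _, data, _, _)              => s"callback '$data' with no originating message"
  }
}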
| finagram/finagram | src/main/scala/ru/finagram/api/CallbackQuery.scala | Scala | mit | 1,086 |
/*
* WindowMaxIndex.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.fscape
package stream
import akka.stream.{Attributes, FanInShape2, Inlet, Outlet}
import de.sciss.fscape.stream.impl.logic.FilterWindowedInAOutB
import de.sciss.fscape.stream.impl.{Handlers, NodeImpl, StageImpl}
object WindowMaxIndex {
def apply[A, E <: BufElem[A]](in: Outlet[E], size: OutI)(implicit b: Builder, tpe: StreamType[A, E]): OutI = {
val stage0 = new Stage[A, E](b.layer)
val stage = b.add(stage0)
b.connect(in , stage.in0)
b.connect(size, stage.in1)
stage.out
}
private final val name = "WindowMaxIndex"
private type Shp[E] = FanInShape2[E, BufI, BufI]
private final class Stage[A, E <: BufElem[A]](layer: Layer)(implicit a: Allocator, tpe: StreamType[A, E])
extends StageImpl[Shp[E]](name) { stage =>
val shape: Shape = new FanInShape2(
in0 = Inlet[E](s"${stage.name}.in" ),
in1 = InI (s"${stage.name}.size"),
out = OutI (s"${stage.name}.out" )
)
def createLogic(attr: Attributes): NodeImpl[Shape] = {
val res: Logic[_, _] = if (tpe.isDouble) {
new Logic[Double, BufD](shape.asInstanceOf[Shp[BufD]], layer)(_ > _)
} else if (tpe.isInt) {
new Logic[Int , BufI](shape.asInstanceOf[Shp[BufI]], layer)(_ > _)
} else {
assert (tpe.isLong)
new Logic[Long , BufL](shape.asInstanceOf[Shp[BufL]], layer)(_ > _)
}
res.asInstanceOf[Logic[A, E]]
}
}
private final class Logic[@specialized(Args) A, E <: BufElem[A]](shape: Shp[E], layer: Layer)
(gt: (A, A) => Boolean)
(implicit a: Allocator, protected val tpe: StreamType[A, E])
extends FilterWindowedInAOutB[A, E, Int, BufI, A, Shp[E]](name, layer, shape)(shape.in0, shape.out) {
private[this] val hSize = Handlers.InIAux(this, shape.in1)(math.max(0 , _))
private[this] var index : Int = _
private[this] var maxValue: A = _
protected def clearWindowTail(): Unit = ()
protected def newWindowBuffer(n: Int): Array[A] = tpe.newArray(n)
protected def tryObtainWinParams(): Boolean = {
val ok = hSize.hasNext
if (ok) {
hSize.next()
index = -1
maxValue = tpe.minValue
}
ok
}
protected def winBufSize: Int = 0
override protected def readWinSize : Long = hSize.value
override protected def writeWinSize : Long = 1
protected def processWindow(): Unit = ()
override protected def readIntoWindow(n: Int): Unit = {
val in = hIn.array
val inOff = hIn.offset
var i = inOff
val stop = i + n
var _index = index
var _max = maxValue
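      // Offset that converts buffer indices to window-relative positions, so `index` is reported relative to the window start.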
val d = readOff.toInt - inOff
while (i < stop) {
val v = in(i)
if (gt(v, _max)) {
_max = v
_index = i + d
}
i += 1
}
maxValue = _max
index = _index
hIn.advance(n)
}
override protected def writeFromWindow(n: Int): Unit = {
assert (n == 1)
hOut.next(index)
}
}
}
| Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/stream/WindowMaxIndex.scala | Scala | agpl-3.0 | 3,412 |
package org.jetbrains.plugins.scala.codeInsight.hints
import java.{util => ju}
import com.intellij.codeInsight.hints.settings.{InlayProviderSettingsModel, InlaySettingsProvider}
import com.intellij.lang.Language
import com.intellij.openapi.project.Project
import org.jetbrains.plugins.scala.ScalaLanguage
import org.jetbrains.plugins.scala.codeInsight.hints.methodChains.ScalaMethodChainInlayHintsSettingsModel
import org.jetbrains.plugins.scala.codeInsight.hints.rangeHints.{ExclusiveRangeHintSettingsModel, RangeHintsForToAndUntilSettingsModel}
class ScalaTypeHintsSettingsProvider extends InlaySettingsProvider {
override def createModels(project: Project, language: Language): ju.List[InlayProviderSettingsModel] =
if (language == ScalaLanguage.INSTANCE) ju.Arrays.asList(
new ScalaTypeHintsSettingsModel(project),
new TypeMismatchHintsSettingsModel(project),
new ScalaMethodChainInlayHintsSettingsModel(project),
new RangeHintsForToAndUntilSettingsModel(project),
new ExclusiveRangeHintSettingsModel(project),
new ScalaGeneralTypeHintsSettingsModel
)
else ju.Collections.emptyList()
override def getSupportedLanguages(project: Project): ju.Collection[Language] =
ju.Collections.singletonList(ScalaLanguage.INSTANCE)
}
| JetBrains/intellij-scala | scala/codeInsight/src/org/jetbrains/plugins/scala/codeInsight/hints/ScalaTypeHintsSettingsProvider.scala | Scala | apache-2.0 | 1,282 |
package stormlantern.dockertestkit.orchestration
class Orchestration {
}
}
| dlouwers/reactive-consul | docker-testkit/src/main/scala/stormlantern/dockertestkit/orchestration/Orchestration.scala | Scala | mit | 75 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.nio.ByteOrder
import java.nio.charset.StandardCharsets
import java.util.{ArrayList => JArrayList}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.Failure
import scala.util.Try
import net.razorvine.pickle.{Pickler, Unpickler}
import org.apache.spark.SparkException
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
/** Utilities for serialization / deserialization between Python and Java, using Pickle. */
private[spark] object SerDeUtil extends Logging {
// Unpickle array.array generated by Python 2.6
class ArrayConstructor extends net.razorvine.pickle.objects.ArrayConstructor {
// /* Description of types */
// static struct arraydescr descriptors[] = {
// {'c', sizeof(char), c_getitem, c_setitem},
// {'b', sizeof(char), b_getitem, b_setitem},
// {'B', sizeof(char), BB_getitem, BB_setitem},
// #ifdef Py_USING_UNICODE
// {'u', sizeof(Py_UNICODE), u_getitem, u_setitem},
// #endif
// {'h', sizeof(short), h_getitem, h_setitem},
// {'H', sizeof(short), HH_getitem, HH_setitem},
// {'i', sizeof(int), i_getitem, i_setitem},
// {'I', sizeof(int), II_getitem, II_setitem},
// {'l', sizeof(long), l_getitem, l_setitem},
// {'L', sizeof(long), LL_getitem, LL_setitem},
// {'f', sizeof(float), f_getitem, f_setitem},
// {'d', sizeof(double), d_getitem, d_setitem},
    //     {'\0', 0, 0, 0} /* Sentinel */
// };
// TODO: support Py_UNICODE with 2 bytes
val machineCodes: Map[Char, Int] = if (ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN)) {
Map('c' -> 1, 'B' -> 0, 'b' -> 1, 'H' -> 3, 'h' -> 5, 'I' -> 7, 'i' -> 9,
'L' -> 11, 'l' -> 13, 'f' -> 15, 'd' -> 17, 'u' -> 21
)
} else {
Map('c' -> 1, 'B' -> 0, 'b' -> 1, 'H' -> 2, 'h' -> 4, 'I' -> 6, 'i' -> 8,
'L' -> 10, 'l' -> 12, 'f' -> 14, 'd' -> 16, 'u' -> 20
)
}
override def construct(args: Array[Object]): Object = {
if (args.length == 1) {
construct(args ++ Array(""))
} else if (args.length == 2 && args(1).isInstanceOf[String]) {
val typecode = args(0).asInstanceOf[String].charAt(0)
// This must be ISO 8859-1 / Latin 1, not UTF-8, to interoperate correctly
val data = args(1).asInstanceOf[String].getBytes(StandardCharsets.ISO_8859_1)
construct(typecode, machineCodes(typecode), data)
} else {
super.construct(args)
}
}
}
private var initialized = false
// This should be called before trying to unpickle array.array from Python
// In cluster mode, this should be put in closure
def initialize(): Unit = {
synchronized{
if (!initialized) {
Unpickler.registerConstructor("array", "array", new ArrayConstructor())
initialized = true
}
}
}
initialize()
/**
* Convert an RDD of Java objects to Array (no recursive conversions).
* It is only used by pyspark.sql.
*/
def toJavaArray(jrdd: JavaRDD[Any]): JavaRDD[Array[_]] = {
jrdd.rdd.map {
case objs: JArrayList[_] =>
objs.toArray
case obj if obj.getClass.isArray =>
obj.asInstanceOf[Array[_]].toArray
}.toJavaRDD()
}
/**
* Choose batch size based on size of objects
*/
private[spark] class AutoBatchedPickler(iter: Iterator[Any]) extends Iterator[Array[Byte]] {
private val pickle = new Pickler()
private var batch = 1
private val buffer = new mutable.ArrayBuffer[Any]
override def hasNext: Boolean = iter.hasNext
override def next(): Array[Byte] = {
while (iter.hasNext && buffer.length < batch) {
buffer += iter.next()
}
val bytes = pickle.dumps(buffer.toArray)
val size = bytes.length
// let 1M < size < 10M
if (size < 1024 * 1024) {
batch *= 2
} else if (size > 1024 * 1024 * 10 && batch > 1) {
batch /= 2
}
buffer.clear()
bytes
}
}
/**
* Convert an RDD of Java objects to an RDD of serialized Python objects, that is usable by
* PySpark.
*/
def javaToPython(jRDD: JavaRDD[_]): JavaRDD[Array[Byte]] = {
jRDD.rdd.mapPartitions { iter => new AutoBatchedPickler(iter) }
}
/**
* Convert an RDD of serialized Python objects to RDD of objects, that is usable by PySpark.
*/
def pythonToJava(pyRDD: JavaRDD[Array[Byte]], batched: Boolean): JavaRDD[Any] = {
pyRDD.rdd.mapPartitions { iter =>
initialize()
val unpickle = new Unpickler
iter.flatMap { row =>
val obj = unpickle.loads(row)
if (batched) {
obj match {
case array: Array[Any] => array.toSeq
case _ => obj.asInstanceOf[JArrayList[_]].asScala
}
} else {
Seq(obj)
}
}
}.toJavaRDD()
}
private def checkPickle(t: (Any, Any)): (Boolean, Boolean) = {
val pickle = new Pickler
val kt = Try {
pickle.dumps(t._1)
}
val vt = Try {
pickle.dumps(t._2)
}
(kt, vt) match {
case (Failure(kf), Failure(vf)) =>
logWarning(s"""
|Failed to pickle Java object as key: ${t._1.getClass.getSimpleName}, falling back
|to 'toString'. Error: ${kf.getMessage}""".stripMargin)
logWarning(s"""
|Failed to pickle Java object as value: ${t._2.getClass.getSimpleName}, falling back
|to 'toString'. Error: ${vf.getMessage}""".stripMargin)
(true, true)
case (Failure(kf), _) =>
logWarning(s"""
|Failed to pickle Java object as key: ${t._1.getClass.getSimpleName}, falling back
|to 'toString'. Error: ${kf.getMessage}""".stripMargin)
(true, false)
case (_, Failure(vf)) =>
logWarning(s"""
|Failed to pickle Java object as value: ${t._2.getClass.getSimpleName}, falling back
|to 'toString'. Error: ${vf.getMessage}""".stripMargin)
(false, true)
case _ =>
(false, false)
}
}
/**
* Convert an RDD of key-value pairs to an RDD of serialized Python objects, that is usable
* by PySpark. By default, if serialization fails, toString is called and the string
* representation is serialized
*/
def pairRDDToPython(rdd: RDD[(Any, Any)], batchSize: Int): RDD[Array[Byte]] = {
val (keyFailed, valueFailed) = rdd.take(1) match {
case Array() => (false, false)
case Array(first) => checkPickle(first)
}
rdd.mapPartitions { iter =>
val cleaned = iter.map { case (k, v) =>
val key = if (keyFailed) k.toString else k
val value = if (valueFailed) v.toString else v
Array[Any](key, value)
}
if (batchSize == 0) {
new AutoBatchedPickler(cleaned)
} else {
val pickle = new Pickler
cleaned.grouped(batchSize).map(batched => pickle.dumps(batched.asJava))
}
}
}
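  /**
   * Editor-added sketch (not part of the original API surface): pickles a small pair RDD
   * with automatic batching. Assumes the caller already owns a SparkContext.
   */
  private[spark] def examplePairPickle(sc: org.apache.spark.SparkContext): RDD[Array[Byte]] = {
    val pairs = sc.parallelize(Seq[(Any, Any)](("a", 1), ("b", 2)))
    pairRDDToPython(pairs, batchSize = 0)
  }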
/**
* Convert an RDD of serialized Python tuple (K, V) to RDD[(K, V)].
*/
def pythonToPairRDD[K, V](pyRDD: RDD[Array[Byte]], batched: Boolean): RDD[(K, V)] = {
def isPair(obj: Any): Boolean = {
Option(obj.getClass.getComponentType).exists(!_.isPrimitive) &&
obj.asInstanceOf[Array[_]].length == 2
}
val rdd = pythonToJava(pyRDD, batched).rdd
rdd.take(1) match {
case Array(obj) if isPair(obj) =>
// we only accept (K, V)
case Array() =>
// we also accept empty collections
case Array(other) => throw new SparkException(
s"RDD element of type ${other.getClass.getName} cannot be used")
}
rdd.map { obj =>
val arr = obj.asInstanceOf[Array[_]]
(arr.head.asInstanceOf[K], arr.last.asInstanceOf[V])
}
}
}
| wangyixiaohuihui/spark2-annotation | core/src/main/scala/org/apache/spark/api/python/SerDeUtil.scala | Scala | apache-2.0 | 8,899 |
package net.magik6k.lxcadmin
import net.magik6k.jwwf.core.{MainFrame, User}
import net.magik6k.lxcadmin.panel.MainPanel
class Client extends User {
override def initializeUser(rootFrame: MainFrame) {
try {
rootFrame.setTitle("LXC Admin")
rootFrame.put(new MainPanel(this))
} catch {
case e: Exception => e.printStackTrace()
}
}
}
| magik6k/LxcAdmin | src/main/scala/net/magik6k/lxcadmin/Client.scala | Scala | mit | 349 |
package timing
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits.global
object Time {
def sync[A](name: String)(func: () => A): A = {
println(s"Started $name")
val start = System.currentTimeMillis
val answer = func()
val finish = System.currentTimeMillis
println(s"Finished $name after ${finish - start}ms")
answer
}
def async[A](name: String)(func: () => Future[A]): Future[A] = {
println(s"Started $name")
val start = System.currentTimeMillis
func().map { a =>
val finish = System.currentTimeMillis
println(s"Finished $name after ${finish - start}ms")
a
}
}
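  // Editor-added usage sketch (not part of the original object): times a synchronous
  // computation and an asynchronous one using the helpers above.
  def demo(): Unit = {
    val total = sync("sum") { () => (1 to 1000000).sum }
    println(s"sync result: $total")
    async("future-sum") { () => Future((1 to 1000000).sum) }
      .foreach(n => println(s"async result: $n"))
  }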
}
| underscoreio/essential-scala-code | src/main/scala/timing/Time.scala | Scala | apache-2.0 | 659 |
package grammarcomp
package benchmarks
import grammar._
import grammar.CFGrammar._
import java.io._
import generators.GrammarBoundingHelper
object AntlrJavascriptGrammar extends Benchmark {
import GrammarReaders._
def benchmarkName = "AntlrJSGrammar"
def benchmarkSource = "https://github.com/antlr/grammars-v4"
val filename = "antlr-grammars/ECMAScript.gram"
def ebnfGrammar = {
GrammarReaders.readFromFile(filename)
}
//val boundFilename = "java-antlr-bounded"
// /lazy val boundedGrammar = GrammarBoundingHelper.createBoundedGrammar(grammar,Some(1))
}
| epfl-lara/GrammarComparison | src/main/scala/grammarcomp/benchmarks/JavascriptGrammar3.scala | Scala | mit | 589 |
package fixtures
abstract class Fixture {
def createFixtures(): Unit
}
| soupytwist/knit | app/fixtures/Fixture.scala | Scala | gpl-3.0 | 74 |
package at.logic.gapt.formats.leancop
import at.logic.gapt.formats.ClasspathInputFile
import at.logic.gapt.utils.SatMatchers
import org.specs2.mutable.Specification
class LeanCoPParserTest extends Specification with SatMatchers {
"irrationals" in {
LeanCoPParser.getExpansionProof( ClasspathInputFile( "irrationals.leancop.s" ) ) must beLike {
case Some( expansion ) =>
expansion.deep must beEValidSequent
}
}
}
| gebner/gapt | tests/src/test/scala/at/logic/gapt/formats/leancop/LeanCoPParserTest.scala | Scala | gpl-3.0 | 442 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.streaming
import scala.collection.mutable.LinkedList
import scala.reflect.ClassTag
import scala.util.Random
import akka.actor.{Actor, ActorRef, Props, actorRef2Scala}
import org.apache.spark.{SparkConf, SecurityManager}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.StreamingContext.toPairDStreamFunctions
import org.apache.spark.util.AkkaUtils
import org.apache.spark.streaming.receiver.ActorHelper
case class SubscribeReceiver(receiverActor: ActorRef)
case class UnsubscribeReceiver(receiverActor: ActorRef)
/**
 * Sends random content to every subscribed receiver with a 1/2-second delay.
*/
class FeederActor extends Actor {
val rand = new Random()
var receivers: LinkedList[ActorRef] = new LinkedList[ActorRef]()
val strings: Array[String] = Array("words ", "may ", "count ")
def makeMessage(): String = {
val x = rand.nextInt(3)
strings(x) + strings(2 - x)
}
/*
* A thread to generate random messages
*/
new Thread() {
override def run() {
while (true) {
Thread.sleep(500)
receivers.foreach(_ ! makeMessage)
}
}
}.start()
def receive: Receive = {
case SubscribeReceiver(receiverActor: ActorRef) =>
println("received subscribe from %s".format(receiverActor.toString))
receivers = LinkedList(receiverActor) ++ receivers
case UnsubscribeReceiver(receiverActor: ActorRef) =>
println("received unsubscribe from %s".format(receiverActor.toString))
receivers = receivers.dropWhile(x => x eq receiverActor)
}
}
/**
 * A sample actor receiver, the simplest possible. This receiver actor
 * subscribes to a typical publisher/feeder actor and receives
 * data.
*
* @see [[org.apache.spark.examples.streaming.FeederActor]]
*/
class SampleActorReceiver[T: ClassTag](urlOfPublisher: String)
extends Actor with ActorHelper {
lazy private val remotePublisher = context.actorSelection(urlOfPublisher)
override def preStart(): Unit = remotePublisher ! SubscribeReceiver(context.self)
def receive: PartialFunction[Any, Unit] = {
case msg => store(msg.asInstanceOf[T])
}
override def postStop(): Unit = remotePublisher ! UnsubscribeReceiver(context.self)
}
/**
* A sample feeder actor
*
* Usage: FeederActor <hostname> <port>
* <hostname> and <port> describe the AkkaSystem that Spark Sample feeder would start on.
*/
object FeederActor {
def main(args: Array[String]) {
    if (args.length < 2) {
      System.err.println("Usage: FeederActor <hostname> <port>\n")
System.exit(1)
}
val Seq(host, port) = args.toSeq
val conf = new SparkConf
val actorSystem = AkkaUtils.createActorSystem("test", host, port.toInt, conf = conf,
securityManager = new SecurityManager(conf))._1
val feeder = actorSystem.actorOf(Props[FeederActor], "FeederActor")
println("Feeder started as:" + feeder)
actorSystem.awaitTermination()
}
}
/**
* A sample word count program demonstrating the use of plugging in
* Actor as Receiver
* Usage: ActorWordCount <hostname> <port>
* <hostname> and <port> describe the AkkaSystem that Spark Sample feeder is running on.
*
* To run this example locally, you may run Feeder Actor as
* `$ bin/run-example org.apache.spark.examples.streaming.FeederActor 127.0.1.1 9999`
* and then run the example
* `$ bin/run-example org.apache.spark.examples.streaming.ActorWordCount 127.0.1.1 9999`
*/
object ActorWordCount {
def main(args: Array[String]) {
if (args.length < 2) {
System.err.println(
"Usage: ActorWordCount <hostname> <port>")
System.exit(1)
}
StreamingExamples.setStreamingLogLevels()
val Seq(host, port) = args.toSeq
val sparkConf = new SparkConf().setAppName("ActorWordCount")
// Create the context and set the batch size
val ssc = new StreamingContext(sparkConf, Seconds(2))
/*
* Following is the use of actorStream to plug in custom actor as receiver
*
* An important point to note:
     * Since the actor may exist outside the Spark framework, it is the user's responsibility
     * to ensure type safety, i.e. the type of data received and that of the InputDStream
     * should be the same.
*
* For example: Both actorStream and SampleActorReceiver are parameterized
     * to the same type to ensure type safety.
*/
val lines = ssc.actorStream[String](
Props(new SampleActorReceiver[String]("akka.tcp://test@%s:%s/user/FeederActor".format(
host, port.toInt))), "SampleReceiver")
// compute wordcount
    lines.flatMap(_.split("\\s+")).map(x => (x, 1)).reduceByKey(_ + _).print()
ssc.start()
ssc.awaitTermination()
}
}
| shenbaise/mltoy | src/main/scala/org/apache/spark/examples/streaming/ActorWordCount.scala | Scala | apache-2.0 | 5,533 |
package com.github.jeanadrien.gatling.mqtt.client
import akka.actor.ActorRef
import com.github.jeanadrien.gatling.mqtt.client.MqttCommands.{ConnectAck, PublishAck, SubscribeAck}
import com.github.jeanadrien.gatling.mqtt.client.MqttQoS.MqttQoS
import org.eclipse.paho.client.mqttv3.persist.MemoryPersistence
import org.eclipse.paho.client.mqttv3.{MqttConnectOptions, MqttMessage, MqttClient => PahoClient}
/**
*
*/
class PahoMqttClient(config : MqttClientConfiguration, gatlingMqttId : String) extends MqttClient(gatlingMqttId) {
private val persistence = new MemoryPersistence();
private def qosIntValue(qos : MqttQoS) : Int = qos match {
case MqttQoS.AtMostOnce => 0
case MqttQoS.AtLeastOnce => 1
case MqttQoS.ExactlyOnce => 2
}
val broker = config.host
val clientId = config.clientId.getOrElse(PahoClient.generateClientId())
val pahoClient = new PahoClient(broker, clientId, persistence)
val connOpts = new MqttConnectOptions
connOpts.setCleanSession(config.cleanSession)
// connOpts.setConnectionTimeout() // TODO
connOpts.setKeepAliveInterval(config.keepAlive)
config.version match {
case Some("3.1") => connOpts.setMqttVersion(MqttConnectOptions.MQTT_VERSION_3_1)
case Some("3.1.1") => connOpts.setMqttVersion(MqttConnectOptions.MQTT_VERSION_3_1_1)
case _ => // nop
}
config.password.map(_.toCharArray).foreach(connOpts.setPassword _)
config.username.foreach(connOpts.setUserName _)
// connOpts.setServerURIs() ??
// connOpts.setSocketFactory() ??
config.will.foreach { will =>
connOpts.setWill(
will.topic,
will.message.getBytes,
qosIntValue(will.qos),
will.willRetain
)
}
// setup listener
val listener = new PahoConnectionListener(self)
pahoClient.setCallback(listener)
// FIXME: Throttling
// FIXME: Reconnect Part
// FIXME: Socketconfig
override protected def connect(replyTo : ActorRef) : Unit = {
pahoClient.connect(connOpts);
replyTo ! ConnectAck
}
override protected def subscribe(topics : List[(String, MqttQoS)], replyTo : ActorRef) : Unit = {
pahoClient.subscribe(topics.map(_._1).toArray, topics.map(_._2).map(qosIntValue).toArray)
replyTo ! SubscribeAck
}
override protected def publish(
topic : String, payload : Array[Byte],
mqttQoS : MqttQoS, retain : Boolean,
replyTo : ActorRef
) : Unit = {
val message = new MqttMessage(payload)
message.setQos(qosIntValue(mqttQoS))
message.setRetained(retain)
pahoClient.publish(topic, message)
replyTo ! PublishAck
}
override protected def close() = {
pahoClient.disconnect()
pahoClient.close()
}
}
| jeanadrien/gatling-mqtt-protocol | src/main/scala/com/github/jeanadrien/gatling/mqtt/client/PahoMqttClient.scala | Scala | apache-2.0 | 2,831 |
package utils
import java.io.InputStream
import dispatch._
import Defaults._
import com.sksamuel.elastic4s._
import com.sksamuel.elastic4s.ElasticDsl._
import play.api.Logger
import scala.sys.process._
import scala.concurrent.Await
import scala.concurrent.duration._
import traits._
import scala.async.Async.{ async, await }
import scalax.io._
import scala.io._
import scala.io.Source._
import java.io.BufferedInputStream
import java.io.File
class ScreenshotUtils {
this: SearchClientContainer with UrlContainer =>
val currentLocation = System.getProperty("user.dir")
private def serveFile(docId: String): Option[File] = {
val destination = s"$currentLocation/screenshots/$docId"
try {
val f = new java.io.File(destination)
if (f.exists()) Some(f) else None
} catch {
case t: Throwable => None
}
}
private def takeScreenShot(docId: String, siteUrl: String): Option[File] = {
val destination = s"$currentLocation/screenshots/$docId"
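    // Shell out to the bundled screenshots.py under a virtual framebuffer (xvfb) and block until it finishes.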
val exec = s"xvfb-run --auto-servernum --server-num=1 python $currentLocation/screenshots.py $siteUrl $destination"
val result = exec.!!
serveFile(docId)
}
private def getScreenShot(docId: String): Future[Option[File]] = {
async {
//try to serve the file from disk
serveFile(docId) match {
case Some(data) => Some(data)
//if not found take pic and save to disk
case None =>
val doc = await { client.execute(get id docId from "lr/lr_doc") }
val siteUrl = doc.getSource().get("url").asInstanceOf[String]
takeScreenShot(docId, siteUrl)
}
}
}
def getScreenshot(docId: String): Future[Option[File]] = {
if (docId == "{{result._id}}") Future(None)
else getScreenShot(docId)
}
} | adlnet/LR-Search | app/utils/ScreenshotUtils.scala | Scala | apache-2.0 | 1,763 |
package ${package}.conf
/**
* container class for configuration parameters.
*/
case class JobParameter(
param1: String = "192.0.2.1",
param2: Int = 2003
)
| scray/scray-archetype | src/main/resources/archetype-resources/src/main/scala/conf/JobParameter.scala | Scala | apache-2.0 | 179 |