code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (1 class) | license (15 classes) | size (int64, 5–1M)
---|---|---|---|---|---
package ohnosequences.metapasta
import ohnosequences.awstools.s3.ObjectAddress
import ohnosequences.metapasta.reporting.SampleId
object S3Paths {
def mergedFastq(resultsObject: ObjectAddress, sample: String): ObjectAddress = resultsObject / sample / "reads" / (sample + ".merged.fastq")
def notMergedFastq(resultsObject: ObjectAddress, sample: String): (ObjectAddress, ObjectAddress) = {
(resultsObject / sample / "reads" / (sample + ".notMerged1.fastq"), resultsObject / sample / "reads" / (sample + ".notMerged2.fastq"))
}
def mergedNoHitFasta(resultsObject: ObjectAddress, sample: String) : ObjectAddress = resultsObject / sample / "reads" / (sample + ".noHit.fasta")
def mergedNoTaxIdFasta(resultsObject: ObjectAddress, sample: String, assignmentType: AssignmentType) : ObjectAddress = resultsObject / sample / "reads" / (sample + "." + assignmentType + ".noTaxId.fasta")
def mergedAssignedFasta(resultsObject: ObjectAddress, sample: String, assignmentType: AssignmentType) : ObjectAddress = resultsObject / sample / "reads" / (sample + "." + assignmentType + ".assigned.fasta")
def mergedNotAssignedFasta(resultsObject: ObjectAddress, sample: String, assignmentType: AssignmentType) : ObjectAddress = resultsObject / sample / "reads" / (sample + "." + assignmentType + ".notAssigned.fasta")
def noHitFastas(readsObject: ObjectAddress, sample: String) : ObjectAddress = readsObject / sample / "noHit"
def noTaxIdFastas(readsObject: ObjectAddress, sample: String, assignmentType: AssignmentType) : ObjectAddress = readsObject / (sample + "###" + assignmentType) / "noTaxId"
def notAssignedFastas(readsObject: ObjectAddress, sample: String, assignmentType: AssignmentType) : ObjectAddress = readsObject / (sample + "###" + assignmentType) / "notAssigned"
def assignedFastas(readsObject: ObjectAddress, sample: String, assignmentType: AssignmentType): ObjectAddress = readsObject / (sample + "###" + assignmentType) / "assigned"
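// Note: the three per-chunk output locations above group keys under "<sample>###<assignmentType>",
// so each (sample, assignment type) pair gets its own prefix under the reads address.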
def noHitFasta(readsObject: ObjectAddress, chunk: ChunkId): ObjectAddress = {
noHitFastas(readsObject, chunk.sample.id) / (chunk.start + "_" + chunk.end + ".fasta")
}
def noTaxIdFasta(readsObject: ObjectAddress, chunk: ChunkId, assignmentType: AssignmentType): ObjectAddress = {
noTaxIdFastas(readsObject, chunk.sample.id, assignmentType) / (chunk.sample.id + chunk.start + "_" + chunk.end + ".fasta")
}
def notAssignedFasta(readsObject: ObjectAddress, chunk: ChunkId, assignmentType: AssignmentType): ObjectAddress = {
notAssignedFastas(readsObject, chunk.sample.id, assignmentType) / (chunk.sample.id + chunk.start + "_" + chunk.end + ".fasta")
}
def assignedFasta(readsObject: ObjectAddress, chunk: ChunkId, assignmentType: AssignmentType): ObjectAddress = {
assignedFastas(readsObject, chunk.sample.id, assignmentType) / (chunk.sample.id + chunk.start + "_" + chunk.end + ".fasta")
}
def treeDot(resultsObject: ObjectAddress, sample: String, assignmentType: AssignmentType) = {
resultsObject / sample / (sample + "." + assignmentType + ".tree.dot")
}
def treePdf(resultsObject: ObjectAddress, sample: String, assignmentType: AssignmentType) = {
resultsObject / sample / (sample + "." + assignmentType + ".tree.pdf")
}
def blastOut(readsObject: ObjectAddress, chunk: ChunkId): ObjectAddress = {
readsObject / chunk.sample.id / "blast" / (chunk.sample.id + chunk.start + "_" + chunk.end + ".blast")
}
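// A minimal usage sketch (hypothetical values, not part of the original file):
// assuming `ObjectAddress` exposes a `/` operator that appends key segments,
// the helpers above deterministically lay out per-sample keys, e.g.
//   S3Paths.mergedFastq(results, "sample1")
// resolves to <results>/sample1/reads/sample1.merged.fastq under the results address.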
}
| INTERCROSSING/metapasta | src/main/scala/ohnosequences/metapasta/S3Paths.scala | Scala | agpl-3.0 | 3,439 |
package fpinscala.parallelism
import java.util.concurrent._
object Par {
type Par[A] = ExecutorService => Future[A]
def run[A](s: ExecutorService)(a: Par[A]): Future[A] = a(s)
def unit[A](a: A): Par[A] = (es: ExecutorService) => UnitFuture(a) // `unit` is represented as a function that returns a `UnitFuture`, which is a simple implementation of `Future` that just wraps a constant value. It doesn't use the `ExecutorService` at all. It's always done and can't be cancelled. Its `get` method simply returns the value that we gave it.
private case class UnitFuture[A](get: A) extends Future[A] {
def isDone = true
def get(timeout: Long, units: TimeUnit) = get
def isCancelled = false
def cancel(evenIfRunning: Boolean): Boolean = false
}
def map2[A,B,C](a: Par[A], b: Par[B])(f: (A,B) => C): Par[C] = // `map2` doesn't evaluate the call to `f` in a separate logical thread, in accord with our design choice of having `fork` be the sole function in the API for controlling parallelism. We can always do `fork(map2(a,b)(f))` if we want the evaluation of `f` to occur in a separate thread.
(es: ExecutorService) => {
val af = a(es)
val bf = b(es)
UnitFuture(f(af.get, bf.get)) // This implementation of `map2` does _not_ respect timeouts, and eagerly waits for the returned futures. This means that even if you have passed in "forked" arguments, using this map2 on them will make them wait. It simply passes the `ExecutorService` on to both `Par` values, waits for the results of the Futures `af` and `bf`, applies `f` to them, and wraps them in a `UnitFuture`. In order to respect timeouts, we'd need a new `Future` implementation that records the amount of time spent evaluating `af`, then subtracts that time from the available time allocated for evaluating `bf`.
}
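// Illustrative note (not part of the original exercise code): because `map2` runs `f` on the
// calling thread, wrapping it as `fork(map2(a, b)(f))` is how to push the combination step onto
// a separate logical thread, e.g.
//   val sumPar: Par[Int] = fork(map2(lazyUnit(1 + 1), lazyUnit(2 + 2))(_ + _))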
def map3[A, B, C, D](a: Par[A], b: Par[B], c: Par[C])(f: (A, B, C) => D): Par[D] = {
val g = (a: A, b: B) => (c: C) => f(a, b, c)
val h = (p: C => D, c: C) => p(c)
map2(map2(a, b)(g), c)(h)
}
def map4[A, B, C, D, E](a: Par[A], b: Par[B], c: Par[C], d: Par[D])(f: (A, B, C, D) => E): Par[E] = {
val g = (a: A, b: B, c: C) => (d: D) => f(a, b, c, d)
val h = (p: D => E, d: D) => p(d)
map2(map3(a, b, c)(g), d)(h)
}
def map5[A, B, C, D, E, F](a: Par[A], b: Par[B], c: Par[C], d: Par[D], e: Par[E])(f: (A, B, C, D, E) => F): Par[F] = {
val g = (a: A, b: B, c: C, d: D) => (e: E) => f(a, b, c, d, e)
val h = (p: E => F, e: E) => p(e)
map2(map4(a, b, c, d)(g), e)(h)
}
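// map3, map4 and map5 all follow the same pattern: curry `f` so that each additional
// Par argument is folded in with one more call to `map2`.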
def fork[A](a: => Par[A]): Par[A] = // This is the simplest and most natural implementation of `fork`, but there are some problems with it--for one, the outer `Callable` will block waiting for the "inner" task to complete. Since this blocking occupies a thread in our thread pool, or whatever resource backs the `ExecutorService`, this implies that we're losing out on some potential parallelism. Essentially, we're using two threads when one should suffice. This is a symptom of a more serious problem with the implementation, and we will discuss this later in the chapter.
es => es.submit(new Callable[A] {
def call = a(es).get
})
def lazyUnit[A](a: => A): Par[A] = fork(unit(a))
def asyncF[A,B](f: A => B): A => Par[B] = a => lazyUnit(f(a))
def map[A,B](pa: Par[A])(f: A => B): Par[B] =
map2(pa, unit(()))((a,_) => f(a))
def sortPar(parList: Par[List[Int]]) = map(parList)(_.sorted)
def sequence[A](ps: List[Par[A]]): Par[List[A]] =
map(sequenceBalanced(ps.toIndexedSeq))(_.toList)
def sequenceBalanced[A](ps: IndexedSeq[Par[A]]): Par[IndexedSeq[A]] =
if (ps.isEmpty) unit(Vector())
else if (ps.size == 1) map(ps.head)(a => Vector(a))
else {
val (l, r) = ps.splitAt(ps.size / 2)
map2(sequenceBalanced(l), sequenceBalanced(r))(_ ++ _)
}
def parFold[A](ps: List[A], z: A)(f : (A, A) => A): Par[A] =
parFoldBalanced(ps.toIndexedSeq, z)(f)
def parFoldBalanced[A](ps: IndexedSeq[A], z: A)(f : (A, A) => A): Par[A] =
if (ps.isEmpty) unit(z)
else if (ps.size == 1) lazyUnit(f(z, ps.head))
else {
val (l, r) = ps.splitAt(ps.size / 2)
map2(parFoldBalanced(l, z)(f), parFoldBalanced(r, z)(f))(f)
}
def parMap[A,B](ps: List[A])(f: A => B): Par[List[B]] = fork {
val fbs: List[Par[B]] = ps.map(asyncF(f))
sequence(fbs)
}
def parMapFold[A, B](l : List[A], z : B)(f1 : A => B, f2 : (B, B) => B) : Par[B] =
flatMap(parMap(l)(f1))(parFold(_, z)(f2))
def flatMap[A,B](p: Par[A])(choices: A => Par[B]): Par[B] =
es => {
val k = run(es)(p).get
run(es)(choices(k))
}
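// Note that, like `choice` below, this implementation blocks on the result of `p`
// before running the chosen computation.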
def parFilter[A](as: List[A])(f: A => Boolean): Par[List[A]] =
map(parMap(as)(a => if (f(a)) List(a) else List.empty[A]))(_.flatten)
def parFilterViaOption[A](as: List[A])(f: A => Boolean): Par[List[A]] =
map(parMap(as)(a => if (f(a)) Some(a) else None))(_.flatten)
def equal[A](e: ExecutorService)(p: Par[A], p2: Par[A]): Boolean =
p(e).get == p2(e).get
def delay[A](fa: => Par[A]): Par[A] =
es => fa(es)
def choice[A](cond: Par[Boolean])(t: Par[A], f: Par[A]): Par[A] =
es =>
if (run(es)(cond).get) t(es) // Notice we are blocking on the result of `cond`.
else f(es)
def choiceN[A](n: Par[Int])(choices: List[Par[A]]): Par[A] =
es =>
run(es)(choices(run(es)(n).get))
def choiceViaChoiceN[A](cond: Par[Boolean])(t: Par[A], f: Par[A]): Par[A] =
choiceN(map(cond)(if (_) 0 else 1))(t :: f :: Nil)
def choiceMap[K,V](key: Par[K])(choices: Map[K,Par[V]]): Par[V] =
es =>
choices(run(es)(key).get)(es)
def chooser[A,B](pa: Par[A])(choices: A => Par[B]): Par[B] =
es =>
choices(run(es)(pa).get)(es)
def join[A](a: Par[Par[A]]): Par[A] =
es =>
map(a)(run(es)(_).get)(es)
def joinViaFlatMap[A](a: Par[Par[A]]): Par[A] =
flatMap(a)(a => a)
def flatMapViaJoin[A,B](p: Par[A])(f: A => Par[B]): Par[B] =
join(map(p)(f))
/* Gives us infix syntax for `Par`. */
implicit def toParOps[A](p: Par[A]): ParOps[A] = new ParOps(p)
class ParOps[A](p: Par[A]) {
}
}
object Examples {
import Par._
def sum(ints: IndexedSeq[Int]): Int = // `IndexedSeq` is a superclass of random-access sequences like `Vector` in the standard library. Unlike lists, these sequences provide an efficient `splitAt` method for dividing them into two parts at a particular index.
if (ints.size <= 1)
ints.headOption getOrElse 0 // `headOption` is a method defined on all collections in Scala. We saw this function in chapter 3.
else {
val (l,r) = ints.splitAt(ints.length/2) // Divide the sequence in half using the `splitAt` function.
sum(l) + sum(r) // Recursively sum both halves and add the results together.
}
def countWords(ls : List[String]) : Par[Int] =
parMapFold(ls, 0)(_.split("""\s+""").length, _ + _)
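// A hypothetical usage sketch (not part of the original exercises): run `countWords` on a
// small fixed thread pool. Four threads are assumed here to be enough to avoid the
// nested-`fork` deadlock discussed in the comments above; the input strings are illustrative.
def countWordsDemo(): Int = {
val es = Executors.newFixedThreadPool(4)
try run(es)(countWords(List("a quick brown fox", "jumps over", "the lazy dog"))).get
finally es.shutdown()
}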
}
| ailveen/fpinscala | exercises/src/main/scala/fpinscala/parallelism/Par.scala | Scala | mit | 6,906 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.controller.test.migration
import scala.Vector
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import common.TestHelpers
import common.WskTestHelpers
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.model.StatusCodes.OK
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import spray.json.DefaultJsonProtocol._
import spray.json._
import org.apache.openwhisk.core.controller.WhiskActionsApi
import org.apache.openwhisk.core.controller.test.ControllerTestCommon
import org.apache.openwhisk.core.controller.test.WhiskAuthHelpers
import org.apache.openwhisk.core.entity._
/**
* Tests migration of a new implementation of sequences: old style sequences can be updated and retrieved - standalone tests
*/
@RunWith(classOf[JUnitRunner])
class SequenceActionApiMigrationTests
extends ControllerTestCommon
with WhiskActionsApi
with TestHelpers
with WskTestHelpers {
behavior of "Sequence Action API Migration"
val creds = WhiskAuthHelpers.newIdentity()
val namespace = EntityPath(creds.subject.asString)
val collectionPath = s"/${EntityPath.DEFAULT}/${collection.path}"
def aname() = MakeName.next("seq_migration_tests")
private def seqParameters(seq: Vector[String]) = Parameters("_actions", seq.toJson)
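// Old-style sequences encoded their component actions as an "_actions" parameter rather than
// a dedicated sequence exec type; that legacy representation is what these tests exercise.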
it should "list old-style sequence action with explicit namespace" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c").map(stringToFullyQualifiedName(_))
val actions = (1 to 2).map { i =>
WhiskAction(namespace, aname(), sequence(components))
}.toList
actions foreach { put(entityStore, _) }
waitOnView(entityStore, WhiskAction, namespace, 2)
Get(s"/$namespace/${collection.path}") ~> Route.seal(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[List[JsObject]]
actions.length should be(response.length)
response should contain theSameElementsAs actions.map(_.summaryAsJson)
}
}
it should "get old-style sequence action by name in default namespace" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c").map(stringToFullyQualifiedName(_))
val action = WhiskAction(namespace, aname(), sequence(components))
put(entityStore, action)
Get(s"$collectionPath/${action.name}") ~> Route.seal(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[WhiskAction]
response should be(action)
}
}
// this test is a repeat from ActionsApiTest BUT with old style sequence
it should "preserve new parameters when changing old-style sequence action to non sequence" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c")
val seqComponents = components.map(stringToFullyQualifiedName(_))
val action = WhiskAction(namespace, aname(), sequence(seqComponents), seqParameters(components))
val content = WhiskActionPut(Some(jsDefault("")), parameters = Some(Parameters("a", "A")))
put(entityStore, action, false)
// create an action sequence
Put(s"$collectionPath/${action.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
deleteAction(action.docid)
status should be(OK)
val response = responseAs[WhiskAction]
response.exec.kind should be(NODEJS)
response.parameters should be(Parameters("a", "A"))
}
}
// this test is a repeat from ActionsApiTest BUT with old style sequence
it should "reset parameters when changing old-style sequence action to non sequence" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c")
val seqComponents = components.map(stringToFullyQualifiedName(_))
val action = WhiskAction(namespace, aname(), sequence(seqComponents), seqParameters(components))
val content = WhiskActionPut(Some(jsDefault("")))
put(entityStore, action, false)
// create an action sequence
Put(s"$collectionPath/${action.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
deleteAction(action.docid)
status should be(OK)
val response = responseAs[WhiskAction]
response.exec.kind should be(NODEJS)
response.parameters shouldBe Parameters()
}
}
it should "update old-style sequence action with new annotations" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c")
val seqComponents = components.map(stringToFullyQualifiedName(_))
val action = WhiskAction(namespace, aname(), sequence(seqComponents))
val content = """{"annotations":[{"key":"old","value":"new"}]}""".parseJson.asJsObject
put(entityStore, action, false)
// create an action sequence
Put(s"$collectionPath/${action.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
deleteAction(action.docid)
status should be(OK)
val response = responseAs[String]
// contains the action
components map { c =>
response should include(c)
}
// contains the annotations
response should include("old")
response should include("new")
}
}
it should "update an old-style sequence with new sequence" in {
implicit val tid = transid()
// old sequence
val seqName = EntityName(s"${aname()}_new")
val oldComponents = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c").map(stringToFullyQualifiedName(_))
val oldSequence = WhiskAction(namespace, seqName, sequence(oldComponents))
put(entityStore, oldSequence)
// new sequence
val limit = 5 // count of bogus actions in sequence
val bogus = s"${aname()}_bogus"
val bogusActionName = s"/_/${bogus}" // test that default namespace gets properly replaced
// put the action in the entity store so it exists
val bogusAction = WhiskAction(namespace, EntityName(bogus), jsDefault("??"), Parameters("x", "y"))
put(entityStore, bogusAction)
val seqComponents = for (i <- 1 to limit) yield stringToFullyQualifiedName(bogusActionName)
val seqAction = WhiskAction(namespace, seqName, sequence(seqComponents.toVector))
val content = WhiskActionPut(Some(seqAction.exec), Some(Parameters()))
// update an action sequence
Put(s"$collectionPath/${seqName}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
deleteAction(seqAction.docid)
status should be(OK)
val response = responseAs[WhiskAction]
response.exec.kind should be(Exec.SEQUENCE)
response.limits should be(seqAction.limits)
response.publish should be(seqAction.publish)
response.version should be(seqAction.version.upPatch)
}
}
}
| style95/openwhisk | tests/src/test/scala/org/apache/openwhisk/core/controller/test/migration/SequenceActionApiMigrationTests.scala | Scala | apache-2.0 | 7,562 |
package breeze.numerics
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
/**
*
* @author dlwh
*/
@RunWith(classOf[JUnitRunner])
class BesselTest extends FunSuite {
import Bessel._
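// i0 and i1 are the modified Bessel functions of the first kind of order 0 and 1;
// the expected values below are standard reference values.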
test("i0") {
assert((i0(1) - 1.2660658777520083).abs < 1E-8)
assert((i0(0) - 1.0).abs < 1E-8)
assert((i0(20) - 4.355828255955353E7).abs < 1E-1)
}
test("i1") {
assert((i1(1) - 0.565159103992485).abs < 1E-8, i1(1))
assert((i1(0) - 0).abs < 1E-8)
assert((i1(20) - 4.24549733851277E7).abs < 1E-1)
}
}
| eponvert/breeze | src/test/scala/breeze/numerics/BesselTest.scala | Scala | apache-2.0 | 1,136 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.model
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, ListBuffer, FlatHashTable}
import cc.factorie.variable._
//import scala.util.{Random,Sorting}
import scala.util.Random
import scala.math
import scala.util.Sorting
import cc.factorie.la._
//import cc.factorie.util.Substitutions
import java.io._
/** A Factor with two neighbor variables. Subclasses define the score method (and typically StatisticsType); statistics defaults to the tuple of neighbor values.
@author Andrew McCallum */
abstract class Factor2[N1<:Var,N2<:Var](val _1:N1, val _2:N2) extends Factor {
factor =>
type NeighborType1 = N1
type NeighborType2 = N2
def score(v1:N1#Value, v2:N2#Value): Double
def statistics(v1:N1#Value, v2:N2#Value): StatisticsType = (v1, v2).asInstanceOf[StatisticsType]
def scoreAndStatistics(v1:N1#Value, v2:N2#Value): (Double,StatisticsType) = (score(v1, v2), statistics(v1, v2))
def currentScore: Double = score(_1.value.asInstanceOf[N1#Value], _2.value.asInstanceOf[N2#Value])
override def currentStatistics: StatisticsType = statistics(_1.value.asInstanceOf[N1#Value], _2.value.asInstanceOf[N2#Value])
override def currentScoreAndStatistics: (Double,StatisticsType) = scoreAndStatistics(_1.value.asInstanceOf[N1#Value], _2.value.asInstanceOf[N2#Value])
def numVariables = 2
override def variables = IndexedSeq(_1, _2)
def variable(i:Int) = i match { case 0 => _1; case 1 => _2; case _ => throw new IndexOutOfBoundsException(i.toString) }
//val _variables = Seq(factor._1, factor._2)
/** Return a record of the current values of this Factor's neighbors. */
def currentAssignment = new Assignment2(_1, _1.value.asInstanceOf[N1#Value], _2, _2.value.asInstanceOf[N2#Value])
/** The ability to score a Values object is now removed, and this is its closest alternative. */
def assignmentScore(a:Assignment) = a match {
case a:AbstractAssignment2[N1,N2] if (a._1 eq _1) && (a._2 eq _2) => score(a.value1, a.value2)
case _ =>
score(a(_1), a(_2))
}
override final def assignmentStatistics(a:Assignment): StatisticsType = a match {
case a:AbstractAssignment2[N1,N2] if (a._1 eq _1) && (a._2 eq _2) => statistics(a.value1, a.value2)
case _ => statistics(a(_1), a(_2))
}
/** Given multiplicative factors on values of neighbor _1 (which allow for limited iteration), and given the Tensor value of neighbor _2,
return a Tensor1 containing the scores for each possible value of neighbor _1, which must be a DiscreteVar.
Note that the returned Tensor may be sparse if this factor is set up for limited values iteration.
If _1 is not a DiscreteVar then throws an Error. */
def valuesScore1(tensor1:Tensor, tensor2:Tensor): Tensor1 = throw new Error("Not implemented in Factor "+getClass)
def valuesScore2(tensor1:Tensor, tensor2:Tensor): Tensor1 = throw new Error("Not implemented in Factor "+getClass)
// For implementing sparsity in belief propagation
def hasLimitedDiscreteValues12 = limitedDiscreteValues12 != null && limitedDiscreteValues12.activeDomainSize < limitedDiscreteValues12.length
def limitedDiscreteValues12: SparseBinaryTensor2 = null // throw new Error("This Factor type does not implement limitedDiscreteValues1: "+getClass)
def addLimitedDiscreteValues12(i:Int, j:Int): Unit = limitedDiscreteValues12.+=(i, j)
def addLimitedDiscreteCurrentValues12(): Unit = addLimitedDiscreteValues12(_1.asInstanceOf[DiscreteVar].intValue, _2.asInstanceOf[DiscreteVar].intValue)
def hasLimitedDiscreteValues1 = limitedDiscreteValues1 != null && limitedDiscreteValues1.activeDomainSize < limitedDiscreteValues1.length
def limitedDiscreteValues1: SparseBinaryTensor1 = null // throw new Error("This Factor type does not implement limitedDiscreteValues1: "+getClass)
def addLimitedDiscreteValues1(i:Int): Unit = limitedDiscreteValues1.+=(i)
def addLimitedDiscreteCurrentValues1(): Unit = addLimitedDiscreteValues1(this._1.asInstanceOf[DiscreteVar].intValue)
// TODO Consider something like this?
// def assignmentIterator(fixed: Assignment): Iterator[Assignment2]
// /** valuesIterator in style of specifying fixed neighbors */
// def valuesIterator(fixed: Assignment): Iterator[Values] = {
// val fixed1 = fixed.contains(_1)
// val fixed2 = fixed.contains(_2)
// if (fixed1 && fixed2)
// Iterator.single(new Values(fixed(_1), fixed(_2)))
// else if (fixed1) {
// val val1 = fixed(_1)
// if (isLimitingValuesIterator) {
// val d2 = _2.domain.asInstanceOf[DiscreteDomain]
// val intVal1 = val1.asInstanceOf[DiscreteVar].intValue
// limitedDiscreteValuesIterator.filter(t => t._1 == intVal1).map(t => new Values(val1, d2.apply(t._2).asInstanceOf[N2#Value]))
// } else {
// val d2 = _2.domain.asInstanceOf[Seq[N2#Value]]
// d2.iterator.map(value => new Values(val1, value))
// }
// } else if (fixed2) {
// val val2 = fixed(_2)
// if (isLimitingValuesIterator) {
// val d1 = _1.domain.asInstanceOf[DiscreteDomain]
// val intVal2 = val2.asInstanceOf[DiscreteVar].intValue
// limitedDiscreteValuesIterator.filter(t => t._2 == intVal2).map(t => new Values(d1.apply(t._1).asInstanceOf[N1#Value], val2))
// } else {
// val d1 = _1.domain.asInstanceOf[Seq[N1#Value]]
// d1.iterator.map(value => new Values(value, val2))
// }
// } else {
// if (isLimitingValuesIterator) {
// val d1 = _1.domain.asInstanceOf[DiscreteDomain]
// val d2 = _2.domain.asInstanceOf[DiscreteDomain]
// limitedDiscreteValuesIterator.map(t => new Values(d1.apply(t._1).asInstanceOf[N1#Value], d2.apply(t._2).asInstanceOf[N2#Value]))
// } else {
// val d1 = _1.domain.asInstanceOf[Seq[N1#Value]]
// val d2 = _2.domain.asInstanceOf[Seq[N2#Value]]
// (for (val1 <- d1; val2 <- d2) yield new Values(val1, val2)).iterator
// }
// }
// }
//
// /** valuesIterator in style of specifying varying neighbors */
// def valuesIterator(varying:Set[Variable]): Iterator[Values] = {
// val varying1 = varying.contains(_1)
// val varying2 = varying.contains(_2)
// if (varying1 && varying2) {
// if (isLimitingValuesIterator) {
// val d1 = _1.domain.asInstanceOf[DiscreteDomain]
// val d2 = _2.domain.asInstanceOf[DiscreteDomain]
// limitedDiscreteValuesIterator.map(t => new Values(d1.apply(t._1).asInstanceOf[N1#Value], d2.apply(t._2).asInstanceOf[N2#Value]))
// } else {
// val d1 = _1.domain.asInstanceOf[Seq[N1#Value]]
// val d2 = _2.domain.asInstanceOf[Seq[N2#Value]]
// (for (val1 <- d1; val2 <- d2) yield new Values(val1, val2)).iterator
// }
// } else if (varying1) {
// val val2 = _2.value
// if (isLimitingValuesIterator) {
// val d1 = _1.domain.asInstanceOf[DiscreteDomain]
// val intVal2 = val2.asInstanceOf[DiscreteVar].intValue
// limitedDiscreteValuesIterator.filter(t => t._2 == intVal2).map(t => new Values(d1.apply(t._1).asInstanceOf[N1#Value], val2))
// } else {
// val d1 = _1.domain.asInstanceOf[Seq[N1#Value]]
// d1.iterator.map(value => new Values(value, val2))
// }
// } else if (varying2) {
// val val1 = _1.value
// if (isLimitingValuesIterator) {
// val d2 = _2.domain.asInstanceOf[DiscreteDomain]
// val intVal1 = val1.asInstanceOf[DiscreteVar].intValue
// limitedDiscreteValuesIterator.filter(t => t._1 == intVal1).map(t => new Values(val1, d2.apply(t._2).asInstanceOf[N2#Value]))
// } else {
// val d2 = _2.domain.asInstanceOf[Seq[N2#Value]]
// d2.iterator.map(value => new Values(val1, value))
// }
// } else {
// Iterator.single(new Values(_1.value, _2.value))
// }
// }
}
/** A 2-neighbor Factor whose statistics have type Tuple2.
Only "score" method is abstract.
@author Andrew McCallum */
abstract class TupleFactorWithStatistics2[N1<:Var,N2<:Var](override val _1:N1, override val _2:N2) extends Factor2[N1,N2](_1, _2) {
type StatisticsType = ((N1#Value, N2#Value))
final override def statistics(v1:N1#Value, v2:N2#Value) = (v1, v2)
final override def statisticsAreValues: Boolean = true
}
/** A 2-neighbor Factor whose statistics have type Tensor.
Only "statistics" and "score" methods are abstract.
@author Andrew McCallum */
abstract class TensorFactor2[N1<:Var,N2<:Var](override val _1:N1, override val _2:N2) extends Factor2[N1,N2](_1, _2) {
type StatisticsType = Tensor
override def statistics(v1:N1#Value, v2:N2#Value): Tensor
final def score(v1:N1#Value, v2:N2#Value): Double = statisticsScore(statistics(v1, v2))
override def scoreAndStatistics(v1:N1#Value, v2:N2#Value): (Double, Tensor) = {
val tensor = statistics(v1, v2)
(statisticsScore(tensor), tensor)
}
def statisticsScore(t:Tensor): Double
}
/** A trait for 2-neighbor Factor whose neighbors have Tensor values,
and whose statistics are the outer product of those values.
Only "statisticsScore" method is abstract. DotFactorWithStatistics2 is also a subclass of this.
@author Andrew McCallum */
trait TensorFactorStatistics2[N1<:TensorVar,N2<:TensorVar] extends TensorFactor2[N1,N2] {
final override def statistics(v1:N1#Value, v2:N2#Value): Tensor = v1 outer v2
final override def valuesStatistics(tensor:Tensor): Tensor = tensor
final override def statisticsAreValues: Boolean = true
}
/** A 2-neighbor Factor whose neighbors have Tensor values,
and whose statistics are the outer product of those values.
Only "statisticsScore" method is abstract.
@author Andrew McCallum */
abstract class TensorFactorWithStatistics2[N1<:TensorVar,N2<:TensorVar](override val _1:N1, override val _2:N2) extends TensorFactor2[N1,N2](_1, _2) with TensorFactorStatistics2[N1,N2]
/** A 2-neighbor Factor whose statistics have type Tensor,
and whose score is the dot product between this Tensor and a "weightsSet" parameter Tensor.
Only "statistics" and "weightsSet" methods are abstract.
@author Andrew McCallum */
abstract class DotFactor2[N1<:TensorVar,N2<:TensorVar](override val _1:N1, override val _2:N2) extends TensorFactor2[N1,N2](_1, _2) {
def weights: Weights
def statisticsScore(t:Tensor): Double = weights.value dot t
}
/** A 2-neighbor Factor whose neighbors have Tensor values,
and whose statistics are the outer product of those values,
and whose score is the dot product between this Tensor and a "weightsSet" parameter Tensor.
Only "weightsSet" method is abstract.
@author Andrew McCallum */
abstract class DotFactorWithStatistics2[N1<:TensorVar,N2<:TensorVar](override val _1:N1, override val _2:N2) extends DotFactor2(_1, _2) with TensorFactorStatistics2[N1,N2] {
override def valuesScore(valueTensor:Tensor) = weights.value dot valueTensor
}
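/* Illustration (a sketch, not part of the library API): when both neighbor values are one-hot
   discrete tensors, `v1 outer v2` has a single active cell, so the dot product with
   `weights.value` reduces to looking up the single corresponding weight entry. */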
/** Family containing Factor2 (Families of Factors having two neighbors).
@author Andrew McCallum */
trait Family2[N1<:Var,N2<:Var] extends FamilyWithNeighborDomains {
type NeighborType1 = N1
type NeighborType2 = N2
/** Override this if you want to matchNeighborDomains */
def neighborDomain1: Domain { type Value <: N1#Value } = null
def neighborDomain2: Domain { type Value <: N2#Value } = null
def neighborDomains = Seq(neighborDomain1, neighborDomain2)
type FactorType = Factor
final case class Factor(override val _1:N1, override val _2:N2) extends Factor2[N1,N2](_1, _2) with super.Factor {
//type StatisticsType = Family2.this.StatisticsType
override def equalityPrerequisite: AnyRef = Family2.this
def score(value1:N1#Value, value2:N2#Value): Double = Family2.this.score(value1, value2)
override def statistics(v1:N1#Value, v2:N2#Value): StatisticsType = Family2.this.statistics(v1, v2)
override def scoreAndStatistics(v1:N1#Value, v2:N2#Value): (Double,StatisticsType) = Family2.this.scoreAndStatistics(v1, v2)
override def valuesScore(tensor:Tensor): Double = Family2.this.valuesScore(tensor) // TODO Consider implementing match here to use available _1 domain
override def statisticsScore(tensor:Tensor): Double = Family2.this.statisticsScore(tensor)
override def valuesStatistics(tensor:Tensor): Tensor = Family2.this.valuesStatistics(tensor)
override def statisticsAreValues: Boolean = Family2.this.statisticsAreValues
override def limitedDiscreteValues12: SparseBinaryTensor2 = Family2.this.limitedDiscreteValues12 //(this.asInstanceOf[Factor2[VectorVar,VectorVar]])
override def limitedDiscreteValues1: SparseBinaryTensor1 = Family2.this.limitedDiscreteValues1 //(this.asInstanceOf[Factor2[VectorVar,N2]])
}
def score(v1:N1#Value, v2:N2#Value): Double
def statistics(v1:N1#Value, v2:N2#Value): StatisticsType = (v1, v2).asInstanceOf[StatisticsType]
def scoreAndStatistics(v1:N1#Value, v2:N2#Value): (Double,StatisticsType) = (score(v1, v2), statistics(v1, v2))
def valuesStatistics(tensor:Tensor): Tensor = throw new Error("This Factor class does not implement valuesStatistics(Tensor)")
def statisticsAreValues: Boolean = false
override def valuesScore(tensor:Tensor): Double = tensor match {
case v: SingletonBinaryTensorLike2 => {
val domain0 = neighborDomain1.asInstanceOf[DiscreteDomain { type Value <: N1#Value }]
val domain1 = neighborDomain2.asInstanceOf[DiscreteDomain { type Value <: N2#Value }]
score(domain0(v.singleIndex1), domain1(v.singleIndex2))
//statistics(new SingletonBinaryTensor1(v.dim1, v.singleIndex1), new SingletonBinaryTensor1(v.dim2, v.singleIndex2)).score
}
case v: SingletonBinaryLayeredTensor2 => {
val domain0 = if (neighborDomain1 ne null) neighborDomain1.asInstanceOf[DiscreteDomain { type Value <: N1#Value }] else new DiscreteDomain(Int.MaxValue).asInstanceOf[DiscreteDomain { type Value = N1#Value }]
score(domain0(v.singleIndex1), v.inner.asInstanceOf[N2#Value])
}
case v: Outer1Tensor2 => {
(v.tensor1, v.tensor2) match {
case (v1: SingletonBinaryTensor1, v2: SingletonBinaryTensor1) =>
val domain0 = neighborDomain1.asInstanceOf[DiscreteDomain { type Value <: N1#Value }] // TODO Yipes. This is a bit shaky (and inefficient?)
val domain1 = neighborDomain2.asInstanceOf[DiscreteDomain { type Value <: N2#Value }]
v.scale*score(domain0(v1.singleIndex), domain1(v2.singleIndex))
case (v1: SingletonBinaryTensor1, v2: N2#Value @unchecked) =>
val domain0 = neighborDomain1.asInstanceOf[DiscreteDomain { type Value <: N1#Value }] // TODO Yipes. This is a bit shaky (and inefficient?)
v.scale*score(domain0(v1.singleIndex), v2)
}
}
}
// For implementing sparsity in belief propagation
def hasLimitedDiscreteValues12 = limitedDiscreteValues12 != null && limitedDiscreteValues12.activeDomainSize < limitedDiscreteValues12.length
//protected def getLimitedDiscreteValues12(factor:Factor2[VectorVar,VectorVar]): SparseBinaryTensor2 = { if (limitedDiscreteValues12 eq null) limitedDiscreteValues12 = new SparseBinaryTensor2(factor._1.domain.dimensionSize, factor._2.domain.dimensionSize); limitedDiscreteValues12 }
var limitedDiscreteValues12: SparseBinaryTensor2 = null
def hasLimitedDiscreteValues1 = limitedDiscreteValues1 != null && limitedDiscreteValues1.activeDomainSize < limitedDiscreteValues1.length
//protected def getLimitedDiscreteValues1(factor:Factor2[VectorVar,_]): SparseBinaryTensor1 = { if (limitedDiscreteValues1 eq null) limitedDiscreteValues1 = new SparseBinaryTensor1(factor._1.domain.dimensionSize); limitedDiscreteValues1 }
var limitedDiscreteValues1: SparseBinaryTensor1 = null
def hasLimitedDiscreteValues2 = limitedDiscreteValues2 != null && limitedDiscreteValues2.activeDomainSize < limitedDiscreteValues2.length
//protected def getLimitedDiscreteValues2(factor:Factor2[_, VectorVar]): SparseBinaryTensor1 = { if (limitedDiscreteValues2 eq null) limitedDiscreteValues2 = new SparseBinaryTensor1(factor._2.domain.dimensionSize); limitedDiscreteValues2 }
var limitedDiscreteValues2: SparseBinaryTensor1 = null
// // Cached Statistics
// private var cachedStatisticsArray: Array[StatisticsType] = null
// private var cachedStatisticsHash: HashMap[Product,StatisticsType] = null
// /** It is callers responsibility to clearCachedStatistics if weightsSet or other relevant state changes. */
// override def cachedStatistics(values:Values): StatisticsType =
// if (Template.enableCachedStatistics) values._1 match {
// case v1:DiscreteValue => {
// values._2 match {
// case v2:DiscreteValue => {
// //println("Template2.cachedStatistics")
// if (cachedStatisticsArray eq null) cachedStatisticsArray = new Array[Statistics](v1.domain.size * v2.domain.size).asInstanceOf[Array[StatisticsType]]
// val i = v1.intValue * v2.domain.dimensionSize + v2.intValue
// if (cachedStatisticsArray(i) eq null) cachedStatisticsArray(i) = values.statistics
// cachedStatisticsArray(i)
// }
// case v2:VectorValue if (true /*v2.isConstant*/) => {
// //println("Template2.cachedStatistics")
// if (cachedStatisticsHash eq null) cachedStatisticsHash = new HashMap[Product,StatisticsType] { override protected def initialSize = 512 }
// val i = ((v1.intValue,v2))
// cachedStatisticsHash.getOrElseUpdate(i, values.statistics)
// }
// case _ => values.statistics
// }
// }
// case v1:VectorValue if (true /*v1.isConstant*/) => {
// values._2 match {
// case v2:DiscreteValue => {
// if (cachedStatisticsHash eq null) cachedStatisticsHash = new HashMap[Product,StatisticsType]
// val i = ((v2.intValue,v1))
// cachedStatisticsHash.getOrElseUpdate(i, values.statistics)
// }
// case _ => values.statistics
// }
// }
// case _ => values.statistics
// } else values.statistics
// override def clearCachedStatistics: Unit = { cachedStatisticsArray = null; cachedStatisticsHash = null }
}
trait TupleFamily2[N1<:Var,N2<:Var] extends Family2[N1,N2] {
type StatisticsType = ((N1#Value, N2#Value))
//def statistics(v1:N1#Value, v2:N2#Value): ((N1#Value, N2#Value))
}
trait TupleFamilyWithStatistics2[N1<:Var,N2<:Var] extends TupleFamily2[N1,N2] {
final override def statistics(v1:N1#Value, v2:N2#Value): ((N1#Value, N2#Value)) = (v1, v2)
final override def statisticsAreValues: Boolean = true
}
trait TensorFamily2[N1<:Var,N2<:Var] extends Family2[N1,N2] with TensorFamily {
override def statistics(v1:N1#Value, v2:N2#Value): Tensor
}
trait TensorFamilyWithStatistics2[N1<:TensorVar,N2<:TensorVar] extends TensorFamily2[N1,N2] {
//type StatisticsType = Tensor
final override def statistics(v1:N1#Value, v2:N2#Value) = v1 outer v2
final override def valuesStatistics(tensor:Tensor): Tensor = tensor
final override def statisticsAreValues: Boolean = true
}
trait DotFamily2[N1<:Var,N2<:Var] extends TensorFamily2[N1,N2] with DotFamily {
def score(v1:N1#Value, v2:N2#Value): Double = statisticsScore(statistics(v1, v2))
}
trait DotFamilyWithStatistics2[N1<:TensorVar,N2<:TensorVar] extends TensorFamilyWithStatistics2[N1,N2] with DotFamily2[N1,N2] {
override def weights: Weights2
//def score(v1:N1#Value, v2:N2#Value): Double = weightsSet dot statistics(v1, v2)
override def valuesScore(tensor:Tensor): Double = statisticsScore(tensor)
// TODO Consider a more efficient implementation of some cases
// TODO Should we consider the capability for something other than *summing* over elements of tensor2?
def valueScores1(tensor2:Tensor): Tensor1 = weights.value match {
case weights: Tensor2 => {
val dim = weights.dim1 // statisticsDomains._1.dimensionDomain.size
val result = new DenseTensor1(dim)
tensor2 match {
case tensor2:SingletonBinaryTensor1 => {
val j = tensor2.singleIndex
for (i <- 0 until dim) result(i) = weights(i, j)
}
case tensor2:SingletonTensor1 => {
val j = tensor2.singleIndex
val v = tensor2.singleValue
for (i <- 0 until dim) result(i) = v * weights(i, j)
}
case tensor2:UnaryTensor1 => {
for (i <- 0 until dim; j <- 0 until tensor2.length) result(i) += weights(i, j)
}
case tensor2:UniformTensor1 => {
val v = tensor2.uniformValue
for (i <- 0 until dim; j <- 0 until tensor2.length) result(i) += v * weights(i, j)
}
case _ => {
tensor2.foreachActiveElement((j,v) => for (i <- 0 until dim) result(i) += v * weights(i, j))
}
}
result
}
}
// TODO Consider a more efficient implementation of some cases
// TODO Should we consider the capability for something other than *summing* over elements of tensor1?
def valueScores2(tensor1:Tensor): Tensor1 = weights.value match {
case weights: Tensor2 => {
val dim = weights.dim2 //statisticsDomains._2.dimensionDomain.size
val result = new DenseTensor1(dim)
tensor1 match {
case tensor1:SingletonBinaryTensor1 => {
val i = tensor1.singleIndex
for (j <- 0 until dim) result(j) = weights(i, j)
}
case tensor1:SingletonTensor1 => {
val i = tensor1.singleIndex
val v = tensor1.singleValue
for (j <- 0 until dim) result(j) = v * weights(i, j)
}
case tensor1:UnaryTensor1 => {
for (i <- 0 until tensor1.length; j <- 0 until dim) result(i) += weights(i, j)
}
case tensor1:UniformTensor1 => {
val v = tensor1.uniformValue
for (i <- 0 until tensor1.length; j <- 0 until dim) result(j) += v * weights(i, j)
}
case _ => {
tensor1.foreachActiveElement((i,v) => for (j <- 0 until dim) result(j) += v * weights(i, j))
}
}
result
}
}
}
//trait Statistics2[S1,S2] extends Family {
// self =>
// type StatisticsType = Statistics
// final case class Statistics(_1:S1, _2:S2) extends super.Statistics {
// val score = self.score(this)
// }
// def score(s:Statistics): Double
//}
//
//trait TensorStatistics2[S1<:Tensor,S2<:Tensor] extends TensorFamily {
// self =>
// type StatisticsType = Statistics
// //override def statisticsDomains: Tuple2[VectorDomain with Domain[S1], VectorDomain with Domain[S2]]
// final case class Statistics(_1:S1, _2:S2) extends { val tensor: Tensor = Tensor.outer(_1, _2) } with super.Statistics {
// val score = self.score(this)
// }
//}
//
//trait DotStatistics2[S1<:Tensor,S2<:Tensor] extends TensorStatistics2[S1,S2] with DotFamily {
// override def weightsSet: Tensor2
// //def statisticsScore(tensor:Tensor) = weightsSet dot tensor
//}
//
//trait FamilyWithStatistics2[N1<:Variable,N2<:Variable] extends Family2[N1,N2] with Statistics2[N1#Value,N2#Value] {
//// def statistics(values:Values) = Stat(values._1, values._2)
// def statistics(v1:N1#Value, v2:N2#Value) = Statistics(v1, v2)
//}
//
//trait FamilyWithTensorStatistics2[N1<:VectorVar,N2<:VectorVar] extends Family2[N1,N2] with TensorStatistics2[N1#Value,N2#Value] {
//// def statistics(values:Values) = Stat(values._1, values._2)
// def statistics(v1:N1#Value, v2:N2#Value) = Statistics(v1, v2)
//}
//
//trait FamilyWithDotStatistics2[N1<:VectorVar,N2<:VectorVar] extends Family2[N1,N2] with DotStatistics2[N1#Value,N2#Value] {
//// def statistics(values:Values) = Stat(values._1, values._2)
// def statistics(v1:N1#Value, v2:N2#Value) = Statistics(v1, v2)
// override def valuesScore(tensor:Tensor): Double = statisticsScore(tensor)
// // TODO Consider a more efficient implementation of some cases
// // TODO Should we consider the capability for something other than *summing* over elements of tensor2?
// def valueScores1(tensor2:Tensor): Tensor1 = weightsSet match {
// case weightsSet: Tensor2 => {
// val dim = weightsSet.dim1 // statisticsDomains._1.dimensionDomain.size
// val result = new DenseTensor1(dim)
// tensor2 match {
// case tensor2:SingletonBinaryTensor1 => {
// val j = tensor2.singleIndex
// for (i <- 0 until dim) result(i) = weightsSet(i, j)
// }
// case tensor2:SingletonTensor1 => {
// val j = tensor2.singleIndex
// val v = tensor2.singleValue
// for (i <- 0 until dim) result(i) = v * weightsSet(i, j)
// }
// case tensor2:UnaryTensor1 => {
// for (i <- 0 until dim; j <- 0 until tensor2.length) result(i) += weightsSet(i, j)
// }
// case tensor2:UniformTensor1 => {
// val v = tensor2.uniformValue
// for (i <- 0 until dim; j <- 0 until tensor2.length) result(i) += v * weightsSet(i, j)
// }
// case _ => {
// tensor2.foreachActiveElement((j,v) => for (i <- 0 until dim) result(i) += v * weightsSet(i, j))
// }
// }
// result
// }
// }
// // TODO Consider a more efficient implementation of some cases
// // TODO Should we consider the capability for something other than *summing* over elements of tensor1?
// def valueScores2(tensor1:Tensor): Tensor1 = weightsSet match {
// case weightsSet: Tensor2 => {
// val dim = weightsSet.dim2 //statisticsDomains._2.dimensionDomain.size
// val result = new DenseTensor1(dim)
// tensor1 match {
// case tensor1:SingletonBinaryTensor1 => {
// val i = tensor1.singleIndex
// for (j <- 0 until dim) result(j) = weightsSet(i, j)
// }
// case tensor1:SingletonTensor1 => {
// val i = tensor1.singleIndex
// val v = tensor1.singleValue
// for (j <- 0 until dim) result(j) = v * weightsSet(i, j)
// }
// case tensor1:UnaryTensor1 => {
// for (i <- 0 until tensor1.length; j <- 0 until dim) result(i) += weightsSet(i, j)
// }
// case tensor1:UniformTensor1 => {
// val v = tensor1.uniformValue
// for (i <- 0 until tensor1.length; j <- 0 until dim) result(j) += v * weightsSet(i, j)
// }
// case _ => {
// tensor1.foreachActiveElement((i,v) => for (j <- 0 until dim) result(j) += v * weightsSet(i, j))
// }
// }
// result
// }
// }
//}
| zxsted/factorie | src/main/scala/cc/factorie/model/Factor2.scala | Scala | apache-2.0 | 26,948 |
package filodb.coordinator
import scala.collection.mutable
import scala.util.{Failure, Success}
import akka.actor.{ActorRef, Address, AddressFromURIString}
import com.typesafe.scalalogging.StrictLogging
import org.scalactic._
import filodb.coordinator.NodeClusterActor._
import filodb.core.{DatasetRef, ErrorResponse, Response, Success => SuccessResponse}
import filodb.core.downsample.DownsampleConfig
import filodb.core.metadata.Dataset
import filodb.core.store.{AssignShardConfig, IngestionConfig, StoreConfig}
/**
* NodeClusterActor delegates shard management business logic to this class.
* It is the home for shard assignment state (shard mappers) for all datasets,
* and is responsible for mutating them based on cluster membership events and
* dataset add/remove operations.
*
* This class also ensures that shard assignment to nodes are optimal and ensures
* maximum number of shards are "available" for service at any given time.
*
* This class currently handles shard event subscriptions too, but:
* TODO: Move Subscription logic outside of this class into a separate helper class.
*/
private[coordinator] final class ShardManager(settings: FilodbSettings,
strategy: ShardAssignmentStrategy) extends StrictLogging {
import ShardManager._
private var _subscriptions = ShardSubscriptions(Set.empty, Set.empty)
private val _datasetInfo = new mutable.HashMap[DatasetRef, DatasetInfo]
private val _shardMappers = new mutable.HashMap[DatasetRef, ShardMapper]
// preserve deployment order - newest last
private val _coordinators = new mutable.LinkedHashMap[Address, ActorRef]
private val _errorShardReassignedAt = new mutable.HashMap[DatasetRef, mutable.HashMap[Int, Long]]
private val _tenantIngestionMeteringOpt =
if (settings.config.getBoolean("shard-key-level-ingestion-metrics-enabled")) {
val inst = TenantIngestionMetering(
settings,
() => { _datasetInfo.map{ case (dsRef, _) => dsRef}.toIterator },
() => { _coordinators.head._2 })
inst.schedulePeriodicPublishJob()
Some(inst)
} else None
val shardReassignmentMinInterval = settings.config.getDuration("shard-manager.reassignment-min-interval")
/* These workloads used to live in an actor and now exist in an unprotected class.
Do not expose the mutable datasets. Internal work always uses the mutable maps above;
the immutable views below are for users and tests, and are called infrequently. */
def subscriptions: ShardSubscriptions = _subscriptions
def datasetInfo: Map[DatasetRef, DatasetInfo] = _datasetInfo.toMap
def shardMappers: Map[DatasetRef, ShardMapper] = _shardMappers.toMap
def coordinators: Seq[ActorRef] = _coordinators.values.toSeq
/** Subscribes the given actor to shard events for all datasets and sends it
* the current shard map snapshot of every dataset.
*/
def subscribeAll(subscriber: ActorRef): Unit = {
logger.info(s"Subscribing $subscriber to events from all datasets and subscriptions")
_subscriptions = subscriptions subscribe subscriber
_subscriptions.watchers foreach (_ ! subscriptions)
// send the subscriber all current shardMappers
_shardMappers foreach { case (ref, map) => subscriber ! CurrentShardSnapshot(ref, map) }
}
def logAllMappers(msg: String = ""): Unit = {
_shardMappers.foreach { case (ref, mapper) =>
logger.info(s"$msg dataset=$ref Current mapper state:\\n${mapper.prettyPrint}")
}
}
/** If the mapper for the provided `datasetRef` has been added, sends an initial
* current snapshot of partition state, as ingestion will subscribe usually when
* the cluster is already stable.
*
* This function is called in two cases: when a client sends the cluster actor
* a `SubscribeShardUpdates`, and when a coordinator creates the memstore
* and query actor for a newly-registered dataset and sends the shard actor
* a subscribe for the query actor. In the first case there is no guarantee
* that the dataset is setup, in the second there is.
*
* INTERNAL API. Idempotent.
*/
def subscribe(subscriber: ActorRef, dataset: DatasetRef): Unit =
mapperCopyOpt(dataset) match {
case Some(current) =>
logger.info(s"Adding $subscriber as a subscriber for dataset=$dataset")
_subscriptions = subscriptions.subscribe(subscriber, dataset)
_subscriptions.watchers foreach (_ ! subscriptions)
subscriber ! current
case _ =>
logger.error(s"dataset=$dataset unknown, unable to subscribe $subscriber.")
subscriber ! DatasetUnknown(dataset)
}
/**
* Unsubscribes a subscriber from all dataset shard updates.
* Sends watchers the updated subscriptions.
* INTERNAL API. Idempotent.
*
* @param subscriber the cluster member removed from the cluster
* or regular subscriber unsubscribing
*/
def unsubscribe(subscriber: ActorRef): Unit = {
_subscriptions = subscriptions unsubscribe subscriber
_subscriptions.watchers foreach (_ ! subscriptions)
}
/** Returns the subscribers for the dataset. If the subscription
* does not exist the returned set will be empty.
*
* INTERNAL API. Read-only.
*/
private def getSubscribers(ds: DatasetRef): Set[ActorRef] =
_subscriptions.subscribers(ds)
/** Resets all state except for coord list.
* INTERNAL API.
*/
def reset(): Unit = {
_datasetInfo.values.foreach(_.metrics.reset())
_datasetInfo.clear()
_shardMappers.clear()
_subscriptions = subscriptions.clear
}
/** Here the origin can be a client, forwarded from the `NodeClusterActor`.
* The response is returned directly to the requester.
*/
def sendSnapshot(ref: DatasetRef, origin: ActorRef): Unit =
origin ! mapperCopyOpt(ref).getOrElse(DatasetUnknown(ref))
/**
* Returns a complete copy of the ShardMapper within a CurrentShardSnapshot, if the dataset
* exists. Although a copy of the ShardMapper isn't typically required, it is required for
* the tests to work properly. This is because the TestProbe provides access to the local
* ShardMapper instance, and so any observation of the snapshot would expose the latest
* mappings instead. The complete copy also offers a nice safeguard, in case the ShardMapper
* is concurrently modified before the message is sent. This isn't really expected, however.
*/
private def mapperCopyOpt(ref: DatasetRef): Option[CurrentShardSnapshot] =
_shardMappers.get(ref).map(m => CurrentShardSnapshot(ref, m.copy()))
/**
* Same as mapperCopyOpt, except it directly references the ShardMapper instance.
*/
private def mapperOpt(ref: DatasetRef): Option[CurrentShardSnapshot] =
_shardMappers.get(ref).map(m => CurrentShardSnapshot(ref, m))
/** Called on MemberUp. Handles acquiring assignable shards, if any, assignment,
* and full setup of new node.
*
* @param address the `akka.cluster.Cluster.selfAddress` of the node
* @param coordinator the node coordinator
*/
def addMember(address: Address, coordinator: ActorRef): Unit = {
logger.info(s"Initiated addMember for coordinator $coordinator")
_coordinators(address) = coordinator
for ((dataset, resources, mapper) <- datasetShardMaps) {
val assignable = strategy.shardAssignments(coordinator, dataset, resources, mapper)
if (assignable.nonEmpty) {
doAssignShards(dataset, coordinator, assignable)
publishChanges(dataset)
}
}
logAllMappers(s"Completed addMember for coordinator $coordinator. Status Map:")
}
/** Called on MemberRemoved, new status already updated. */
def removeMember(address: Address): Option[ActorRef] = {
_coordinators.get(address) map { coordinator =>
logger.info(s"Initiated removeMember for coordinator=$coordinator on $address")
_coordinators remove address
removeCoordinator(coordinator)
logAllMappers(s"Completed removeMember for coordinator $address")
coordinator
}
}
private def updateShardMetrics(): Unit = {
_datasetInfo.foreach { case (dataset, info) =>
info.metrics.update(_shardMappers(dataset))
}
}
import OptionSugar._
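// Scalactic's `Or` type is used for the validations below: `Good` wraps a valid value,
// `Bad` wraps an ErrorResponse, and `toOr` (from OptionSugar) lifts an Option into an Or
// with the supplied error as the Bad case.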
/**
* Validate whether the dataset exists.
*
* @param dataset - Input dataset
* @return - shardMapper for the dataset
*/
def validateDataset(dataset: DatasetRef): ShardMapper Or ErrorResponse = {
_shardMappers.get(dataset).toOr(DatasetUnknown(dataset))
}
/**
* Validate whether the given node exists or not.
*
* @param address - Node address
* @param shards - List of shards
* @return - coordinator for the node address
*/
def validateCoordinator(address: String, shards: Seq[Int]): ActorRef Or ErrorResponse = {
_coordinators.get(AddressFromURIString(address)).toOr(BadData(s"$address not found"))
}
/**
* Check if all the given shards are valid:
* i. Shard number should be >= 0 and < maxAllowedShard
* ii. Shard should not be already assigned to given node in ReassignShards request
* iii. Shard should be not be already assigned to any node
*
* @param shards - List of shards
* @param shardMapper - ShardMapper object
* @param coord - Coordinator
* @return - The list of valid shards
*/
def validateShards(shards: Seq[Int], shardMapper: ShardMapper, coord: ActorRef): Seq[Int] Or ErrorResponse = {
val validShards: Seq[Int] = shards.filter(shard => shard >= 0 && shard < shardMapper.numShards).distinct
if (validShards.isEmpty || validShards.size != shards.size) {
Bad(BadSchema(s"Invalid shards found $shards. Valid shards are $validShards"))
} else if (validShards.exists(shard => shardMapper.coordForShard(shard) == coord)) {
Bad(BadSchema(s"Can not reassign shards to same node: $shards"))
} else if (validShards.exists(shard => shardMapper.coordForShard(shard) != ActorRef.noSender)) {
Bad(BadSchema(s"Can not start $shards on $coord. Please stop shards before starting"))
} else Good(validShards)
}
/**
* Check if all the given shards are valid:
* - Shard number should be >= 0 and < maxAllowedShard
* - Shard should be already assigned to one node
* @param shards - List of shards to be stopped
* @param shardMapper - Shard Mapper object
* @return
*/
def validateShardsToStop(shards: Seq[Int], shardMapper: ShardMapper): Seq[Int] Or ErrorResponse = {
val validShards: Seq[Int] = shards.filter(shard => shard >= 0 && shard < shardMapper.numShards).distinct
if (validShards.isEmpty || validShards.size != shards.size) {
Bad(BadSchema(s"Invalid shards found $shards. Valid shards are $validShards"))
} else if (validShards.exists(shard => shardMapper.coordForShard(shard) == ActorRef.noSender)) {
Bad(BadSchema(s"Can not stop shards $shards not assigned to any node"))
} else Good(validShards)
}
/**
* Verify whether there is enough capacity to add new shards on the node.
* Uses the ShardAssignmentStrategy to get the remaining capacity of the node
* and validates the requested shard list against it.
*
* @param shardList - List of shards
* @param shardMapper - ShardMapper object
@param dataset - Dataset from the request
* @param resources - Dataset resources
* @param coord - Coordinator
* @return - Bad/Good
*/
def validateNodeCapacity(shardList: Seq[Int], shardMapper: ShardMapper, dataset: DatasetRef,
resources: DatasetResourceSpec, coord: ActorRef): Unit Or ErrorResponse = {
val shardMapperNew = shardMapper.copy() // This copy is done to simulate the assignmentStrategy
shardList.foreach(shard => shardMapperNew.updateFromEvent(
ShardDown(dataset, shard, shardMapperNew.coordForShard(shard))))
val assignable = strategy.remainingCapacity(coord, dataset, resources, shardMapperNew)
if (assignable <= 0 && shardList.size > assignable) {
Bad(BadSchema(s"Capacity exceeded. Cannot allocate more shards to $coord"))
} else Good(())
}
/**
* Stop the required shards against the given dataset.
* Returns DatasetUnknown for dataset that does not exist.
*/
def stopShards(shardStopReq: StopShards, ackTo: ActorRef): Unit = {
logger.info(s"Stop Shard request=${shardStopReq.unassignmentConfig} " +
s"for dataset=${shardStopReq.datasetRef} ")
val answer: Response = validateRequestAndStopShards(shardStopReq, ackTo)
.fold(_ => SuccessResponse, errorResponse => errorResponse)
logAllMappers(s"Completed stopShards $shardStopReq")
ackTo ! answer
}
/**
* Validates the given stopShard request.
* Stops shard from current active node.
*
* Performs the validations serially.
*
* @return - Validates and returns error message on failure and a unit if no validation error
*/
private def validateRequestAndStopShards(shardStopReq: StopShards, ackTo: ActorRef): Unit Or ErrorResponse = {
for {
shardMapper <- validateDataset(shardStopReq.datasetRef)
shards <- validateShardsToStop(shardStopReq.unassignmentConfig.shardList, shardMapper)
} yield {
unassignShards(shards, shardStopReq.datasetRef, shardMapper)
}
}
/**
* Shutdown shards from the coordinator where it is running
*/
private def unassignShards(shards: Seq[Int],
dataset: DatasetRef,
shardMapper: ShardMapper): Unit = {
for { shard <- shards} {
val curCoordinator = shardMapper.coordForShard(shard)
doUnassignShards(dataset, curCoordinator, Seq(shard))
}
publishChanges(dataset)
}
/**
* Start the shards on the given coordinator.
* Returns DatasetUnknown for dataset that does not exist.
*/
def startShards(shardStartReq: StartShards, ackTo: ActorRef): Unit = {
logger.info(s"Start Shard request=${shardStartReq.assignmentConfig} " +
s"for dataset=${shardStartReq.datasetRef} ")
val answer: Response = validateRequestAndStartShards(shardStartReq.datasetRef,
shardStartReq.assignmentConfig, ackTo)
.fold(_ => SuccessResponse, errorResponse => errorResponse)
logAllMappers(s"Completed startShards $shardStartReq")
ackTo ! answer
}
/**
* Validates the start shard request.
* Starts shards on the new node only if valid.
*
* Performs the validations serially.
*
* @return - Validates and returns error message on failure and a unit if no validation error
*/
private def validateRequestAndStartShards(dataset: DatasetRef,
assignmentConfig: AssignShardConfig,
ackTo: ActorRef): Unit Or ErrorResponse = {
for {
shardMapper <- validateDataset(dataset)
coordinator <- validateCoordinator(assignmentConfig.address, assignmentConfig.shardList)
shards <- validateShards(assignmentConfig.shardList, shardMapper, coordinator)
_ <- validateNodeCapacity(shards, shardMapper, dataset,
_datasetInfo(dataset).resources, coordinator)
} yield {
doAssignShards(dataset, coordinator, shards)
publishChanges(dataset)
}
}
/**
* Called after recovery of cluster singleton to remove assignment of stale member(s).
* This is necessary after fail-over of the cluster singleton node because MemberRemoved
   * for failed singleton nodes is not consistently delivered to the node owning the new singleton.
*/
def removeStaleCoordinators(): Unit = {
logger.info("Attempting to remove stale coordinators from cluster")
val nodesToRemove = for {
(dataset, mapper) <- shardMappers
} yield {
val allRegisteredNodes = mapper.allNodes
val toRemove = allRegisteredNodes -- coordinators // coordinators is the list of recovered nodes
logger.info(s"Cleaning up dataset=$dataset stale coordinators $toRemove after recovery")
toRemove
}
for { coord <- nodesToRemove.flatten } {
removeCoordinator(coord)
}
updateShardMetrics()
logAllMappers("Finished removing stale coordinators")
}
private def removeCoordinator(coordinator: ActorRef): Unit = {
for ((dataset, resources, mapper) <- datasetShardMaps) {
      val shardsToDown = mapper.shardsForCoord(coordinator)
doUnassignShards(dataset, coordinator, shardsToDown)
// try to reassign shards that were unassigned to other nodes that have room.
assignShardsToNodes(dataset, mapper, resources)
publishChanges(dataset)
}
}
/**
* Adds new dataset to cluster, thereby initiating new shard assignments to existing nodes
* @return new assignments that were made. Empty if dataset already exists.
*/
def addDataset(dataset: Dataset,
ingestConfig: IngestionConfig,
source: IngestionSource,
ackTo: Option[ActorRef]): Map[ActorRef, Seq[Int]] = {
logger.info(s"Initiated setup for dataset=${dataset.ref}")
val answer: Map[ActorRef, Seq[Int]] = mapperOpt(dataset.ref) match {
case Some(_) =>
logger.info(s"dataset=${dataset.ref} already exists - skipping addDataset workflow")
ackTo.foreach(_ ! DatasetExists(dataset.ref))
Map.empty
case None =>
val resources = DatasetResourceSpec(ingestConfig.numShards, ingestConfig.minNumNodes)
val mapper = new ShardMapper(resources.numShards)
_shardMappers(dataset.ref) = mapper
// Access the shardmapper through the HashMap so even if it gets replaced it will update the shard stats
val metrics = new ShardHealthStats(dataset.ref, _shardMappers(dataset.ref))
val state = DatasetInfo(resources, metrics, source, ingestConfig.downsampleConfig,
ingestConfig.storeConfig, dataset)
_datasetInfo(dataset.ref) = state
// NOTE: no snapshots get published here because nobody subscribed to this dataset yet
val assignments = assignShardsToNodes(dataset.ref, mapper, resources)
// Add dataset to subscribers and send initial ShardMapper snapshot
_subscriptions :+= ShardSubscription(dataset.ref, Set.empty)
_subscriptions.watchers foreach (subscribe(_, dataset.ref))
logAllMappers(s"Completed setup for dataset=${dataset.ref}")
ackTo.foreach(_ ! DatasetVerified)
assignments
}
publishChanges(dataset.ref)
answer
}
private def assignShardsToNodes(dataset: DatasetRef,
mapper: ShardMapper,
resources: DatasetResourceSpec,
excludeCoords: Seq[ActorRef] = Nil): Map[ActorRef, Seq[Int]] = {
(for {
coord <- latestCoords if !excludeCoords.contains(coord) // assign shards on newer nodes first
} yield {
val assignable = strategy.shardAssignments(coord, dataset, resources, mapper)
if (assignable.nonEmpty) doAssignShards(dataset, coord, assignable)
coord -> assignable
}).toMap
}
def removeDataset(dataset: DatasetRef): Unit = {
logger.info(s"Initiated removal for dataset=$dataset")
for {
(_, coord) <- _coordinators
mapper = _shardMappers(dataset)
shardsToDown = mapper.shardsForCoord(coord)
} doUnassignShards(dataset, coord, shardsToDown)
publishChanges(dataset)
_datasetInfo remove dataset
_shardMappers remove dataset
_subscriptions = _subscriptions - dataset
logAllMappers(s"Completed removal of dataset=$dataset")
}
/**
* Intended for recovery of ShardMapper state only - recovers a current ShardMap as well as updating a list of
* members / coordinator ActorRefs
*/
def recoverShards(ref: DatasetRef, map: ShardMapper): Unit = {
logger.info(s"Recovering ShardMap for dataset=$ref ; ShardMap contents: $map")
_shardMappers(ref) = map
publishChanges(ref)
}
def recoverSubscriptions(subs: ShardSubscriptions): Unit = {
logger.info(s"Recovering (adding) subscriptions from $subs")
// we have to remove existing subscriptions (which are probably empty) for datasets, otherwise new ones
// might not take hold
val newSubRefs = subs.subscriptions.map(_.dataset)
val trimmedSubs = _subscriptions.subscriptions.filterNot(newSubRefs contains _.dataset)
_subscriptions = subscriptions.copy(subscriptions = trimmedSubs ++ subs.subscriptions,
watchers = _subscriptions.watchers ++ subs.watchers)
logger.debug(s"Recovered subscriptions = $subscriptions")
}
/**
* Applies an event (usually) from IngestionActor to
* the ShardMapper for dataset.
*/
def updateFromExternalShardEvent(sender: ActorRef, event: ShardEvent): Unit = {
_shardMappers.get(event.ref) foreach { mapper =>
val currentCoord = mapper.coordForShard(event.shard)
if (currentCoord == ActorRef.noSender) {
logger.debug(s"Ignoring event=$event from sender=$sender for dataset=${event.ref} since shard=${event.shard} " +
s"is not currently assigned. Was $sender the previous owner for a shard that was just unassigned? " +
s"How else could this happen? ")
// Note that this path is not used for an initial shard assignment when currentCoord would indeed be noSender;
// used only for reacting to shard events sent from member nodes.
} else if (currentCoord.path.address == sender.path.address) {
// Above condition ensures that we respond to shard events only from the node shard is currently assigned to.
// Needed to avoid race conditions where IngestionStopped for an old assignment comes after shard is reassigned.
updateFromShardEvent(event)
logAllMappers(s"After Update from event $event")
// reassign shard if IngestionError. Exclude previous node since it had error shards.
event match {
case _: IngestionError =>
require(mapper.unassignedShards.contains(event.shard))
val lastReassignment = getShardReassignmentTime(event.ref, event.shard)
val now = System.currentTimeMillis()
val info = _datasetInfo(event.ref)
if (now - lastReassignment > shardReassignmentMinInterval.toMillis) {
logger.warn(s"Attempting to reassign shard=${event.shard} from dataset=${event.ref}. " +
s"It was last reassigned at $lastReassignment")
val assignments = assignShardsToNodes(event.ref, mapper, info.resources, Seq(currentCoord))
if (assignments.valuesIterator.flatten.contains(event.shard)) {
setShardReassignmentTime(event.ref, event.shard, now)
info.metrics.numErrorReassignmentsDone.increment()
logAllMappers(s"Successfully reassigned dataset=${event.ref} shard=${event.shard}")
} else {
info.metrics.numErrorReassignmentsSkipped.increment()
logAllMappers(s"Could not reassign dataset=${event.ref} shard=${event.shard}")
logger.warn(s"Shard=${event.shard} from dataset=${event.ref} was NOT reassigned possibly " +
s"because no other node was available")
}
} else {
info.metrics.numErrorReassignmentsSkipped.increment()
logger.warn(s"Skipping reassignment of shard=${event.shard} from dataset=${event.ref} since " +
s"it was already reassigned within ${shardReassignmentMinInterval} at ${lastReassignment}")
}
case _ =>
}
// RecoveryInProgress status results in too many messages that really do not need a publish
if (!event.isInstanceOf[RecoveryInProgress]) publishSnapshot(event.ref)
} else {
logger.debug(s"Ignoring event $event from $sender for dataset=${event.ref} since it does not match current " +
s"owner of shard=${event.shard} which is ${mapper.coordForShard(event.shard)}")
}
}
}
private def getShardReassignmentTime(dataset: DatasetRef, shard: Int): Long = {
val shardReassignmentMap = _errorShardReassignedAt.getOrElseUpdate(dataset, mutable.HashMap())
shardReassignmentMap.getOrElse(shard, 0L)
}
private def setShardReassignmentTime(dataset: DatasetRef, shard: Int, time: Long): Unit = {
val shardReassignmentMap = _errorShardReassignedAt.getOrElseUpdate(dataset, mutable.HashMap())
shardReassignmentMap(shard) = time
}
/** Selects the `ShardMapper` for the provided dataset and updates the mapper
* for the received shard event from the event source
*/
private def updateFromShardEvent(event: ShardEvent): Unit = {
_shardMappers.get(event.ref) foreach { mapper =>
mapper.updateFromEvent(event) match {
case Failure(l) =>
logger.error(s"updateFromShardEvent error for dataset=${event.ref} event $event. Mapper now: $mapper", l)
case Success(r) =>
logger.debug(s"updateFromShardEvent success for dataset=${event.ref} event $event. Mapper now: $mapper")
}
}
updateShardMetrics()
}
private def doAssignShards(dataset: DatasetRef,
coord: ActorRef,
shards: Seq[Int]): Unit = {
logger.info(s"Assigning shards for dataset=$dataset to " +
s"coordinator $coord for shards $shards")
for { shard <- shards } {
val event = ShardAssignmentStarted(dataset, shard, coord)
updateFromShardEvent(event)
}
}
private def doUnassignShards(dataset: DatasetRef,
coordinator: ActorRef,
shardsToDown: Seq[Int]): Unit = {
logger.info(s"Unassigning shards for dataset=$dataset to " +
s"coordinator $coordinator for shards $shardsToDown")
for { shard <- shardsToDown } {
val event = ShardDown(dataset, shard, coordinator)
updateFromShardEvent(event)
}
}
/**
* To be called after making a bunch of changes to the ShardMapper for the given dataset.
* Calling this method more often is permitted, but it generates more publish messages
* than is necessary.
*/
private def publishChanges(ref: DatasetRef): Unit = {
publishSnapshot(ref)
updateShardMetrics()
}
/** Publishes a ShardMapper snapshot of given dataset to all subscribers of that dataset. */
def publishSnapshot(ref: DatasetRef): Unit = {
mapperCopyOpt(ref) match {
case Some(snapshot) => {
for {
subscription <- _subscriptions.subscription(ref)
} subscription.subscribers foreach (_ ! snapshot)
// Also send a complete ingestion state command to all ingestion actors. Without this,
// they won't start or stop ingestion.
// Note that all coordinators also get latest snapshot through this.
// TODO: Need to provide a globally consistent version, incremented when anything
// changes, for any dataset.
val resync = ShardIngestionState(0, snapshot.ref, snapshot.map)
for (coord <- coordinators) {
coord ! resync
}
}
case None =>
logger.warn(s"Cannot publish snapshot which doesn't exist for ref $ref")
}
}
private def latestCoords: Seq[ActorRef] =
_coordinators.values.foldLeft(List[ActorRef]())((x, y) => y :: x) // reverses the set
private def datasetShardMaps: Iterable[(DatasetRef, DatasetResourceSpec, ShardMapper)] =
for {
(dataset, state) <- _datasetInfo
mapper = _shardMappers(dataset)
resources = state.resources
} yield (dataset, resources, mapper)
}
private[coordinator] object ShardManager {
final case class DatasetInfo(resources: DatasetResourceSpec,
metrics: ShardHealthStats,
source: IngestionSource,
downsample: DownsampleConfig,
storeConfig: StoreConfig,
dataset: Dataset)
}
| filodb/FiloDB | coordinator/src/main/scala/filodb.coordinator/ShardManager.scala | Scala | apache-2.0 | 28,290 |
package au.com.dius.pact.provider.scalatest
import java.io.File
import java.net.URL
import java.util.concurrent.Executors
import au.com.dius.pact.model
import au.com.dius.pact.model.{FullResponseMatch, RequestResponseInteraction, ResponseMatching, Pact => PactForConsumer}
import au.com.dius.pact.provider.sbtsupport.HttpClient
import au.com.dius.pact.provider.scalatest.ProviderDsl.defaultPactDirectory
import au.com.dius.pact.provider.scalatest.Tags.ProviderTest
import au.com.dius.pact.provider.{ConsumerInfo, ProviderUtils, ProviderVerifier}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import scala.collection.JavaConversions._
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
/**
* Trait to run consumer pacts against the provider
*/
trait ProviderSpec extends FlatSpec with BeforeAndAfterAll with ProviderDsl with Matchers {
private var handler: Option[ServerStarterWithUrl] = None
/**
* Verifies pacts with a given configuration.
   * Every interaction will be registered as a standalone test in this [[org.scalatest.FlatSpec]].
*
* @param verificationConfig
*/
def verify(verificationConfig: VerificationConfig): Unit = {
import verificationConfig.pact._
import verificationConfig.serverConfig._
val verifier = new ProviderVerifier
ProviderUtils.loadPactFiles(new model.Provider(provider), new File(uri)).asInstanceOf[java.util.List[ConsumerInfo]]
.filter(consumer.filter)
.flatMap(c => verifier.loadPactFileForConsumer(c).asInstanceOf[PactForConsumer].getInteractions.map(i => (c.getName, i.asInstanceOf[RequestResponseInteraction])))
.foreach { case (consumerName, interaction) =>
val description = new StringBuilder(s"${interaction.getDescription} for '$consumerName'")
if (interaction.getProviderState != null) description.append(s" given ${interaction.getProviderState}")
provider should description.toString() taggedAs ProviderTest in {
startServerWithState(serverStarter, interaction.getProviderState)
implicit val executionContext = ExecutionContext.fromExecutor(Executors.newCachedThreadPool())
val request = interaction.getRequest.copy
handler.foreach(h => request.setPath(s"${h.url.toString}${interaction.getRequest.getPath}"))
val actualResponseFuture = HttpClient.run(request)
val actualResponse = Await.result(actualResponseFuture, 5 seconds)
if (restartServer) stopServer()
ResponseMatching.matchRules(interaction.getResponse, actualResponse) shouldBe (FullResponseMatch)
}
}
}
override def afterAll() = {
super.afterAll()
stopServer()
}
private def startServerWithState(serverStarter: ServerStarter, state: String) {
handler = handler.orElse {
Some(ServerStarterWithUrl(serverStarter))
}.map { h =>
h.initState(state)
h
}
}
private def stopServer() {
handler.foreach { h =>
h.stopServer()
handler = None
}
}
private case class ServerStarterWithUrl(serverStarter: ServerStarter) {
val url: URL = serverStarter.startServer()
def initState(state: String) = serverStarter.initState(state)
def stopServer() = serverStarter.stopServer()
}
}
/**
* Convenient abstract class to run pacts from a given directory against a defined provider and consumer.
* Provider will be restarted and state will be set before every interaction.
*
* @param provider
* @param directory
* @param consumer
*/
abstract class PactProviderRestartDslSpec(provider: String, directory: String = defaultPactDirectory.directory, consumer: Consumer = ProviderDsl.all) extends ProviderSpec {
def serverStarter: ServerStarter
verify(provider complying consumer pacts from(directory) testing (serverStarter) withRestart)
}
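// A minimal usage sketch (hypothetical: MyServerStarter and the provider name below are
// illustrative, not part of this file). Extending the DSL spec is enough to register one
// ScalaTest test per pact interaction found in the default pact directory:
//
//   class MyProviderPactSpec extends PactProviderRestartDslSpec("my-provider") {
//     val serverStarter: ServerStarter = new MyServerStarter
//   }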
/**
* Convenient abstract class to run pacts from a given directory against a defined provider and consumer.
 * Provider won't be restarted; only the server's state handler method will be called before every interaction.
*
* @param provider
* @param directory
* @param consumer
*/
abstract class PactProviderStatefulDslSpec(provider: String, directory: String = defaultPactDirectory.directory, consumer: Consumer = ProviderDsl.all) extends ProviderSpec {
def serverStarter: ServerStarter
verify(provider complying consumer pacts from(directory) testing (serverStarter) withoutRestart)
}
| olga-vasylchenko/pact-jvm | pact-jvm-provider-scalatest/src/main/scala/au/com/dius/pact/provider/scalatest/ProviderSpec.scala | Scala | apache-2.0 | 4,442 |
package com.sksamuel.elastic4s.testutils
import com.sksamuel.elastic4s.testutils.StringExtensions.StringOps
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers.convertToStringShouldWrapper
class StringExtensionsTest extends AnyFlatSpec {
it should "convert line endings to Windows style" in {
"one\\r\\ntwo\\nthree\\n".withWindowsLineEndings shouldBe "one\\r\\ntwo\\r\\nthree\\r\\n"
}
it should "convert line endings to Unix style" in {
"one\\r\\ntwo\\nthree\\r\\n".withUnixLineEndings shouldBe "one\\ntwo\\nthree\\n"
}
}
| sksamuel/elastic4s | elastic4s-core/src/test/scala/com/sksamuel/elastic4s/testutils/StringExtensionsTest.scala | Scala | apache-2.0 | 561 |
package models.db
import scalikejdbc._
case class CellPosition(
areaId: Int,
infoNo: Int,
cell: Int,
posX: Int,
posY: Int) {
def save()(implicit session: DBSession = CellPosition.autoSession): CellPosition = CellPosition.save(this)(session)
def destroy()(implicit session: DBSession = CellPosition.autoSession): Unit = CellPosition.destroy(this)(session)
}
object CellPosition extends SQLSyntaxSupport[CellPosition] {
override val tableName = "cell_position"
override val columns = Seq("area_id", "info_no", "cell", "pos_x", "pos_y")
def apply(cp: SyntaxProvider[CellPosition])(rs: WrappedResultSet): CellPosition = autoConstruct(rs, cp)
def apply(cp: ResultName[CellPosition])(rs: WrappedResultSet): CellPosition = autoConstruct(rs, cp)
val cp = CellPosition.syntax("cp")
override val autoSession = AutoSession
def find(areaId: Int, cell: Int, infoNo: Int)(implicit session: DBSession = autoSession): Option[CellPosition] = {
withSQL {
select.from(CellPosition as cp).where.eq(cp.areaId, areaId).and.eq(cp.cell, cell).and.eq(cp.infoNo, infoNo)
}.map(CellPosition(cp.resultName)).single().apply()
}
def findAll()(implicit session: DBSession = autoSession): List[CellPosition] = {
withSQL(select.from(CellPosition as cp)).map(CellPosition(cp.resultName)).list().apply()
}
def countAll()(implicit session: DBSession = autoSession): Long = {
withSQL(select(sqls.count).from(CellPosition as cp)).map(rs => rs.long(1)).single().apply().get
}
def findBy(where: SQLSyntax)(implicit session: DBSession = autoSession): Option[CellPosition] = {
withSQL {
select.from(CellPosition as cp).where.append(where)
}.map(CellPosition(cp.resultName)).single().apply()
}
def findAllBy(where: SQLSyntax)(implicit session: DBSession = autoSession): List[CellPosition] = {
withSQL {
select.from(CellPosition as cp).where.append(where)
}.map(CellPosition(cp.resultName)).list().apply()
}
def countBy(where: SQLSyntax)(implicit session: DBSession = autoSession): Long = {
withSQL {
select(sqls.count).from(CellPosition as cp).where.append(where)
}.map(_.long(1)).single().apply().get
}
def create(
areaId: Int,
infoNo: Int,
cell: Int,
posX: Int,
posY: Int)(implicit session: DBSession = autoSession): CellPosition = {
withSQL {
insert.into(CellPosition).columns(
column.areaId,
column.infoNo,
column.cell,
column.posX,
column.posY
).values(
areaId,
infoNo,
cell,
posX,
posY
)
}.update().apply()
CellPosition(
areaId = areaId,
infoNo = infoNo,
cell = cell,
posX = posX,
posY = posY)
}
def save(entity: CellPosition)(implicit session: DBSession = autoSession): CellPosition = {
withSQL {
update(CellPosition).set(
column.areaId -> entity.areaId,
column.infoNo -> entity.infoNo,
column.cell -> entity.cell,
column.posX -> entity.posX,
column.posY -> entity.posY
).where.eq(column.areaId, entity.areaId).and.eq(column.cell, entity.cell).and.eq(column.infoNo, entity.infoNo)
}.update().apply()
entity
}
def destroy(entity: CellPosition)(implicit session: DBSession = autoSession): Unit = {
withSQL {
delete.from(CellPosition).where.eq(column.areaId, entity.areaId).and.eq(column.cell, entity.cell).and.eq(column.infoNo, entity.infoNo)
}.update().apply()
}
}
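// Illustrative usage sketch (values are made up):
//   val cp = CellPosition.create(areaId = 1, infoNo = 1, cell = 5, posX = 120, posY = 80)
//   CellPosition.find(areaId = 1, cell = 5, infoNo = 1)   // => Some(cp)
//   cp.copy(posX = 130).save()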
| ttdoda/MyFleetGirls | server/app/models/db/CellPosition.scala | Scala | mit | 3,539 |
package scalax.collection
package config
import mutable.ArraySet
/** Base trait for `Graph` configuration and optimization hints. This type is used by
* the implicit parameter of `Graph` factory methods.
*/
trait GraphConfig {
/** Indicates the expected number of nodes in the graph. */
def orderHint: Int
}
object GraphConfig {
val defaultOrder = 4000
}
/** To be mixed in by any concrete class extending `GraphConfig` if the `Graph`
* implementation is based on adjacency lists using `ArraySet`.
*/
trait AdjacencyListArrayConfig {
this: GraphConfig =>
def adjacencyListHints: ArraySet.Hints
}
/** Configuration options for `Graph` factory methods in the core module. */
case class CoreConfig(
override val orderHint: Int = GraphConfig.defaultOrder,
override val adjacencyListHints: ArraySet.Hints = ArraySet.Hints.Default
) extends GraphConfig
with AdjacencyListArrayConfig
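// Minimal usage sketch (assumes the Graph factory methods of this library are in scope;
// the numbers are illustrative): the config is picked up as an implicit parameter.
//
//   implicit val myConfig: CoreConfig = CoreConfig(orderHint = 10000)
//   // e.g. val g = Graph(...)   // the factory reads `myConfig` implicitly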
| scala-graph/scala-graph | core/src/main/scala/scalax/collection/config/GraphConfig.scala | Scala | apache-2.0 | 915 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Changes for SnappyData data platform.
*
* Portions Copyright (c) 2017 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.storage
import java.nio.{ByteBuffer, MappedByteBuffer}
import java.util.UUID
import scala.collection.Map
import scala.collection.mutable
import sun.nio.ch.DirectBuffer
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.internal.Logging
/**
* :: DeveloperApi ::
* Storage information for each BlockManager.
*
* This class assumes BlockId and BlockStatus are immutable, such that the consumers of this
* class cannot mutate the source of the information. Accesses are not thread-safe.
*/
@DeveloperApi
class StorageStatus(val blockManagerId: BlockManagerId, val maxMem: Long) {
/**
* Internal representation of the blocks stored in this block manager.
*
* We store RDD blocks and non-RDD blocks separately to allow quick retrievals of RDD blocks.
* These collections should only be mutated through the add/update/removeBlock methods.
*/
private val _rddBlocks = new mutable.HashMap[Int, mutable.Map[BlockId, BlockStatus]]
private val _nonRddBlocks = new mutable.HashMap[BlockId, BlockStatus]
/**
   * Storage information of the blocks that entails memory and disk usage.
*
* As with the block maps, we store the storage information separately for RDD blocks and
* non-RDD blocks for the same reason. In particular, RDD storage information is stored
   * in a map indexed by the RDD ID to the following 3-tuple:
*
* (memory size, disk size, storage level)
*
* We assume that all the blocks that belong to the same RDD have the same storage level.
* This field is not relevant to non-RDD blocks, however, so the storage information for
   * non-RDD blocks contains only the first 2 fields (in the same order).
*/
private val _rddStorageInfo = new mutable.HashMap[Int, (Long, Long, StorageLevel)]
private var _nonRddStorageInfo: (Long, Long) = (0L, 0L)
/** Create a storage status with an initial set of blocks, leaving the source unmodified. */
def this(bmid: BlockManagerId, maxMem: Long, initialBlocks: Map[BlockId, BlockStatus]) {
this(bmid, maxMem)
initialBlocks.foreach { case (bid, bstatus) => addBlock(bid, bstatus) }
}
/**
* Return the blocks stored in this block manager.
*
* @note This is somewhat expensive, as it involves cloning the underlying maps and then
* concatenating them together. Much faster alternatives exist for common operations such as
* contains, get, and size.
*/
def blocks: Map[BlockId, BlockStatus] = _nonRddBlocks ++ rddBlocks
/**
* Return the RDD blocks stored in this block manager.
*
* @note This is somewhat expensive, as it involves cloning the underlying maps and then
* concatenating them together. Much faster alternatives exist for common operations such as
   * getting the memory and disk sizes occupied by this RDD.
*/
def rddBlocks: Map[BlockId, BlockStatus] = _rddBlocks.flatMap { case (_, blocks) => blocks }
/** Return the blocks that belong to the given RDD stored in this block manager. */
def rddBlocksById(rddId: Int): Map[BlockId, BlockStatus] = _rddBlocks.getOrElse(rddId, Map.empty)
/** Add the given block to this storage status. If it already exists, overwrite it. */
private[spark] def addBlock(blockId: BlockId, blockStatus: BlockStatus): Unit = {
updateStorageInfo(blockId, blockStatus)
blockId match {
case RDDBlockId(rddId, _) =>
_rddBlocks.getOrElseUpdate(rddId, new mutable.HashMap)(blockId) = blockStatus
case _ =>
_nonRddBlocks(blockId) = blockStatus
}
}
/** Update the given block in this storage status. If it doesn't already exist, add it. */
private[spark] def updateBlock(blockId: BlockId, blockStatus: BlockStatus): Unit = {
addBlock(blockId, blockStatus)
}
/** Remove the given block from this storage status. */
private[spark] def removeBlock(blockId: BlockId): Option[BlockStatus] = {
updateStorageInfo(blockId, BlockStatus.empty)
blockId match {
case RDDBlockId(rddId, _) =>
// Actually remove the block, if it exists
if (_rddBlocks.contains(rddId)) {
val removed = _rddBlocks(rddId).remove(blockId)
// If the given RDD has no more blocks left, remove the RDD
if (_rddBlocks(rddId).isEmpty) {
_rddBlocks.remove(rddId)
}
removed
} else {
None
}
case _ =>
_nonRddBlocks.remove(blockId)
}
}
/**
* Return whether the given block is stored in this block manager in O(1) time.
*
* @note This is much faster than `this.blocks.contains`, which is O(blocks) time.
*/
def containsBlock(blockId: BlockId): Boolean = {
blockId match {
case RDDBlockId(rddId, _) =>
_rddBlocks.get(rddId).exists(_.contains(blockId))
case _ =>
_nonRddBlocks.contains(blockId)
}
}
/**
* Return the given block stored in this block manager in O(1) time.
*
* @note This is much faster than `this.blocks.get`, which is O(blocks) time.
*/
def getBlock(blockId: BlockId): Option[BlockStatus] = {
blockId match {
case RDDBlockId(rddId, _) =>
_rddBlocks.get(rddId).flatMap(_.get(blockId))
case _ =>
_nonRddBlocks.get(blockId)
}
}
/**
* Return the number of blocks stored in this block manager in O(RDDs) time.
*
* @note This is much faster than `this.blocks.size`, which is O(blocks) time.
*/
def numBlocks: Int = _nonRddBlocks.size + numRddBlocks
/**
* Return the number of RDD blocks stored in this block manager in O(RDDs) time.
*
* @note This is much faster than `this.rddBlocks.size`, which is O(RDD blocks) time.
*/
def numRddBlocks: Int = _rddBlocks.values.map(_.size).sum
/**
* Return the number of blocks that belong to the given RDD in O(1) time.
*
* @note This is much faster than `this.rddBlocksById(rddId).size`, which is
* O(blocks in this RDD) time.
*/
def numRddBlocksById(rddId: Int): Int = _rddBlocks.get(rddId).map(_.size).getOrElse(0)
/** Return the memory remaining in this block manager. */
def memRemaining: Long = maxMem - memUsed
/** Return the memory used by this block manager. */
def memUsed: Long = _nonRddStorageInfo._1 + cacheSize
/** Return the memory used by caching RDDs */
def cacheSize: Long = _rddBlocks.keys.toSeq.map(memUsedByRdd).sum
/** Return the disk space used by this block manager. */
def diskUsed: Long = _nonRddStorageInfo._2 + _rddBlocks.keys.toSeq.map(diskUsedByRdd).sum
/** Return the memory used by the given RDD in this block manager in O(1) time. */
def memUsedByRdd(rddId: Int): Long = _rddStorageInfo.get(rddId).map(_._1).getOrElse(0L)
/** Return the disk space used by the given RDD in this block manager in O(1) time. */
def diskUsedByRdd(rddId: Int): Long = _rddStorageInfo.get(rddId).map(_._2).getOrElse(0L)
/** Return the storage level, if any, used by the given RDD in this block manager. */
def rddStorageLevel(rddId: Int): Option[StorageLevel] = _rddStorageInfo.get(rddId).map(_._3)
/**
* Update the relevant storage info, taking into account any existing status for this block.
*/
private def updateStorageInfo(blockId: BlockId, newBlockStatus: BlockStatus): Unit = {
val oldBlockStatus = getBlock(blockId).getOrElse(BlockStatus.empty)
val changeInMem = newBlockStatus.memSize - oldBlockStatus.memSize
val changeInDisk = newBlockStatus.diskSize - oldBlockStatus.diskSize
val level = newBlockStatus.storageLevel
// Compute new info from old info
val (oldMem, oldDisk) = blockId match {
case RDDBlockId(rddId, _) =>
_rddStorageInfo.get(rddId)
.map { case (mem, disk, _) => (mem, disk) }
.getOrElse((0L, 0L))
case _ =>
_nonRddStorageInfo
}
val newMem = math.max(oldMem + changeInMem, 0L)
val newDisk = math.max(oldDisk + changeInDisk, 0L)
// Set the correct info
blockId match {
case RDDBlockId(rddId, _) =>
// If this RDD is no longer persisted, remove it
if (newMem + newDisk == 0) {
_rddStorageInfo.remove(rddId)
} else {
_rddStorageInfo(rddId) = (newMem, newDisk, level)
}
case _ =>
_nonRddStorageInfo = (newMem, newDisk)
}
}
}
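// Illustrative sketch (assumes the standard BlockStatus/RDDBlockId constructors; values
// are made up): track one cached RDD block and query the per-RDD usage in O(1).
//
//   val status = new StorageStatus(blockManagerId, maxMem = 1000L)
//   status.addBlock(RDDBlockId(0, 0), BlockStatus(StorageLevel.MEMORY_ONLY, 100L, 0L))
//   status.memUsedByRdd(0)   // 100
//   status.memRemaining      // 900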
/** Helper methods for storage-related objects. */
private[spark] object StorageUtils extends Logging {
// Ewwww... Reflection!!! See the unmap method for justification
private val memoryMappedBufferFileDescriptorField = {
val mappedBufferClass = classOf[java.nio.MappedByteBuffer]
val fdField = mappedBufferClass.getDeclaredField("fd")
fdField.setAccessible(true)
fdField
}
/**
* Attempt to clean up a ByteBuffer if it is direct or memory-mapped. This uses an *unsafe* Sun
* API that will cause errors if one attempts to read from the disposed buffer. However, neither
* the bytes allocated to direct buffers nor file descriptors opened for memory-mapped buffers put
* pressure on the garbage collector. Waiting for garbage collection may lead to the depletion of
* off-heap memory or huge numbers of open files. There's unfortunately no standard API to
* manually dispose of these kinds of buffers.
*
* See also [[unmap]]
*/
def dispose(buffer: ByteBuffer): Unit = {
if (buffer != null && buffer.isInstanceOf[MappedByteBuffer]) {
logTrace(s"Disposing of $buffer")
cleanDirectBuffer(buffer.asInstanceOf[DirectBuffer])
}
}
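  // Illustrative sketch (file handling is made up): free a memory-mapped region eagerly
  // once reading is finished, rather than waiting for the garbage collector.
  //
  //   val channel = java.nio.channels.FileChannel.open(path)
  //   val mapped = channel.map(java.nio.channels.FileChannel.MapMode.READ_ONLY, 0, channel.size())
  //   // ... read from `mapped` ...
  //   StorageUtils.dispose(mapped)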
/**
* Attempt to unmap a ByteBuffer if it is memory-mapped. This uses an *unsafe* Sun API that will
* cause errors if one attempts to read from the unmapped buffer. However, the file descriptors of
* memory-mapped buffers do not put pressure on the garbage collector. Waiting for garbage
* collection may lead to huge numbers of open files. There's unfortunately no standard API to
* manually unmap memory-mapped buffers.
*
* See also [[dispose]]
*/
def unmap(buffer: ByteBuffer): Unit = {
if (buffer != null && buffer.isInstanceOf[MappedByteBuffer]) {
// Note that direct buffers are instances of MappedByteBuffer. As things stand in Java 8, the
// JDK does not provide a public API to distinguish between direct buffers and memory-mapped
// buffers. As an alternative, we peek beneath the curtains and look for a non-null file
// descriptor in mappedByteBuffer
if (memoryMappedBufferFileDescriptorField.get(buffer) != null) {
logTrace(s"Unmapping $buffer")
cleanDirectBuffer(buffer.asInstanceOf[DirectBuffer])
}
}
}
private def cleanDirectBuffer(buffer: DirectBuffer) = {
val cleaner = buffer.cleaner()
if (cleaner != null) {
cleaner.clean()
}
}
/**
* Update the given list of RDDInfo with the given list of storage statuses.
* This method overwrites the old values stored in the RDDInfo's.
*/
def updateRddInfo(rddInfos: Seq[RDDInfo], statuses: Seq[StorageStatus]): Unit = {
rddInfos.foreach { rddInfo =>
val rddId = rddInfo.id
// Assume all blocks belonging to the same RDD have the same storage level
val storageLevel = statuses
.flatMap(_.rddStorageLevel(rddId)).headOption.getOrElse(StorageLevel.NONE)
val numCachedPartitions = statuses.map(_.numRddBlocksById(rddId)).sum
val memSize = statuses.map(_.memUsedByRdd(rddId)).sum
val diskSize = statuses.map(_.diskUsedByRdd(rddId)).sum
rddInfo.storageLevel = storageLevel
rddInfo.numCachedPartitions = numCachedPartitions
rddInfo.memSize = memSize
rddInfo.diskSize = diskSize
}
}
/**
* Return a mapping from block ID to its locations for each block that belongs to the given RDD.
*/
def getRddBlockLocations(rddId: Int, statuses: Seq[StorageStatus]): Map[BlockId, Seq[String]] = {
val blockLocations = new mutable.HashMap[BlockId, mutable.ListBuffer[String]]
statuses.foreach { status =>
status.rddBlocksById(rddId).foreach { case (bid, _) =>
val location = status.blockManagerId.hostPort
blockLocations.getOrElseUpdate(bid, mutable.ListBuffer.empty) += location
}
}
blockLocations
}
/** static random number generator for UUIDs */
private val uuidRnd = new java.util.Random
/**
* Generate a random UUID for file names etc. Uses non-secure version
* of random number generator to be more efficient given that its not
* critical to have this unique.
*
* Adapted from Android's java.util.UUID source.
*/
final def newNonSecureRandomUUID(): UUID = {
val randomBytes: Array[Byte] = new Array[Byte](16)
uuidRnd.nextBytes(randomBytes)
var msb = getLong(randomBytes, 0)
var lsb = getLong(randomBytes, 8)
// Set the version field to 4.
msb &= ~(0xfL << 12)
msb |= (4L << 12)
// Set the variant field to 2. Note that the variant field is
// variable-width, so supporting other variants is not just a matter
// of changing the constant 2 below!
lsb &= ~(0x3L << 62)
lsb |= 2L << 62
new UUID(msb, lsb)
}
final def getLong(src: Array[Byte], offset: Int): Long = {
var index = offset
var h: Int = (src(index) & 0xff) << 24
index += 1
h |= (src(index) & 0xff) << 16
index += 1
h |= (src(index) & 0xff) << 8
index += 1
h |= (src(index) & 0xff)
index += 1
var l = (src(index) & 0xff) << 24
index += 1
l |= (src(index) & 0xff) << 16
index += 1
l |= (src(index) & 0xff) << 8
index += 1
l |= (src(index) & 0xff)
(h.toLong << 32L) | (l.toLong & 0xffffffffL)
}
}
| big-pegasus/spark | core/src/main/scala/org/apache/spark/storage/StorageUtils.scala | Scala | apache-2.0 | 15,152 |
package models
import database.helper.LdapUserStatus
import play.api.libs.json.{JsValue, Json, Reads, Writes}
import java.util.UUID
trait User extends UniqueEntity {
def systemId: String
def campusId: String
def lastname: String
def firstname: String
def email: String
def status: LdapUserStatus
}
sealed trait UserProtocol
object UserProtocol {
case class StudentProtocol(
systemId: String,
campusId: String,
lastname: String,
firstname: String,
email: String,
registrationId: String,
enrollment: UUID
) extends UserProtocol
case class EmployeeProtocol(
systemId: String,
campusId: String,
lastname: String,
firstname: String,
email: String
) extends UserProtocol
implicit val readsStudent: Reads[StudentProtocol] =
Json.reads[StudentProtocol]
implicit val readsEmployee: Reads[EmployeeProtocol] =
Json.reads[EmployeeProtocol]
implicit val reads: Reads[UserProtocol] = (json: JsValue) => {
val enrollment = json.\\("enrollment")
val registrationId = json.\\("registrationId")
if (enrollment.isDefined && registrationId.isDefined)
Json.fromJson(json)(readsStudent)
else Json.fromJson(json)(readsEmployee)
}
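  // Illustrative note: a JSON payload that defines both "registrationId" and "enrollment"
  // (a valid UUID string) deserialises to StudentProtocol; otherwise it falls back to
  // EmployeeProtocol, e.g.
  //   Json.parse(body).validate[UserProtocol]   // `body` is a hypothetical request body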
}
object User {
implicit val writes: Writes[User] = {
case postgresStudent: Student =>
Json.toJson(postgresStudent)(Student.writes)
case postgresStudentAtom: StudentAtom =>
Json.toJson(postgresStudentAtom)(StudentAtom.writes)
case postgresEmployee: Employee =>
Json.toJson(postgresEmployee)(Employee.writes)
case postgresLecturer: Lecturer =>
Json.toJson(postgresLecturer)(Lecturer.writes)
}
}
| THK-ADV/lwm-reloaded | app/models/User.scala | Scala | mit | 1,691 |
object Main {
object Orientation extends Enumeration {
type Orientation = Value
val North = Value("N")
val East = Value("E")
val South = Value("S")
val West = Value("O")
}
import Orientation._
object Move extends Enumeration {
type Move = Value
val Forward = Value("A")
val Right = Value("D")
val Left = Value("G")
val Stay = Value("S")
}
import Move._
case class Game(map: Map, treasures: Seq[Treasure], mountains: Seq[Mountain], players: Seq[Player])
case class Map(width: Int, height: Int)
case class Position(x: Long, y: Long)
case class Treasure(position: Position, quantity: Long)
case class Mountain(position: Position)
case class Player(name: String, position: Position, orientation: Orientation,
moves: Seq[Move], movesCounter: Int, treasuresFound: Seq[Treasure])
def main(args: Array[String]): Unit = {
val mapConfig = scala.io.Source.fromFile(args(0)).getLines.mkString("\\n")
val playersConfig = scala.io.Source.fromFile(args(1)).getLines.mkString("\\n")
val game = parseConfiguration(mapConfig, playersConfig)
start(game, Option(g => drawGame(g)))
}
def start(game: Game, drawFunc: Option[Game => Unit] = None): Game = {
def step(game: Game): Game = {
drawFunc.map(_.apply(game))
if (isGameOver(game.players)) {
val players = game.players.map { player => // we want to ignore the "Stay" moves
val numberOfStays = player.moves.count(_ == Stay)
player.copy(movesCounter = player.movesCounter - numberOfStays)
}
game.copy(players = players)
} else {
val (players, treasures) = computeMoves(game.players, Nil, game.treasures, game)
step(game.copy(players = players, treasures = treasures))
}
}
step(game)
}
def isGameOver(players: Seq[Player]) =
!players.exists(p => p.movesCounter < p.moves.size)
def moveForward(orientation: Orientation, position: Position,
game: Game, otherPlayers: Seq[Player] = Nil): Position = {
val (x, y) = orientation match {
case Orientation.North => (position.x, position.y - 1)
case Orientation.East => (position.x + 1, position.y)
case Orientation.South => (position.x, position.y + 1)
case Orientation.West => (position.x - 1, position.y)
}
val newX = if (x < 0) 0
else if (x > game.map.width - 1) game.map.width - 1
else x
val newY = if (y < 0) 0
else if (y > game.map.height - 1) game.map.height - 1
else y
val newPosition = Position(newX, newY)
if (game.mountains.exists(_.position == newPosition) ||
otherPlayers.exists(_.position == newPosition)) {
position
} else {
newPosition
}
}
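  // Behaviour sketch: the target cell is clamped to the map bounds, and the move is
  // cancelled (the old position is returned) when a mountain or another player occupies
  // it. For example, on any map: moveForward(North, Position(0, 0), game) == Position(0, 0).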
def turn(orientation: Orientation, move: Move): Orientation = {
val o = Seq(North, East, South, West)
def predecessor(orientation: Orientation) = o((o.indexOf(orientation) + 3) % 4)
def successor(orientation: Orientation) = o((o.indexOf(orientation) + 1) % 4)
move match {
case Right => successor(orientation)
case Left => predecessor(orientation)
case Stay => orientation
case _ => throw new Exception("This kind of move shouldn't be there.") // bhoo
}
}
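  // Worked examples, following the cyclic order North -> East -> South -> West:
  //   turn(North, Right) == East
  //   turn(North, Left)  == West
  //   turn(West,  Right) == North
  //   turn(South, Stay)  == South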
private def computeMoves(left: Seq[Player], rest: Seq[Player],
treasures: Seq[Treasure], game: Game): (Seq[Player], Seq[Treasure]) = left match {
case Nil => (rest, treasures)
case _ =>
val player = left.head
val (newPlayer, gameTreasures) = if (player.movesCounter == player.moves.size) {
(player, game.treasures)
} else {
val (position, orientation) = player.moves(player.movesCounter) match {
case Forward =>
val newPosition = moveForward(player.orientation, player.position, game, rest)
(newPosition, player.orientation)
case _ =>
val newOrientation: Orientation = turn(player.orientation, player.moves(player.movesCounter))
(player.position, newOrientation)
}
val (foundTreasure, gameTreasures, playerTreasures) = game.treasures.find(_.position == position) match {
case Some(treasure) => (true, game.treasures diff Seq(treasure), player.treasuresFound :+ treasure)
case None => (false, game.treasures, player.treasuresFound)
}
val moves: Seq[Move] = if (foundTreasure) {
val moves = player.moves
val counter = player.movesCounter
(moves.slice(0, counter + 1) :+ Stay) ++ moves.slice(counter + 1, moves.size)
} else if (rest.exists(_.position == position)) {
val moves = player.moves
val counter = player.movesCounter
(moves.slice(0, counter + 1) :+ Stay) ++ moves.slice(counter + 2, moves.size)
} else {
player.moves
}
val newPlayer = player.copy(position = position,
orientation = orientation,
movesCounter = player.movesCounter + 1,
treasuresFound = playerTreasures,
moves = moves)
(newPlayer, gameTreasures)
}
computeMoves(left.tail, rest :+ newPlayer, gameTreasures, game.copy(treasures = gameTreasures))
}
def drawGame(game: Game, delay: Int = 500): Unit = {
println("\\u001b[2J\\u001b[1;1H")
println(buildDrawableGame(game))
Thread.sleep(delay)
}
def buildDrawableGame(game: Game): String = {
val sb = new StringBuilder
for (j <- 0 to game.map.height - 1) {
for (i <- 0 to game.map.width - 1) {
if (game.mountains.exists(_.position == Position(i, j))) {
sb.append(" M")
} else if (game.treasures.exists(_.position == Position(i, j))) {
sb.append(s" ${game.treasures.filter(_.position == Position(i, j)).head.quantity}")
} else if (game.players.exists(_.position == Position(i, j))) {
val player = game.players.find(_.position == Position(i, j)).head
player.orientation match {
case North => sb.append(" ↑")
case East => sb.append(" →")
case South => sb.append(" ↓")
case West => sb.append(" ←")
}
} else {
sb.append(" .")
}
}
sb.append('\\n')
}
sb.toString()
}
def parseConfiguration(mapConfig: String, playersConfig: String) = {
val (map, treasures, mountains) = parseMapConfiguration(mapConfig)
val players = parsePlayersConfiguration(playersConfig)
Game(map, treasures, mountains, players)
}
def parseMapConfiguration(config: String): (Map, Seq[Treasure], Seq[Mountain]) = {
val splitConfig = config.split("\\n")
val mapLine = splitConfig.filter(_.startsWith("C ")).head
val map = Map(width = mapLine.split(" ")(1).toInt, height = mapLine.split(" ")(2).toInt)
val treasures: Seq[Treasure] = splitConfig.filter(_.startsWith("T ")).map { line =>
val splitLine = line.split(" ")
val position = Position(x = splitLine(1).split("-")(0).toLong - 1,
y = splitLine(1).split("-")(1).toLong - 1)
Treasure(position, splitLine(2).toLong)
}
val mountains: Seq[Mountain] = splitConfig.filter(_.startsWith("M ")).map { line =>
val splitLine = line.split(" ")
val position = Position(x = splitLine(1).split("-")(0).toLong - 1,
y = splitLine(1).split("-")(1).toLong - 1)
Mountain(position)
}
(map, treasures, mountains)
}
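  // Illustrative input accepted by parseMapConfiguration (one item per line):
  //   C 3 4       -> map of width 3 and height 4
  //   M 2-2       -> mountain at column 2, row 2 (1-based in the file, 0-based internally)
  //   T 1-3 2     -> treasure of quantity 2 at column 1, row 3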
def parsePlayersConfiguration(config: String): Seq[Player] = config.split("\\n")
.filter(_.trim.nonEmpty)
.filter(l => !l.startsWith("C ") && !l.startsWith("T ") && !l.startsWith("M "))
.map { line =>
val splitLine = line.split(" ")
val name = splitLine(4)
val position = Position(x = splitLine(1).split("-")(0).toLong - 1,
y = splitLine(1).split("-")(1).toLong - 1)
val orientation = Orientation.withName(splitLine(2))
val moves: Seq[Move] = splitLine(3).map(c => Move.withName(c.toString)).toList
Player(name, position, orientation, moves, movesCounter = 0, treasuresFound = Nil)
}.toList
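  // Illustrative player line (the leading token is ignored by the parser; the remaining fields
  // are position, orientation (N/E/S/O), moves (A=forward, D=right, G=left, S=stay), name):
  //   J 1-1 S AADADAGA Lara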
}
| Dinduks/treasure-hunt | src/main/scala/Main.scala | Scala | mit | 8,070 |
package pattern.math
import pattern.{Pattern, P2}
/*
+1>> This source code is licensed as GPLv3 if not stated otherwise.
>> NO responsibility taken for ANY harm, damage done
>> to you, your data, animals, etc.
>>
+2>>
>> Last modified: 2013-10-29 :: 20:37
>> Origin: patterns
>>
+3>>
>> Copyright (c) 2013:
>>
>> | | |
>> | ,---.,---|,---.|---.
>> | | || |`---.| |
>> `---'`---'`---'`---'`---'
>> // Niklas Klügel
>>
+4>>
>> Made in Bavaria by fat little elves - since 1983.
*/
object Mult {
def apply() : P2[Double, Double, Double] = {
val func = (x: Double, y: Double) => x*y
Pattern(func)
}
}
| lodsb/patterns | src/main/scala/pattern/math/Mult.scala | Scala | gpl-3.0 | 754 |
package org.jetbrains.plugins.scala.util
import java.awt.Event
import java.awt.event.MouseEvent
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong
import com.intellij.concurrency.JobScheduler
import com.intellij.diagnostic.PerformanceWatcher
import com.intellij.ide.IdeEventQueue
import com.intellij.openapi.application.{ApplicationManager, ModalityState}
import com.intellij.openapi.components.ApplicationComponent
import com.intellij.openapi.progress._
import com.intellij.openapi.progress.util.AbstractProgressIndicatorBase
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.util.UIFreezingGuard._
import scala.util.control.NoStackTrace
/**
* @author Nikolay.Tropin
*/
class UIFreezingGuard extends ApplicationComponent {
private val periodMs = 10
override def initComponent(): Unit = {
if (enabled) {
JobScheduler.getScheduler.scheduleWithFixedDelay(cancelOnUserInput(), periodMs, periodMs, TimeUnit.MILLISECONDS)
}
}
private def cancelOnUserInput(): Unit = {
val timestamp = progress.timestamp
if (progress.isRunning && hasPendingUserInput) {
progress.cancel(timestamp)
}
}
override def disposeComponent(): Unit = {}
override def getComponentName: String = "UI freezing guard"
}
object UIFreezingGuard {
private val enabled = System.getProperty("idea.ProcessCanceledException") != "disabled"
//used only from EDT
private var isGuarded: Boolean = false
//used in macro!
def withResponsibleUI[T](body: => T): T = {
if (!isAlreadyGuarded && enabled) {
val start = System.currentTimeMillis()
try {
isGuarded = true
val progressManager = ProgressManager.getInstance()
if (!ApplicationManager.getApplication.isWriteAccessAllowed && !progressManager.hasProgressIndicator) {
if (hasPendingUserInput)
throw UnfreezeException
progressManager.runProcess(body, progress)
}
else
body
} finally {
isGuarded = false
dumpThreads(System.currentTimeMillis() - start)
}
}
else body
}
//body should have withResponsibleUI call inside
def withDefaultValue[T](default: T)(body: T): T = {
if (ApplicationManager.getApplication.isDispatchThread && hasPendingUserInput) default
else {
try body
catch {
case UnfreezeException => default
}
}
}
//used in macro to reduce number of `withResponsibleUI` calls in the stacktrace
def isAlreadyGuarded: Boolean = {
val edt = ApplicationManager.getApplication.isDispatchThread
edt && isGuarded || !edt
}
  //Use with care! Can cause bugs if the result is cached higher up the stack.
def withTimeout[T](timeoutMs: Long, default: => T)(computation: => T): T = {
val application = ApplicationManager.getApplication
if (!enabled || !application.isDispatchThread || application.isUnitTestMode) return computation
val startTime = System.currentTimeMillis()
try {
ProgressManager.getInstance().runProcess(computation, new AbstractProgressIndicatorBase {
override def isCanceled: Boolean = {
System.currentTimeMillis() - startTime > timeoutMs || super.isCanceled
}
override def checkCanceled(): Unit = if (isCanceled && isCancelable) throw new TimeoutException
})
} catch {
case _: TimeoutException => default
}
}
//throws TimeoutException!
def withTimeout[T](timeoutMs: Long)(computation: => T): T = withTimeout(timeoutMs, throw new TimeoutException)(computation)
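  // Illustrative sketch (computeTypeText is a made-up expensive call): give an EDT
  // computation at most 50 ms and fall back to a placeholder instead of freezing the UI.
  //
  //   val hint = UIFreezingGuard.withTimeout(50L, default = "...") {
  //     computeTypeText(element)
  //   }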
private def dumpThreads(ms: Long): Unit = {
val threshold = 1000
if (ms > threshold) {
PerformanceWatcher.getInstance().dumpThreads("scalaEdtFreezing/", false)
}
}
private def hasPendingUserInput: Boolean = {
val queue = IdeEventQueue.getInstance()
val userEventIds = Seq(Event.KEY_ACTION, Event.KEY_PRESS, MouseEvent.MOUSE_PRESSED, MouseEvent.MOUSE_WHEEL)
userEventIds.exists(queue.peekEvent(_) != null)
}
private object progress extends StandardProgressIndicator {
val delegate = new EmptyProgressIndicator()
private val counter = new AtomicLong()
def timestamp: Long = counter.get()
def start(): Unit = {
counter.incrementAndGet()
delegate.start()
}
def cancel(l: Long): Unit = {
if (timestamp == l) delegate.cancel()
}
//to avoid long stacktraces in log and keep write actions
def checkCanceled(): Unit = {
if (isCanceled && !ApplicationManager.getApplication.isWriteAccessAllowed)
throw UnfreezeException
}
//EmptyProgressIndicator is good enough, but it has final `checkCanceled()` method
def cancel(): Unit = delegate.cancel()
def isRunning: Boolean = delegate.isRunning
def pushState(): Unit = delegate.pushState()
def setIndeterminate(indeterminate: Boolean): Unit = delegate.setIndeterminate(indeterminate)
def finishNonCancelableSection(): Unit = delegate.finishNonCancelableSection()
def setModalityProgress(modalityProgress: ProgressIndicator): Unit = delegate.setModalityProgress(modalityProgress)
def isCanceled: Boolean = delegate.isCanceled
def isIndeterminate: Boolean = delegate.isIndeterminate
def isModal: Boolean = delegate.isModal
def setFraction(fraction: Double): Unit = delegate.setFraction(fraction)
def stop(): Unit = delegate.stop()
def getText: String = delegate.getText
def setText(text: String): Unit = delegate.setText(text)
def isPopupWasShown: Boolean = delegate.isPopupWasShown
def setText2(text: String): Unit = delegate.setText2(text)
def startNonCancelableSection(): Unit = delegate.startNonCancelableSection()
def getModalityState: ModalityState = delegate.getModalityState
def getFraction: Double = delegate.getFraction
def popState(): Unit = delegate.popState()
def getText2: String = delegate.getText2
def isShowing: Boolean = delegate.isShowing
}
object UnfreezeException extends ProcessCanceledException with NoStackTrace {
override def getMessage: String = "Long scala calculation on UI thread canceled"
}
class TimeoutException extends ProcessCanceledException with NoStackTrace {
override def getMessage: String = "Computation cancelled with timeout"
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/util/UIFreezingGuard.scala | Scala | apache-2.0 | 6,327 |
package TwentyOneToThirty
/**
* Created by Farrell on 5/21/15.
*/
object P24 {
}
| Spinlocks/99Problems | src/TwentyOneToThirty/P24.scala | Scala | apache-2.0 | 85 |
package com.sksamuel.elastic4s.search.queries
import com.sksamuel.elastic4s.{DocumentRef, ElasticsearchClientUri}
import com.sksamuel.elastic4s.analyzers.StandardAnalyzer
import com.sksamuel.elastic4s.http.{ElasticDsl, HttpClient}
import com.sksamuel.elastic4s.searches.queries.ArtificialDocument
import com.sksamuel.elastic4s.testkit.{ElasticMatchers, ElasticSugar}
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy
import org.scalatest.{Matchers, WordSpec}
class MoreLikeThisQueryHttpTest
extends WordSpec
with Matchers
with ElasticSugar
with ElasticMatchers
with ElasticDsl {
val http = HttpClient(ElasticsearchClientUri("elasticsearch://" + node.ipAndPort))
http.execute {
createIndex("mltq").mappings {
mapping("alcohol") source true as (
textField("name") store true analyzer StandardAnalyzer
)
} shards 1
}.await
http.execute {
bulk(
indexInto("mltq/alcohol") fields ("text" -> "coors light is a coors beer by molson") id 4,
indexInto("mltq/alcohol") fields ("text" -> "Anheuser-Busch brews a cider called Strongbow") id 6,
indexInto("mltq/alcohol") fields ("text" -> "Gordons popular gin UK") id 7,
indexInto("mltq/alcohol") fields ("text" -> "coors regular is another coors beer by molson") id 8,
indexInto("mltq/alcohol") fields ("text" -> "Hendricks upmarket gin UK") id 9
).refresh(RefreshPolicy.IMMEDIATE)
}.await
"a more like this query" should {
"find matches based on input text" in {
val resp = http.execute {
search("mltq" / "alcohol") query {
moreLikeThisQuery("text")
.likeTexts("coors") minTermFreq 1 minDocFreq 1
}
}.await
resp.hits.hits.map(_.id).toSet shouldBe Set("4", "8")
}
"find matches based on doc refs" in {
val resp = http.execute {
search("mltq" / "alcohol").query {
moreLikeThisQuery("text")
.likeDocs(DocumentRef("mltq", "alcohol", "4")) minTermFreq 1 minDocFreq 1
}
}.await
resp.hits.hits.map(_.id).toSet shouldBe Set("8")
}
"support artifical docs" in {
val resp = http.execute {
search("mltq" / "alcohol").query {
moreLikeThisQuery("text")
.artificialDocs(ArtificialDocument("mltq", "alcohol", """{ "text" : "gin" }""")) minTermFreq 1 minDocFreq 1
}
}.await
resp.hits.hits.map(_.id).toSet shouldBe Set("7", "9")
}
}
}
| aroundus-inc/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/search/queries/MoreLikeThisQueryHttpTest.scala | Scala | apache-2.0 | 2,474 |
/**
* *****************************************************************************
* Copyright (c) 2014 Guillaume DUBUISSON DUPLESSIS <[email protected]>.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Public License v3.0
* which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/gpl.html
*
* Contributors:
* Guillaume DUBUISSON DUPLESSIS <[email protected]> - initial API and implementation
* ****************************************************************************
*/
package binaryTree.P61A
import util.ExerciseTemplate
import binaryTree.Tree
import binaryTree.Node
import binaryTree.End
trait P61A extends ExerciseTemplate {
/*
61A (*) Collect the leaves of a binary tree in a list.
A leaf is a node with no successors. Write a method leafList to collect them in a list.
scala> Node('a', Node('b'), Node('c', Node('d'), Node('e'))).leafList
res0: List[Char] = List(b, d, e)
*/
val name = "61A (Collect the leaves of a binary tree in a list)"
def leafList[T](t: Tree[T]): List[T]
test("Invoking leafList should return the list of leaves of a binary tree") {
assert(leafList(End) == List())
assert(leafList(Node('a)) == List('a))
assert(leafList(Node('a, Node('b), End)) == List('b))
assert(leafList(Node('a, End, Node('b))) == List('b))
val result01 = leafList(Node('a, Node('b), Node('c)))
assert(result01.size == 2)
assert(List('b, 'c).forall(result01.contains(_)))
/*
* 'a
* / \
* 'b 'c
* /
* 'b1
* /
* 'b2
*/
val left = Node('b, Node('b1, Node('b2), End), End)
val right = Node('c)
val tree = Node('a, left, right)
val result02 = leafList(tree)
assert(result02.size == 2)
assert(List('b2, 'c).forall(result02.contains(_)))
/*
* 'a
* / \
* 'b 'c
* \ / \
* 'b1 'c1 'c2
* /
* 'c3
*
*/
val result03 = leafList(Node('a, Node('b, End, Node('b1)), Node('c, Node('c1), Node('c2, Node('c3), End))))
assert(result03.size == 3)
assert(List('b1, 'c1, 'c3).forall(result03.contains(_)))
// Example test
val result04 = leafList(Node('a', Node('b'), Node('c', Node('d'), Node('e'))))
assert(result04.size == 3)
assert(List('b', 'd', 'e').forall(result04.contains(_)))
}
}
| GuillaumeDD/scala99problems | src/main/scala/binaryTree/P61A/P61A.scala | Scala | gpl-3.0 | 2,630 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.s2graph.core.mysqls
import play.api.libs.json.Json
import scalikejdbc._
object ColumnMeta extends Model[ColumnMeta] {
val timeStampSeq = 0.toByte
val countSeq = -1.toByte
val lastModifiedAtColumnSeq = 0.toByte
val lastModifiedAtColumn = ColumnMeta(Some(0), 0, "lastModifiedAt", lastModifiedAtColumnSeq, "long")
val maxValue = Byte.MaxValue
val timestamp = ColumnMeta(None, -1, "_timestamp", timeStampSeq, "long")
def apply(rs: WrappedResultSet): ColumnMeta = {
ColumnMeta(Some(rs.int("id")), rs.int("column_id"), rs.string("name"), rs.byte("seq"), rs.string("data_type").toLowerCase())
}
def findById(id: Int)(implicit session: DBSession = AutoSession) = {
// val cacheKey = s"id=$id"
val cacheKey = "id=" + id
withCache(cacheKey) {
sql"""select * from column_metas where id = ${id}""".map { rs => ColumnMeta(rs) }.single.apply
}.get
}
def findAllByColumn(columnId: Int, useCache: Boolean = true)(implicit session: DBSession = AutoSession) = {
// val cacheKey = s"columnId=$columnId"
val cacheKey = "columnId=" + columnId
if (useCache) {
withCaches(cacheKey)( sql"""select *from column_metas where column_id = ${columnId} order by seq ASC"""
.map { rs => ColumnMeta(rs) }.list.apply())
} else {
sql"""select * from column_metas where column_id = ${columnId} order by seq ASC"""
.map { rs => ColumnMeta(rs) }.list.apply()
}
}
def findByName(columnId: Int, name: String)(implicit session: DBSession = AutoSession) = {
// val cacheKey = s"columnId=$columnId:name=$name"
val cacheKey = "columnId=" + columnId + ":name=" + name
withCache(cacheKey)( sql"""select * from column_metas where column_id = ${columnId} and name = ${name}"""
.map { rs => ColumnMeta(rs) }.single.apply())
}
def insert(columnId: Int, name: String, dataType: String)(implicit session: DBSession = AutoSession) = {
val ls = findAllByColumn(columnId, false)
val seq = ls.size + 1
if (seq <= maxValue) {
sql"""insert into column_metas(column_id, name, seq, data_type)
select ${columnId}, ${name}, ${seq}, ${dataType}"""
.updateAndReturnGeneratedKey.apply()
}
}
def findOrInsert(columnId: Int, name: String, dataType: String)(implicit session: DBSession = AutoSession): ColumnMeta = {
findByName(columnId, name) match {
case Some(c) => c
case None =>
insert(columnId, name, dataType)
expireCache(s"columnId=$columnId:name=$name")
findByName(columnId, name).get
}
}
def findByIdAndSeq(columnId: Int, seq: Byte, useCache: Boolean = true)(implicit session: DBSession = AutoSession) = {
val cacheKey = "columnId=" + columnId + ":seq=" + seq
lazy val columnMetaOpt = sql"""
select * from column_metas where column_id = ${columnId} and seq = ${seq}
""".map { rs => ColumnMeta(rs) }.single.apply()
if (useCache) withCache(cacheKey)(columnMetaOpt)
else columnMetaOpt
}
def delete(id: Int)(implicit session: DBSession = AutoSession) = {
val columnMeta = findById(id)
val (columnId, name) = (columnMeta.columnId, columnMeta.name)
sql"""delete from column_metas where id = ${id}""".execute.apply()
val cacheKeys = List(s"id=$id", s"columnId=$columnId:name=$name", s"columnId=$columnId")
cacheKeys.foreach { key =>
expireCache(key)
expireCaches(key)
}
}
def findAll()(implicit session: DBSession = AutoSession) = {
val ls = sql"""select * from column_metas""".map { rs => ColumnMeta(rs) }.list().apply()
putsToCache(ls.map { x =>
val cacheKey = s"id=${x.id.get}"
(cacheKey -> x)
})
putsToCache(ls.map { x =>
val cacheKey = s"columnId=${x.columnId}:name=${x.name}"
(cacheKey -> x)
})
putsToCache(ls.map { x =>
val cacheKey = s"columnId=${x.columnId}:seq=${x.seq}"
(cacheKey -> x)
})
putsToCaches(ls.groupBy(x => x.columnId).map { case (columnId, ls) =>
val cacheKey = s"columnId=${columnId}"
(cacheKey -> ls)
}.toList)
}
}
case class ColumnMeta(id: Option[Int], columnId: Int, name: String, seq: Byte, dataType: String) {
lazy val toJson = Json.obj("name" -> name, "dataType" -> dataType)
override def equals(other: Any): Boolean = {
if (!other.isInstanceOf[ColumnMeta]) false
else {
val o = other.asInstanceOf[ColumnMeta]
// labelId == o.labelId &&
seq == o.seq
}
}
override def hashCode(): Int = seq.toInt
}
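// Usage sketch (illustrative only; assumes an implicit ScalikeJDBC session via AutoSession and a
// pre-existing service column with id 1 — these values are made up, not taken from this codebase):
// val meta = ColumnMeta.findOrInsert(columnId = 1, name = "age", dataType = "integer")
// meta.toJson // => {"name":"age","dataType":"integer"}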
| daewon/incubator-s2graph | s2core/src/main/scala/org/apache/s2graph/core/mysqls/ColumnMeta.scala | Scala | apache-2.0 | 5,325 |
import sbt._
import Keys._
object GruntTask extends Plugin {
private def runGrunt(task: String) = {
val cmd = "grunt " + task
cmd !
}
// Grunt command
def gruntCommand = {
Command.single("grunt") { (state: State, task: String) =>
runGrunt(task)
state
}
}
// Grunt task
def gruntTask(taskName: String) = streams map { (s: TaskStreams) =>
val retval = runGrunt(taskName)
if (retval != 0) {
throw new Exception("Grunt task %s failed".format(taskName))
}
}
// Expose plugin
override lazy val settings = Seq(commands += gruntCommand)
}
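// Usage sketch (hypothetical): the command can be invoked as `sbt "grunt build"`, and gruntTask can
// back a custom key in a Build.scala. The key name `gruntBuild` and the "build" target are assumptions,
// and the `grunt` executable must be available on the PATH.
// lazy val gruntBuild = TaskKey[Unit]("grunt-build", "Runs the grunt build target")
// lazy val app = Project("app", file("."))
//   .settings(GruntTask.settings: _*)
//   .settings(gruntBuild <<= GruntTask.gruntTask("build"))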
| moatra/playground | project/GruntTask.scala | Scala | mit | 608 |
/*
* Copyright (c) 2016. <[email protected]>
*
* ControlField.scala is part of marc4scala.
*
* marc4scala is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* marc4scala is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with marc4scala; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package org.marc4scala
import scala.util.matching.Regex
/**
* Created by jason on 2016-02-20.
*/
class ControlField(override val tag:String, val data:String) extends Field(tag){
private var _buff:Array[Byte] = new Array[Byte](data.length)
private var i:Int = 0
for(c<-data.toCharArray){
_buff(i) = c.toByte
i += 1
}
//val _tag = fieldTag
if (_tag.isDataTag) throw new IllegalStateException("Data Tag in a control field")
private def _update {
var i:Int = 0
for(c<-data.toCharArray) {
_buff(i) = c.toByte
i += 1
}
}
def isControlField:Boolean = _tag.isControlTag
override def toString:String = _tag.toString + " " + data
def find(str: String): Boolean = {
// true when the regular expression matches anywhere in the field data
val pattern = new Regex(str)
pattern.findFirstIn(data).isDefined
}
def asRaw:Array[Byte] = {
var temp:Array[Byte] = new Array[Byte](3+data.length)
var i:Int = 0
for(c<-_tag.toString.toCharArray){
temp(i) = c.toByte
i = i + 1
}
Array.copy(_buff,0, temp, 3, data.length)
return temp
}
def dataLength:Int = _buff.length
def length:Int = dataLength + 3
}
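// Usage sketch (illustrative; the tag and data below are arbitrary MARC-like values, not from this project):
// val cf = new ControlField("008", "970502s1997")
// cf.isControlField // true, since "008" is a control tag
// cf.find("1997") // true: the data matches the regex
// cf.asRaw // the 3 tag bytes followed by the data bytes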
| jasonzou/marc4scala | src/main/scala/org/marc4scala/ControlField.scala | Scala | gpl-3.0 | 2,073 |
package mesosphere.marathon
package api.v2.json
import com.wix.accord.validate
import mesosphere.UnitTest
import mesosphere.marathon.api.v2.AppNormalization
import mesosphere.marathon.raml.{ App, GroupConversion, GroupUpdate, Raml }
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state._
import mesosphere.marathon.test.GroupCreation
class GroupUpdateTest extends UnitTest with GroupCreation {
val noEnabledFeatures = Set.empty[String]
val appConversionFunc: (App => AppDefinition) = { app =>
// assume canonical form and that the app is valid
Raml.fromRaml(AppNormalization.apply(AppNormalization.Configure(None, "bridge-name")).normalized(app))
}
implicit val groupUpdateRamlReader = raml.GroupConversion.groupUpdateRamlReads // HACK: workaround bogus compiler error?!
"GroupUpdate" should {
"A group update can be applied to an empty group" in {
Given("An empty group with updates")
val rootGroup = createRootGroup()
val update = GroupUpdate(
Some(PathId.empty.toString),
Some(Set.empty[App]),
Some(Set(
GroupUpdate(
Some("test"), Some(Set.empty[App]), Some(Set(Group.emptyUpdate("foo".toPath)))),
GroupUpdate(
Some("apps"), Some(Set(
App("app1", cmd = Some("foo"),
dependencies = Set("d1", "../test/foo", "/test")))))
))
)
val timestamp = Timestamp.now()
When("The update is performed")
val result: Group = Raml.fromRaml(GroupConversion(update, rootGroup, timestamp) -> appConversionFunc)
validate(RootGroup.fromGroup(result))(RootGroup.rootGroupValidator(noEnabledFeatures)).isSuccess should be(true)
Then("The update is applied correctly")
result.id should be(PathId.empty)
result.groupsById should have size 2
val test = result.group("test".toRootPath)
test should be('defined)
test.get.groupsById should have size 1
val apps = result.group("apps".toRootPath)
apps should be('defined)
apps.get.apps should have size 1
val app = apps.get.apps.head
app._1.toString should be ("/apps/app1")
app._2.dependencies should be (Set("/apps/d1".toPath, "/test/foo".toPath, "/test".toPath))
}
"A group update can be applied to existing entries" in {
Given("A group with updates of existing nodes")
val blaApp = AppDefinition("/test/bla".toPath, Some("foo"))
val actual = createRootGroup(groups = Set(
createGroup("/test".toPath, apps = Map(blaApp.id -> blaApp)),
createGroup("/apps".toPath, groups = Set(createGroup("/apps/foo".toPath)))
))
val update = GroupUpdate(
Some(PathId.empty.toString),
Some(Set.empty[App]),
Some(Set(
GroupUpdate(
Some("test"),
None,
Some(Set(Group.emptyUpdate("foo".toPath)))
),
GroupUpdate(
Some("apps"),
Some(Set(App("app1", cmd = Some("foo"),
dependencies = Set("d1", "../test/foo", "/test"))))
)
))
)
val timestamp = Timestamp.now()
When("The update is performed")
val result: RootGroup = RootGroup.fromGroup(Raml.fromRaml(
GroupConversion(update, actual, timestamp) -> appConversionFunc))
validate(result)(RootGroup.rootGroupValidator(Set())).isSuccess should be(true)
Then("The update is applied correctly")
result.id should be(PathId.empty)
result.groupsById should have size 2
val test = result.group("test".toRootPath)
test should be('defined)
test.get.groupsById should have size 1
test.get.apps should have size 1
val apps = result.group("apps".toRootPath)
apps should be('defined)
apps.get.groupsById should have size 1
apps.get.apps should have size 1
val app = apps.get.apps.head
app._1.toString should be ("/apps/app1")
app._2.dependencies should be (Set("/apps/d1".toPath, "/test/foo".toPath, "/test".toPath))
}
"GroupUpdate will update a Group correctly" in {
Given("An existing group with two subgroups")
val app1 = AppDefinition("/test/group1/app1".toPath, Some("foo"))
val app2 = AppDefinition("/test/group2/app2".toPath, Some("foo"))
val current = createGroup(
"/test".toPath,
groups = Set(
createGroup("/test/group1".toPath, Map(app1.id -> app1)),
createGroup("/test/group2".toPath, Map(app2.id -> app2))
)
)
When("A group update is applied")
val update = GroupUpdate(
Some("/test"),
Some(Set.empty[App]),
Some(Set(
GroupUpdate(Some("/test/group1"), Some(Set(App("/test/group1/app3", cmd = Some("foo"))))),
GroupUpdate(
Some("/test/group3"),
Some(Set.empty[App]),
Some(Set(GroupUpdate(Some("/test/group3/sub1"), Some(Set(App(
"/test/group3/sub1/app4", cmd =
Some("foo")))))))
)
))
)
val timestamp = Timestamp.now()
val next = Raml.fromRaml(GroupConversion(update, current, timestamp) -> appConversionFunc)
val result = createRootGroup(groups = Set(next))
validate(result)(RootGroup.rootGroupValidator(Set())).isSuccess should be(true)
Then("The update is reflected in the current group")
result.id.toString should be("/")
result.apps should be('empty)
val group0 = result.group("/test".toPath).get
group0.id.toString should be("/test")
group0.apps should be('empty)
group0.groupsById should have size 2
val group1 = result.group("/test/group1".toPath).get
group1.id should be("/test/group1".toPath)
group1.apps.head._1 should be("/test/group1/app3".toPath)
val group3 = result.group("/test/group3".toPath).get
group3.id should be("/test/group3".toPath)
group3.apps should be('empty)
}
"A group update should not contain a version" in {
val update = GroupUpdate(None, version = Some(Timestamp.now().toOffsetDateTime))
intercept[IllegalArgumentException] {
Raml.fromRaml(GroupConversion(update, createRootGroup(), Timestamp.now()) -> appConversionFunc)
}
}
"A group update should not contain a scaleBy" in {
val update = GroupUpdate(None, scaleBy = Some(3))
intercept[IllegalArgumentException] {
Raml.fromRaml(GroupConversion(update, createRootGroup(), Timestamp.now()) -> appConversionFunc)
}
}
"Relative path of a dependency, should be relative to group and not to the app" in {
Given("A group with two apps. Second app is dependend of first.")
val update = GroupUpdate(Some(PathId.empty.toString), Some(Set.empty[App]), Some(Set(
GroupUpdate(
Some("test-group"),
Some(Set(
App("test-app1", cmd = Some("foo")),
App("test-app2", cmd = Some("foo"), dependencies = Set("test-app1"))))
)
)))
When("The update is performed")
val result = Raml.fromRaml(
GroupConversion(update, createRootGroup(), Timestamp.now()) -> appConversionFunc)
validate(RootGroup.fromGroup(result))(RootGroup.rootGroupValidator(noEnabledFeatures)).isSuccess should be(true)
Then("The update is applied correctly")
val group = result.group("test-group".toRootPath)
group should be('defined)
group.get.apps should have size 2
val dependentApp = group.get.app("/test-group/test-app2".toPath).get
dependentApp.dependencies should be (Set("/test-group/test-app1".toPath))
}
}
}
| natemurthy/marathon | src/test/scala/mesosphere/marathon/api/v2/json/GroupUpdateTest.scala | Scala | apache-2.0 | 7,652 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
/*
* This file contains derivative works that require the following
* header to be displayed:
*
* Copyright 2002-2014 EPFL.
* Copyright 2011-2014 Typesafe, Inc.
* All rights reserved.
*
* Permission to use, copy, modify, and distribute this software in
* source or binary form for any purpose with or without fee is hereby
* granted, provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
* 3. Neither the name of the EPFL nor the names of its
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
package org.ensime.core
import akka.actor.ActorRef
import akka.pattern.Patterns
import akka.util.Timeout
import org.ensime.api._
import org.ensime.indexer.lucene.SimpleLucene
import scala.collection.mutable
import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }
import scala.reflect.internal.util.{ BatchSourceFile, SourceFile }
trait CompletionControl {
self: RichPresentationCompiler =>
sealed trait CompletionContext {
val source: SourceFile
val offset: Int
val prefix: String
val constructing: Boolean
}
case class ScopeContext(
source: SourceFile,
offset: Int,
prefix: String,
constructing: Boolean
) extends CompletionContext
case class MemberContext(
source: SourceFile,
offset: Int,
prefix: String,
constructing: Boolean
) extends CompletionContext
import CompletionUtil._
def completionsAt(inputP: Position, maxResultsArg: Int, caseSens: Boolean): CompletionInfoList = {
val origContents = inputP.source.content
val point = inputP.endOrCursor
if (point > origContents.length) {
// invalid request - completion point is outside of file
// this causes an ArrayOutOfBounds in the array copy below
logger.warn("completionsAt request has point outside of file")
CompletionInfoList("", List.empty)
} else {
val maxResults = if (maxResultsArg == 0) SimpleLucene.MaxResults else maxResultsArg
val preceding = inputP.source.content.slice(
Math.max(0, inputP.point - 100), inputP.point
)
val defaultPrefix = IdentRegexp.findFirstMatchIn(preceding) match {
case Some(m) => m.group(1)
case _ => ""
}
val constructing = ConstructingRegexp.findFirstMatchIn(preceding).isDefined
val (src, p, patched) = if (defaultPrefix.isEmpty) {
// Add a fake prefix if none was provided by the user. Otherwise the
// compiler will give us a weird tree.
val orig = origContents
// Uses array logic to minimise memory spikes of large Strings
val contents = Array.ofDim[Char](orig.length + 1)
System.arraycopy(orig, 0, contents, 0, point)
contents(point) = 'a'
System.arraycopy(orig, point, contents, point + 1, orig.length - point)
// uses the same VirtualFile as the original
val src = new BatchSourceFile(inputP.source.file, contents)
(src, inputP.withSource(src).withShift(1), true)
} else {
(inputP.source, inputP, false)
}
askReloadFile(src)
val x = new Response[Tree]
askTypeAt(p, x)
val contextOpt = x.get match {
case Left(tree) =>
logger.debug("Completing at tree:" + tree.summaryString)
tree match {
case Apply(fun, _) =>
fun match {
case Select(qualifier: New, name) =>
Some(ScopeContext(src, qualifier.pos.endOrCursor, defaultPrefix, constructing = true))
case Select(qual, name) if qual.pos.isDefined && qual.pos.isRange =>
val prefix = if (patched) "" else name.decoded
Some(MemberContext(src, qual.pos.endOrCursor, prefix, constructing))
case _ =>
val prefix = if (patched) "" else src.content.slice(fun.pos.startOrCursor, fun.pos.endOrCursor).mkString
Some(ScopeContext(src, fun.pos.endOrCursor, prefix, constructing))
}
case Literal(Constant(_)) => None
case New(name) =>
Some(ScopeContext(src, name.pos.endOrCursor, defaultPrefix, constructing = true))
case Select(qualifier, name) if qualifier.pos.isDefined && qualifier.pos.isRange =>
Some(MemberContext(src, qualifier.pos.endOrCursor, defaultPrefix, constructing))
case Import(expr, _) =>
val topLevel = ImportTopLevelRegexp.findFirstMatchIn(preceding).isDefined
if (topLevel) {
Some(ScopeContext(src, expr.pos.endOrCursor, defaultPrefix, constructing = false))
} else {
Some(MemberContext(src, expr.pos.endOrCursor, defaultPrefix, constructing = false))
}
case other =>
Some(ScopeContext(src, p.point, defaultPrefix, constructing))
}
case _ =>
logger.error("Unrecognized completion context.")
None
}
contextOpt match {
case Some(context) =>
CompletionInfoList(
context.prefix,
makeAll(context, maxResults, caseSens).sortWith({ (c1, c2) =>
c1.relevance > c2.relevance ||
(c1.relevance == c2.relevance &&
c1.name.length < c2.name.length)
}).take(maxResults)
)
case _ => CompletionInfoList("", Nil)
}
}
}
def makeAll(context: CompletionContext, maxResults: Int, caseSens: Boolean): List[CompletionInfo] = {
def toCompletionInfo(
context: CompletionContext,
sym: Symbol,
tpe: Type,
inherited: Boolean,
viaView: Symbol
): List[CompletionInfo] = {
var score = 0
if (sym.nameString.startsWith(context.prefix)) score += 10
if (!inherited) score += 10
if (!sym.hasPackageFlag) score += 10
if (!sym.isType) score += 10
if (sym.isLocalToBlock) score += 10
if (sym.isPublic) score += 10
if (viaView == NoSymbol) score += 10
if (sym.owner != definitions.AnyClass &&
sym.owner != definitions.AnyRefClass &&
sym.owner != definitions.ObjectClass) score += 30
val infos = List(CompletionInfo.fromSymbolAndType(sym, tpe, score))
if (context.constructing) {
val constructorSyns = constructorSynonyms(sym).map {
c => CompletionInfo.fromSymbolAndType(sym, c.tpe, score + 50)
}
infos ++ constructorSyns
} else {
val applySyns = applySynonyms(sym).map {
c => CompletionInfo.fromSymbolAndType(sym, c.tpe, score)
}
infos ++ applySyns
}
}
val buff = new mutable.LinkedHashSet[CompletionInfo]()
// Kick off an index search if the name looks like a type.
// Do this before the lookups below, so the two can
// proceed concurrently.
val typeSearch = context match {
case ScopeContext(_, _, prefix, _) =>
if (TypeNameRegex.findFirstMatchIn(prefix).isDefined) {
Some(fetchTypeSearchCompletions(prefix, maxResults, indexer))
} else None
case _ => None
}
var members = List[Member]()
val x = new Response[List[Member]]
context match {
case ScopeContext(src, offset, _, _) =>
askScopeCompletion(rangePos(src, offset, offset, offset), x)
case MemberContext(src, offset, _, _) =>
askTypeCompletion(rangePos(src, offset, offset, offset), x)
}
do {
x.get match {
case Left(mems) => members ++= mems
case _ =>
}
} while (!x.isComplete)
logger.info("Found " + members.size + " members.")
// Any interaction with the members (their types and symbols) must be done
// on the compiler thread.
askOption[Unit] {
val filtered = members.filter { m =>
val s = m.sym.nameString
matchesPrefix(s, context.prefix, matchEntire = false, caseSens = caseSens) && !s.contains("$")
}
logger.info("Filtered down to " + filtered.size + ".")
for (m <- filtered) {
m match {
case m @ ScopeMember(sym, tpe, accessible, viaView) =>
val p = sym.pos
val inSymbol = p.isRange && (context.offset >= p.startOrCursor && context.offset <= p.endOrCursor)
if (!sym.isConstructor && !inSymbol) {
buff ++= toCompletionInfo(context, sym, tpe, inherited = false, NoSymbol)
}
case m @ TypeMember(sym, tpe, accessible, inherited, viaView) =>
if (!sym.isConstructor) {
buff ++= toCompletionInfo(context, sym, tpe, inherited, viaView)
}
case _ =>
}
}
}
val typeSearchResults = typeSearch.flatMap(Await.result(_, Duration.Inf))
def keywordCompletions(prefix: String): Seq[CompletionInfo] = {
if (prefix.length > 0) {
Keywords.keywordCompletions.filter(_.name.startsWith(prefix))
} else
Seq()
}
buff.toList ++ typeSearchResults.getOrElse(Nil) ++ keywordCompletions(context.prefix)
}
}
object Keywords {
val keywords = Seq(
"abstract",
"case",
"catch",
"class",
"def",
//"do",
"else",
"extends",
"false",
"final",
"finally",
"for",
"forSome",
//"if",
"implicit",
"import",
"lazy",
"match",
"new",
"null",
"object",
"override",
"package",
"private",
"protected",
"return",
"requires",
"sealed",
"super",
"this",
"throw",
"trait",
"try",
"true",
"type",
"val",
"var",
"while",
"with",
"yield"
)
val keywordCompletions = keywords map {
CompletionInfo(_, CompletionSignature(List(), "", hasImplicit = false), false, 100, None)
}
}
trait Completion { self: RichPresentationCompiler =>
def completePackageMember(path: String, prefix: String): List[CompletionInfo] = {
packageSymFromPath(path) match {
case Some(sym) =>
val memberSyms = packageMembers(sym).filterNot { s =>
s == NoSymbol || s.nameString.contains("$")
}
memberSyms.flatMap { s =>
val name = if (s.hasPackageFlag) { s.nameString } else { typeShortName(s) }
if (name.startsWith(prefix)) Some(CompletionInfo(name, CompletionSignature(List.empty, "", false), isCallable = false, 50, None)) else None
}.toList.sortBy(ci => (ci.relevance, ci.name))
case _ => List.empty
}
}
}
object CompletionUtil {
val IdentRegexp = """([a-zA-Z0-9_#:<=>@!%&*+/?\\^|~-]*)\z""".r
val JavaIdentRegexp = """([a-zA-Z0-9_]*)\z""".r
val ImportTopLevelRegexp = """import [^\.]*\z""".r
val ImportRegexp = """import [a-zA-Z0-9_\.]*\z""".r
val ImportSubtypeRegexp = """import [a-z0-9_\.]*[A-Z][a-zA-Z0-9_]*\.[a-zA-Z0-9_\.]*\z""".r
val ConstructingRegexp = """new [\.a-zA-Z0-9_]*\z""".r
val TypeNameRegex = """^[A-Z][a-zA-Z0-9]*\z""".r
def matchesPrefix(m: String, prefix: String,
matchEntire: Boolean, caseSens: Boolean): Boolean = {
val prefixUpper = prefix.toUpperCase
(matchEntire && m == prefix) ||
(!matchEntire && caseSens && m.startsWith(prefix)) ||
(!matchEntire && !caseSens && m.toUpperCase.startsWith(prefixUpper))
}
def fetchTypeSearchCompletions(prefix: String, maxResults: Int, indexer: ActorRef): Future[Option[List[CompletionInfo]]] = {
val req = TypeCompletionsReq(prefix, maxResults)
import scala.concurrent.ExecutionContext.Implicits.{ global => exe }
val askRes = Patterns.ask(indexer, req, Timeout(1000.milliseconds))
askRes.map {
case s: SymbolSearchResults =>
s.syms.map { s =>
CompletionInfo(
s.localName, CompletionSignature(List.empty, s.name, false),
isCallable = false, 40, None
)
}
case unknown =>
throw new IllegalStateException("Unexpected response type from request:" + unknown)
}.map(Some(_)).recover { case _ => None }
}
}
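// Behaviour sketch for matchesPrefix (example values chosen for illustration):
// matchesPrefix("toString", "toS", matchEntire = false, caseSens = true) // true
// matchesPrefix("toString", "tos", matchEntire = false, caseSens = true) // false (case-sensitive)
// matchesPrefix("toString", "tos", matchEntire = false, caseSens = false) // true (case-insensitive)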
| rorygraves/ensime-server | core/src/main/scala/org/ensime/core/Completion.scala | Scala | gpl-3.0 | 13,317 |
/*
* Copyright (C) 2017 LREN CHUV for Human Brain Project
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ch.chuv.lren.woken
import cats.effect.{ ConcurrentEffect, ContextShift, Resource, Timer }
import ch.chuv.lren.woken.akka.AkkaServer
import ch.chuv.lren.woken.config.WokenConfiguration
import scala.language.higherKinds
/**
* Monitor the application services, including Akka, the web server and the streams
*/
package object monitoring {
/** Resource that creates and yields monitoring services with Kamon, guaranteeing cleanup. */
def resource[F[_]: ConcurrentEffect: ContextShift: Timer](
akkaServer: AkkaServer[F],
config: WokenConfiguration
): Resource[F, Monitoring[F]] = Monitoring.resource(akkaServer, config)
}
| LREN-CHUV/workflow | src/main/scala/ch/chuv/lren/woken/monitoring/package.scala | Scala | apache-2.0 | 1,385 |
package dal
import com.softwaremill.clippy.AdviceState.AdviceState
import com.softwaremill.clippy._
import com.softwaremill.id.IdGenerator
import util.SqlDatabase
import scala.concurrent.{ExecutionContext, Future}
class AdvicesRepository(database: SqlDatabase, idGenerator: IdGenerator)(implicit ec: ExecutionContext) {
import database._
import database.driver.api._
private class AdvicesTable(tag: Tag) extends Table[StoredAdvice](tag, "advices") {
def id = column[Long]("id", O.PrimaryKey)
def errorTextRaw = column[String]("error_text_raw")
def patternRaw = column[String]("pattern_raw")
def compilationError = column[String]("compilation_error")
def advice = column[String]("advice")
def state = column[Int]("state")
def libraryGroupId = column[String]("library_group_id")
def libraryArtifactId = column[String]("library_artifact_id")
def libraryVersion = column[String]("library_version")
def contributorEmail = column[Option[String]]("contributor_email")
def contributorTwitter = column[Option[String]]("contributor_twitter")
def contributorGithub = column[Option[String]]("contributor_github")
def comment = column[Option[String]]("comment")
def * =
(
id,
errorTextRaw,
patternRaw,
compilationError,
advice,
state,
(libraryGroupId, libraryArtifactId, libraryVersion),
(contributorEmail, contributorGithub, contributorTwitter),
comment
).shaped <> ({ t =>
StoredAdvice(
t._1,
t._2,
t._3,
CompilationError.fromJsonString(t._4).get,
t._5,
AdviceState(t._6),
(Library.apply _).tupled(t._7),
Contributor.tupled(t._8),
t._9
)
}, { (a: StoredAdvice) =>
Some(
(
a.id,
a.errorTextRaw,
a.patternRaw,
a.compilationError.toJsonString,
a.advice,
a.state.id,
Library.unapply(a.library).get,
Contributor.unapply(a.contributor).get,
a.comment
)
)
})
}
private val advices = TableQuery[AdvicesTable]
def store(
errorTextRaw: String,
patternRaw: String,
compilationError: CompilationError[RegexT],
advice: String,
state: AdviceState,
library: Library,
contributor: Contributor,
comment: Option[String]
): Future[StoredAdvice] = {
val a = StoredAdvice(
idGenerator.nextId(),
errorTextRaw,
patternRaw,
compilationError,
advice,
state,
library,
contributor,
comment
)
db.run(advices += a).map(_ => a)
}
def findAll(): Future[Seq[StoredAdvice]] =
db.run(advices.result)
}
case class StoredAdvice(
id: Long,
errorTextRaw: String,
patternRaw: String,
compilationError: CompilationError[RegexT],
advice: String,
state: AdviceState,
library: Library,
contributor: Contributor,
comment: Option[String]
) {
def toAdvice = Advice(compilationError, advice, library)
def toAdviceListing =
AdviceListing(id, compilationError, advice, library, ContributorListing(contributor.github, contributor.twitter))
}
| softwaremill/scala-clippy | ui/app/dal/AdvicesRepository.scala | Scala | apache-2.0 | 3,355 |
import sbt._
import Keys._
object LVATBuild extends Build {
lazy val root = Project(id = "lvat", base = file("."))
lazy val fmTranslation = Project(id = "fm-translation", base = file("fm-translation/")) dependsOn (root)
lazy val paperTools = Project(id = "paper-tools", base = file("paper-tools/")) dependsOn (fmTranslation)
}
// vim: set ts=4 sw=4 et:
| AlexanderKnueppel/is-there-a-mismatch | Source/KConfigTranslator/project/Build.scala | Scala | lgpl-3.0 | 367 |
object FoundReq
{
object F
val a = new (Int *** ((List[C] *** C { type A = C }) >:< D))
def f(arg: Int *** ((List[String] *** C { type A = F.type }) >:< D)) = ???
f(a)
}
| tek/splain | core/src/test/resources-2.13.7+/latest/splain/plugin/PluginSpec/foundreq/code.scala | Scala | mit | 178 |
/*
*************************************************************************************
* Copyright 2013 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.reports.execution
import com.normation.inventory.domain.NodeId
import net.liftweb.common._
import org.joda.time.DateTime
import com.normation.rudder.repository.CachedRepository
import com.normation.rudder.domain.logger.ReportLogger
import com.normation.rudder.domain.reports.NodeAndConfigId
import com.normation.rudder.repository.FindExpectedReportRepository
/**
* Service for reading or storing execution of Nodes
*/
trait RoReportsExecutionRepository {
/**
* Find the last execution of each node, whatever its state.
* The last execution is defined as "the last execution that has been inserted in the database",
* and does not rely on dates (which can change too often).
* The goal is to have reporting that does not depend on time, as nodes may have clocks in the future or
* the past, or their time may even change during their lifetime.
* So the last runs are simply the last runs inserted in the reports database.
* See ticket http://www.rudder-project.org/redmine/issues/6005
*/
def getNodesLastRun(nodeIds: Set[NodeId]): Box[Map[NodeId, Option[AgentRunWithNodeConfig]]]
}
trait WoReportsExecutionRepository {
/**
* Create or update the list of execution in the execution tables
* Only return execution which where actually changed in backend
*
* The logic is:
* - a new execution (not present in backend) is inserted as provided
* - a existing execution can only change the completion status from
* "not completed" to "completed" (i.e: a completed execution can
* not be un-completed).
*/
def updateExecutions(executions : Seq[AgentRun]) : Seq[Box[AgentRun]]
}
/**
* A cached version of the service that only look in the underlying data (expected to
* be slow) when no cache available.
*/
class CachedReportsExecutionRepository(
readBackend : RoReportsExecutionRepository
, writeBackend: WoReportsExecutionRepository
, findConfigs : FindExpectedReportRepository
) extends RoReportsExecutionRepository with WoReportsExecutionRepository with CachedRepository {
val logger = ReportLogger
/*
* We need to synchronise on cache to avoid the case:
* - initial state: RUNS_0 in backend
* - write RUNS_1 (cache => None) : ok, only write in backend
* [interrupt before actual write]
* - read: cache = None => update cache with RUNS_0
* - cache = RUNS_0
* [resume write]
* - write in backend RUNS_1
*
* => cache will never see RUNS_1 before a clear.
*/
/*
* The cache is managed node by node, i.e. it can be initialized
* for certain nodes and not for others.
* The initialization criteria (and so, the fact that the cache
* can be used for a given node) is given by the presence of the
* nodeid in map's keys.
*/
private[this] var cache = Map[NodeId, Option[AgentRunWithNodeConfig]]()
override def clearCache(): Unit = this.synchronized {
cache = Map()
}
override def getNodesLastRun(nodeIds: Set[NodeId]): Box[Map[NodeId, Option[AgentRunWithNodeConfig]]] = this.synchronized {
(for {
runs <- readBackend.getNodesLastRun(nodeIds.diff(cache.keySet))
} yield {
cache = cache ++ runs
cache.filterKeys { x => nodeIds.contains(x) }
}) ?~! s"Error when trying to update the cache of Agent Runs information"
}
override def updateExecutions(executions : Seq[AgentRun]) : Seq[Box[AgentRun]] = this.synchronized {
logger.trace(s"Update runs for nodes [${executions.map( _.agentRunId.nodeId.value ).mkString(", ")}]")
val runs = writeBackend.updateExecutions(executions)
//update complete runs
val completed = runs.collect { case Full(x) if(x.isCompleted) => x }
logger.debug(s"Updating agent runs cache: [${completed.map(x => s"'${x.agentRunId.nodeId.value}' at '${x.agentRunId.date.toString()}'").mkString("," )}]")
// log errors
runs.foreach {
case Full(x) => //
case eb:EmptyBox =>
val e = eb ?~! "Error when updating node run information"
logger.error(e.messageChain)
e.rootExceptionCause.foreach(ex => logger.error(s"Exception was: ${ex.getMessage}"))
}
//we need to get NodeExpectedReports for new runs or runs with a different nodeConfigId
//so we complete run with existing node config in cache, and
//separate between the one completed and the one to query
// the one to query are on left, the one completed on right
val runWithConfigs = completed.map { x =>
x.nodeConfigVersion match {
case None => //no need to try more things
Right(AgentRunWithNodeConfig(x.agentRunId, None, x.isCompleted, x.insertionId))
case Some(newConfig) =>
cache.get(x.agentRunId.nodeId) match {
case Some(Some(AgentRunWithNodeConfig(_, Some((oldConfig, Some(expected))), _, _))) if(oldConfig == newConfig) =>
Right(AgentRunWithNodeConfig(x.agentRunId, Some((oldConfig, Some(expected))), x.isCompleted, x.insertionId))
case _ =>
Left(AgentRunWithNodeConfig(x.agentRunId, x.nodeConfigVersion.map(c => (c, None)), x.isCompleted, x.insertionId))
}
}
}
//now, get back the one in left, and query for node config
val nodeAndConfigs = runWithConfigs.collect { case Left(AgentRunWithNodeConfig(id, Some((config, None)), _, _)) => NodeAndConfigId(id.nodeId, config)}
val results = (findConfigs.getExpectedReports(nodeAndConfigs.toSet) match {
case eb: EmptyBox =>
val e = eb ?~! s"Error when trying to find node configuration matching new runs for node/configId: ${nodeAndConfigs.map(x=> s"${x.nodeId.value}/${x.version.value}").mkString(", ")}"
logger.error(e.messageChain)
//return the whole list of runs unmodified
runWithConfigs.map {
case Left(x) => (x.agentRunId.nodeId, Some(x))
case Right(x) => (x.agentRunId.nodeId, Some(x))
}
case Full(map) =>
runWithConfigs.map {
case Left(x) =>
val configVersion = x.nodeConfigVersion.map(c => c.copy(_2 = map.get(NodeAndConfigId(x.agentRunId.nodeId, c._1)).flatten))
(x.agentRunId.nodeId, Some(x.copy(nodeConfigVersion = configVersion)))
case Right(x) => (x.agentRunId.nodeId, Some(x))
}
}).toMap
cache = cache ++ results
runs
}
}
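// Behaviour note (descriptive, derived from the code above): the first getNodesLastRun call for a node
// hits the read backend and populates the cache; later calls for that node are served from the cache
// until clearCache() is invoked or updateExecutions() refreshes the entry with a newer completed run.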
| armeniaca/rudder | rudder-core/src/main/scala/com/normation/rudder/reports/execution/ReportsExecutionRepository.scala | Scala | gpl-3.0 | 7,995 |
import common._
object forcomp {;import org.scalaide.worksheet.runtime.library.WorksheetSupport._; def main(args: Array[String])=$execute{;$skip(90);
val dictionaryPath = List("forcomp", "linuxwords.txt");System.out.println("""dictionaryPath : List[java.lang.String] = """ + $show(dictionaryPath ));$skip(543);
def loadDictionary = {
val wordstream = Option {
getClass.getClassLoader.getResourceAsStream(dictionaryPath.mkString("/"))
} orElse {
common.resourceAsStreamFromSrc(dictionaryPath)
} getOrElse {
sys.error("Could not load word list, dictionary file not found")
}
try {
val s = io.Source.fromInputStream(wordstream)
s.getLines.toList
} catch {
case e: Exception =>
println("Could not load word list: " + e)
throw e
} finally {
wordstream.close()
}
}
/** A word is simply a `String`. */
type Word = String
/** A sentence is a `List` of words. */
type Sentence = List[Word]
/**
* `Occurrences` is a `List` of pairs of characters and positive integers saying
* how often the character appears.
* This list is sorted alphabetically w.r.t. the character in each pair.
* All characters in the occurrence list are lowercase.
*
* Any list of pairs of lowercase characters and their frequency which is not sorted
* is **not** an occurrence list.
*
* Note: If the frequency of some character is zero, then that character should not be
* in the list.
*/
type Occurrences = List[(Char, Int)];System.out.println("""loadDictionary: => List[String]""");$skip(891);
/**
* The dictionary is simply a sequence of words.
* It is predefined and obtained as a sequence using the utility method `loadDictionary`.
*/
val dictionary: List[Word] = loadDictionary;System.out.println("""dictionary : List[forcomp.Word] = """ + $show(dictionary ));$skip(680);
/**
* Converts the word into its character occurrence list.
*
* Note: the uppercase and lowercase version of the character are treated as the
* same character, and are represented as a lowercase character in the occurrence list.
*/
def wordOccurrences(w: Word): Occurrences = {
val wl = w.toLowerCase.toList // lowercase first so 'A' and 'a' are counted together, per the doc above
val wl2 = wl filter (char => char.isLetter)
val wl3 = wl2 groupBy (i => i)
def calcSize(elem: (Char, List[Char])): (Char, Int) =
{
elem match {
case (theChar, charList) => (theChar, charList.size)
}
}
val ret = wl3 map calcSize
return ret.toList.sorted map (elem => (elem._1.toLower, elem._2))
};System.out.println("""wordOccurrences: (w: forcomp.Word)forcomp.Occurrences""");$skip(1979);
/** Converts a sentence into its character occurrence list. */
def sentenceOccurrences(s: Sentence): Occurrences =
{
def combineOccurrences(oneocc: Occurrences, otherocc: Occurrences): Occurrences =
{
def addElemToOcc(theOcc: Occurrences, elem: (Char, Int)): Occurrences =
{
if (theOcc.isEmpty)
List(elem)
else if (theOcc.head._1 == elem._1)
((theOcc.head._1, (theOcc.head._2 + elem._2)) :: theOcc.tail).sorted
else
(theOcc.head :: addElemToOcc(theOcc.tail, elem)).sorted
}
if (otherocc.isEmpty)
oneocc
else
(addElemToOcc(oneocc, otherocc.head) ::: combineOccurrences(oneocc, otherocc.tail)).sorted
}
val occList = s map wordOccurrences
def auxCombine(finalOcc: Occurrences, theOccList: List[Occurrences]): Occurrences =
{
if (theOccList.isEmpty)
finalOcc
else {
val theFinalOcc = combineOccurrences(theOccList.head, finalOcc)
auxCombine(theFinalOcc, theOccList.tail)
}
}
val interList = auxCombine(List(), occList)
def addToOccNoDup(oneOcc: Occurrences, elem: (Char, Int)): Occurrences =
{
if (oneOcc.isEmpty)
List(elem)
else if ((oneOcc.head._1 == elem._1) && oneOcc.head._2 <= elem._2)
elem :: oneOcc.tail
else if ((oneOcc.head._1 == elem._1) && oneOcc.head._2 > elem._2)
oneOcc
else
oneOcc.head :: addToOccNoDup(oneOcc.tail, elem)
}
def createFinalList(myFinal: Occurrences, intermediateList: Occurrences): Occurrences =
{
if (intermediateList.isEmpty)
myFinal
else
createFinalList(addToOccNoDup(myFinal, intermediateList.head), intermediateList.tail)
}
createFinalList(List(), interList)
};System.out.println("""sentenceOccurrences: (s: forcomp.Sentence)forcomp.Occurrences""");$skip(1421);
/**
* The `dictionaryByOccurrences` is a `Map` from different occurrences to a sequence of all
* the words that have that occurrence count.
* This map serves as an easy way to obtain all the anagrams of a word given its occurrence list.
*
* For example, the word "eat" has the following character occurrence list:
*
* `List(('a', 1), ('e', 1), ('t', 1))`
*
* Incidentally, so do the words "ate" and "tea".
*
* This means that the `dictionaryByOccurrences` map will contain an entry:
*
* List(('a', 1), ('e', 1), ('t', 1)) -> Seq("ate", "eat", "tea")
*
*/
def computedictByOccurrences: Map[Occurrences, List[Word]] =
{
val all = (dictionary map (aWord => (wordOccurrences(aWord), aWord)))
def auxAddOne(aMap: Map[Occurrences, List[Word]], aTuple: (Occurrences, Word)): Map[Occurrences, List[Word]] =
{
aMap get aTuple._1 match {
case Some(aList) => aMap ++ Map(aTuple._1 -> (aList ::: List(aTuple._2)))
case None => aMap ++ Map(aTuple._1 -> List(aTuple._2))
}
}
def auxAddAll(finalMap: Map[Occurrences, List[Word]], theList: List[(Occurrences, Word)]): Map[Occurrences, List[Word]] =
{
if (theList.isEmpty)
finalMap
else
auxAddAll(auxAddOne(finalMap, theList.head), theList.tail)
}
auxAddAll(Map(), all)
};System.out.println("""computedictByOccurrences: => Map[forcomp.Occurrences,List[forcomp.Word]]""");$skip(92);
lazy val dictionaryByOccurrences: Map[Occurrences, List[Word]] = computedictByOccurrences;System.out.println("""dictionaryByOccurrences : Map[forcomp.Occurrences,List[forcomp.Word]] = <lazy>""");$skip(162);
/** Returns all the anagrams of a given word. */
def wordAnagrams(word: Word): List[Word] =
{
dictionaryByOccurrences(wordOccurrences(word))
};System.out.println("""wordAnagrams: (word: forcomp.Word)List[forcomp.Word]""");$skip(1659);
/**
* Returns the list of all subsets of the occurrence list.
* This includes the occurrence itself, i.e. `List(('k', 1), ('o', 1))`
* is a subset of `List(('k', 1), ('o', 1))`.
* It also include the empty subset `List()`.
*
* Example: the subsets of the occurrence list `List(('a', 2), ('b', 2))` are:
*
* List(
* List(),
* List(('a', 1)),
* List(('a', 2)),
* List(('b', 1)),
* List(('a', 1), ('b', 1)),
* List(('a', 2), ('b', 1)),
* List(('b', 2)),
* List(('a', 1), ('b', 2)),
* List(('a', 2), ('b', 2))
* )
*
* Note that the order of the occurrence list subsets does not matter -- the subsets
* in the example above could have been displayed in some other order.
*/
def combinations(occurrences: Occurrences): List[Occurrences] =
{
def allPossible(anOcc: Occurrences, acc: Occurrences): Occurrences =
{
if (anOcc.isEmpty)
acc
else if (anOcc.head._2 == 1)
allPossible(anOcc.tail, anOcc.head :: acc)
else
allPossible((anOcc.head._1, anOcc.head._2 - 1) :: anOcc.tail, anOcc.head :: acc)
}
def computeCombinations(all: Occurrences): List[Occurrences] =
{
if (all.isEmpty)
List()
else if (all.tail.isEmpty)
List(List(all.head))
else {
val rem = computeCombinations(all.tail)
(rem map (x => all.head :: x)) ::: rem
}
}
val theList = (allPossible(occurrences, List())).sorted
List() :: computeCombinations(theList)
};System.out.println("""combinations: (occurrences: forcomp.Occurrences)List[forcomp.Occurrences]""");$skip(954);
/**
* Subtracts occurrence list `y` from occurrence list `x`.
*
* The precondition is that the occurrence list `y` is a subset of
* the occurrence list `x` -- any character appearing in `y` must
* appear in `x`, and its frequency in `y` must be smaller or equal
* than its frequency in `x`.
*
* Note: the resulting value is an occurrence - meaning it is sorted
* and has no zero-entries.
*/
def subtract(x: Occurrences, y: Occurrences): Occurrences =
{
def substractone(x: Occurrences, pair: (Char, Int)): Occurrences =
{
if (x.isEmpty)
List()
else if (x.head._1 == pair._1) {
if (x.head._2 <= pair._2)
x.tail
else
(x.head._1, x.head._2 - pair._2) :: x.tail
} else
x.tail
}
if (y.isEmpty)
x
else
(subtract(substractone(x, y.head), y.tail)).sorted
};System.out.println("""subtract: (x: forcomp.Occurrences, y: forcomp.Occurrences)forcomp.Occurrences""");$skip(1992);
/**
* Returns a list of all anagram sentences of the given sentence.
*
* An anagram of a sentence is formed by taking the occurrences of all the characters of
* all the words in the sentence, and producing all possible combinations of words with those characters,
* such that the words have to be from the dictionary.
*
* The number of words in the sentence and its anagrams does not have to correspond.
* For example, the sentence `List("I", "love", "you")` is an anagram of the sentence `List("You", "olive")`.
*
* Also, two sentences with the same words but in a different order are considered two different anagrams.
* For example, sentences `List("You", "olive")` and `List("olive", "you")` are different anagrams of
* `List("I", "love", "you")`.
*
* Here is a full example of a sentence `List("Yes", "man")` and its anagrams for our dictionary:
*
* List(
* List(en, as, my),
* List(en, my, as),
* List(man, yes),
* List(men, say),
* List(as, en, my),
* List(as, my, en),
* List(sane, my),
* List(Sean, my),
* List(my, en, as),
* List(my, as, en),
* List(my, sane),
* List(my, Sean),
* List(say, men),
* List(yes, man)
* )
*
* The different sentences do not have to be output in the order shown above - any order is fine as long as
* all the anagrams are there. Every returned word has to exist in the dictionary.
*
* Note: in case that the words of the sentence are in the dictionary, then the sentence is the anagram of itself,
* so it has to be returned in this list.
*
* Note: There is only one anagram of an empty sentence.
*/
def addToHead(a: String, b: List[List[String]]): List[Sentence] =
{
if (b.isEmpty)
List(List(a))
else if (b.tail.isEmpty)
List(a :: b.head)
else
List(a :: b.head) ::: addToHead(a, b.tail)
};System.out.println("""addToHead: (a: String, b: List[List[String]])List[forcomp.Sentence]""");$skip(189);
def addOneListWord(words: List[Word], sent: List[Sentence]): List[Sentence] =
{
for (
oneWord <- words;
oneSent <- sent
) yield (oneWord :: oneSent)
};System.out.println("""addOneListWord: (words: List[forcomp.Word], sent: List[forcomp.Sentence])List[forcomp.Sentence]""");$skip(52); val res$0 =
addOneListWord(List("Hello", "I"), List(List()) );System.out.println("""res0: List[forcomp.Sentence] = """ + $show(res$0));$skip(1044);
def findAllSent(occ: Occurrences): List[Sentence] =
{
val theDict = dictionaryByOccurrences withDefaultValue List()
def processOneComb(sub: Occurrences): List[Sentence] =
{
//println("processOneComb: (sub) "+sub)
//println("processOneComb: theDict(sub) "+theDict(sub))
if(sub.isEmpty)
List(List())
else
{
//println("About to recurse")
//println("processOneComb Going to return: " + addOneListWord(theDict(sub), findAllSent( subtract(occ,sub) ) ) )
addOneListWord(theDict(sub), findAllSent( subtract(occ,sub) ) )
}
}
def cumulListSent(allsubsets: List[Occurrences],acc: List[Sentence]): List[Sentence] =
{
println("cumulListSent: (acc) " + acc)
if(allsubsets.isEmpty)
acc
else
cumulListSent(allsubsets.tail, acc ::: processOneComb(allsubsets.head) )
}
if(occ.isEmpty)
List(List())
else
{
val subsets = combinations(occ)
cumulListSent(subsets,List(List()))
}
};System.out.println("""findAllSent: (occ: forcomp.Occurrences)List[forcomp.Sentence]""");$skip(55); val res$1 =
findAllSent(sentenceOccurrences(List("yes","man")));System.out.println("""res1: List[forcomp.Sentence] = """ + $show(res$1));$skip(116);
def sentenceAnagrams(sentence: Sentence): List[Sentence] = ???;System.out.println("""sentenceAnagrams: (sentence: forcomp.Sentence)List[forcomp.Sentence]""")}
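// Worked example (illustrative, not part of the graded assignment):
// wordOccurrences("abba") == List(('a', 2), ('b', 2))
// wordOccurrences("eat") == List(('a', 1), ('e', 1), ('t', 1))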
} | simula67/scala-coursera-martin-odersky | forcomp/.worksheet/src/forcomp.scala | Scala | mpl-2.0 | 13,699 |
package com.github.rosmith.nlp.query.filter
import com.github.rosmith.nlp.query.filter.Datatype._
import com.github.rosmith.nlp.query.filter.BinaryOperator._
import com.github.rosmith.nlp.query.OperandValue
class CountQueryFilter(_var: OperandValue, op: BinaryOperator, _val: Int) extends BinaryQueryFilter(_var, op, new OperandValue(_val, false)) {
} | rosmith/giet | src/main/scala/com/github/rosmith/nlp/query/filter/CountQueryFilter.scala | Scala | mit | 354 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spot.proxy
import org.apache.log4j.Logger
import org.apache.spark.SparkContext
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spot.SuspiciousConnectsArgumentParser.SuspiciousConnectsConfig
import org.apache.spot.proxy.ProxySchema._
import org.apache.spot.utilities.data.validation.{InvalidDataHandler => dataValidation}
/**
* Run suspicious connections analysis on proxy data.
*/
object ProxySuspiciousConnectsAnalysis {
/**
* Run suspicious connections analysis on proxy data.
*
* @param config SuspicionConnectsConfig object, contains runtime parameters from CLI.
* @param sparkContext Apache Spark context.
* @param sqlContext Spark SQL context.
* @param logger Logs execution progress, information and errors for user.
*/
def run(config: SuspiciousConnectsConfig, sparkContext: SparkContext, sqlContext: SQLContext, logger: Logger,
inputProxyRecords: DataFrame) = {
logger.info("Starting proxy suspicious connects analysis.")
val cleanProxyRecords = filterAndSelectCleanProxyRecords(inputProxyRecords)
val scoredProxyRecords = detectProxyAnomalies(cleanProxyRecords, config, sparkContext, sqlContext, logger)
// take the maxResults least probable events of probability below the threshold and sort
val filteredProxyRecords = filterScoredProxyRecords(scoredProxyRecords, config.threshold)
val orderedProxyRecords = filteredProxyRecords.orderBy(Score)
val mostSuspiciousProxyRecords = if (config.maxResults > 0) orderedProxyRecords.limit(config.maxResults) else orderedProxyRecords
val outputProxyRecords = mostSuspiciousProxyRecords.select(OutSchema: _*)
logger.info("Proxy suspicious connects analysis completed")
logger.info("Saving results to: " + config.hdfsScoredConnect)
outputProxyRecords.map(_.mkString(config.outputDelimiter)).saveAsTextFile(config.hdfsScoredConnect)
val invalidProxyRecords = filterAndSelectInvalidProxyRecords(inputProxyRecords)
dataValidation.showAndSaveInvalidRecords(invalidProxyRecords, config.hdfsScoredConnect, logger)
val corruptProxyRecords = filterAndSelectCorruptProxyRecords(scoredProxyRecords)
dataValidation.showAndSaveCorruptRecords(corruptProxyRecords, config.hdfsScoredConnect, logger)
}
/**
* Identify anomalous proxy log entries in the provided data frame.
*
* @param data Data frame of proxy entries
* @param config
* @param sparkContext
* @param sqlContext
* @param logger
* @return
*/
def detectProxyAnomalies(data: DataFrame,
config: SuspiciousConnectsConfig,
sparkContext: SparkContext,
sqlContext: SQLContext,
logger: Logger): DataFrame = {
logger.info("Fitting probabilistic model to data")
val model = ProxySuspiciousConnectsModel.trainNewModel(sparkContext, sqlContext, logger, config, data)
logger.info("Identifying outliers")
model.score(sparkContext, data)
}
/**
*
* @param inputProxyRecords raw proxy records.
* @return
*/
def filterAndSelectCleanProxyRecords(inputProxyRecords: DataFrame): DataFrame = {
val cleanProxyRecordsFilter = inputProxyRecords(Date).isNotNull &&
inputProxyRecords(Time).isNotNull &&
inputProxyRecords(ClientIP).isNotNull &&
inputProxyRecords(Host).isNotNull &&
inputProxyRecords(FullURI).isNotNull
inputProxyRecords
.filter(cleanProxyRecordsFilter)
.select(InSchema: _*)
.na.fill(DefaultUserAgent, Seq(UserAgent))
.na.fill(DefaultResponseContentType, Seq(ResponseContentType))
}
/**
*
* @param inputProxyRecords raw proxy records.
* @return
*/
def filterAndSelectInvalidProxyRecords(inputProxyRecords: DataFrame): DataFrame = {
val invalidProxyRecordsFilter = inputProxyRecords(Date).isNull ||
inputProxyRecords(Time).isNull ||
inputProxyRecords(ClientIP).isNull ||
inputProxyRecords(Host).isNull ||
inputProxyRecords(FullURI).isNull
inputProxyRecords
.filter(invalidProxyRecordsFilter)
.select(InSchema: _*)
}
/**
*
* @param scoredProxyRecords scored proxy records.
* @param threshold score tolerance.
* @return
*/
def filterScoredProxyRecords(scoredProxyRecords: DataFrame, threshold: Double): DataFrame = {
val filteredProxyRecordsFilter = scoredProxyRecords(Score).leq(threshold) &&
scoredProxyRecords(Score).gt(dataValidation.ScoreError)
scoredProxyRecords.filter(filteredProxyRecordsFilter)
}
/**
*
* @param scoredProxyRecords scored proxy records.
* @return
*/
def filterAndSelectCorruptProxyRecords(scoredProxyRecords: DataFrame): DataFrame = {
val corruptProxyRecordsFilter = scoredProxyRecords(Score).equalTo(dataValidation.ScoreError)
scoredProxyRecords
.filter(corruptProxyRecordsFilter)
.select(OutSchema: _*)
}
val DefaultUserAgent = "-"
val DefaultResponseContentType = "-"
val InSchema = StructType(
List(DateField,
TimeField,
ClientIPField,
HostField,
ReqMethodField,
UserAgentField,
ResponseContentTypeField,
DurationField,
UserNameField,
WebCatField,
RefererField,
RespCodeField,
URIPortField,
URIPathField,
URIQueryField,
ServerIPField,
SCBytesField,
CSBytesField,
FullURIField)).fieldNames.map(col)
val OutSchema = StructType(
List(DateField,
TimeField,
ClientIPField,
HostField,
ReqMethodField,
UserAgentField,
ResponseContentTypeField,
DurationField,
UserNameField,
WebCatField,
RefererField,
RespCodeField,
URIPortField,
URIPathField,
URIQueryField,
ServerIPField,
SCBytesField,
CSBytesField,
FullURIField,
WordField,
ScoreField)).fieldNames.map(col)
}
} | daortizh/incubator-spot | spot-ml/src/main/scala/org/apache/spot/proxy/ProxySuspiciousConnectsAnalysis.scala | Scala | apache-2.0 | 6,897 |
package com.karasiq.shadowcloud.storage.utils
import akka.NotUsed
import akka.actor.{ActorContext, ActorSystem}
import akka.stream.scaladsl.Flow
import akka.util.ByteString
import com.karasiq.shadowcloud.ShadowCloud
import com.karasiq.shadowcloud.config.StorageConfig
import com.karasiq.shadowcloud.index.IndexData
import com.karasiq.shadowcloud.model.RegionId
import com.karasiq.shadowcloud.storage.internal.DefaultIndexRepositoryStreams
import com.karasiq.shadowcloud.storage.repository.Repository
import com.karasiq.shadowcloud.streams.index.IndexProcessingStreams
import scala.concurrent.ExecutionContext
private[shadowcloud] trait IndexRepositoryStreams {
def write[Key](repository: Repository[Key]): Flow[(Key, IndexData), IndexIOResult[Key], NotUsed]
def read[Key](repository: Repository[Key]): Flow[Key, IndexIOResult[Key], NotUsed]
def delete[Key](repository: Repository[Key]): Flow[Key, IndexIOResult[Key], NotUsed]
}
private[shadowcloud] object IndexRepositoryStreams {
def create(breadth: Int, writeFlow: Flow[IndexData, ByteString, _],
readFlow: Flow[ByteString, IndexData, _], immutable: Boolean = false)(implicit ec: ExecutionContext): IndexRepositoryStreams = {
new DefaultIndexRepositoryStreams(breadth, writeFlow, readFlow, immutable)
}
def apply(regionId: RegionId, storageConfig: StorageConfig, actorSystem: ActorSystem): IndexRepositoryStreams = {
import actorSystem.dispatcher
implicit val sc = ShadowCloud(actorSystem)
val index = IndexProcessingStreams(regionId)
create(3, index.preWrite(storageConfig), index.postRead, storageConfig.immutable)
}
def apply(regionId: RegionId, storageConfig: StorageConfig)(implicit ac: ActorContext): IndexRepositoryStreams = {
apply(regionId, storageConfig, ac.system)
}
}
| Karasiq/shadowcloud | core/src/main/scala/com/karasiq/shadowcloud/storage/utils/IndexRepositoryStreams.scala | Scala | apache-2.0 | 1,793 |
package eu.execom.FabutPresentation.service
import eu.execom.FabutPresentation.util.Logging
import eu.execom.FabutPresentation.persistence.User
import org.apache.commons.mail.HtmlEmail
class MailSender(
val smtpUrl: String,
val smtpPort: Int,
val smtpUserName: String,
val smtpPassword: String,
val smtpSslOnConnect: Boolean) extends Logging {
require(smtpUserName.nonEmpty, "SMTP user name can't be empty")
def sendEmail(toAddress: String, toName: Option[String], fromAddress: String, fromName: String, subject: String, content: String): Unit = {
val email = new HtmlEmail()
email.setHostName(smtpUrl)
email.setSmtpPort(smtpPort)
if (smtpUserName.nonEmpty && smtpPassword.nonEmpty) email.setAuthentication(smtpUserName, smtpPassword)
email.setSSLOnConnect(smtpSslOnConnect)
    toName.fold(email.addTo(toAddress))(name => email.addTo(toAddress, name))
    email.setFrom(fromAddress, fromName)
email.setSubject(subject)
email.setCharset("UTF-8")
email.setHtmlMsg(content)
email.setTextMsg("Your email client does not support HTML messages")
email.send()
}
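  // Hedged usage sketch (addresses, names and content below are illustrative, not part of this codebase):
  //   sender.sendEmail("[email protected]", Some("Recipient"), "[email protected]", "Fabut", "Subject", "<p>Hello</p>")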
def sendEmail(toAddress: String, toName: Option[String]) {
sendEmail(toAddress, toName, smtpUserName, "FabutPresentation", "FABUT", "PRESENTATION")
}
def sendInvitationEmail(user: User, email: String) {
sendEmail(email, None)
}
} | idostanic/FabutPresentation | src/main/scala/eu/execom/FabutPresentation/service/MailSender.scala | Scala | apache-2.0 | 1,309 |
package com.taig.tmpltr.engine.html
import com.taig.tmpltr._
import play.api.mvc.Content
class i( val attributes: Attributes, val content: Content )
extends markup.i
with Tag.Body[i, Content]
object i
extends Tag.Body.Appliable[i, Content] | Taig/Play-Tmpltr | app/com/taig/tmpltr/engine/html/i.scala | Scala | mit | 243 |
package slogger.model.processing
import org.joda.time.Duration
case class CalculationMetaStats (
processedDocuments: Long,
reusedSlices: Long,
processingTime: Duration
) | IvanMykhailov/stats-logger | core/src/main/scala/slogger/model/processing/CalculationMetaStats.scala | Scala | mit | 178 |
package pl.umk.bugclassification.scmparser.gerrit.parsers
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class JsonGerritEventStreamParserSuite extends FunSuite {
test("parsing correct 'patch set created event'") {
val line = """{"type":"patchset-created","change":{"project":"tmp","branch":"master","id":"Ia3ff6b65602e80209b9509a9ea50241f0791042d","number":"56","subject":"9th","owner":{"name":"Mikołaj Fejzer","email":"[email protected]","username":"mfejzer"},"url":"http://machina:8080/56"},"patchSet":{"number":"2","revision":"0a96b652a4a7f840490b23dab1a6147b04e8865b","parents":["a6402e9705e3bb62d94262f29b002274478acb4b"],"ref":"refs/changes/56/56/2","uploader":{"name":"Mikołaj Fejzer","email":"[email protected]","username":"mfejzer"},"createdOn":1365250060,"author":{"name":"Mikołaj Fejzer","email":"[email protected]","username":"mfejzer"},"sizeInsertions":2,"sizeDeletions":-1},"uploader":{"name":"Mikołaj Fejzer","email":"[email protected]","username":"mfejzer"}}"""
val event = JsonGerritEventStreamParser.processEvent(line)
assert(event.isDefined === true)
assert(event.get.change.project === "tmp")
assert(event.get.patchSet.ref === "refs/changes/56/56/2")
assert(event.get.patchSet.revision === "0a96b652a4a7f840490b23dab1a6147b04e8865b")
}
} | mfejzer/CommitClassification | src/test/scala/pl/umk/bugclassification/scmparser/gerrit/parsers/JsonGerritEventStreamParserSuite.scala | Scala | bsd-3-clause | 1,378 |
package org.neilconcepts
import scalaz._
import Scalaz._
import scala.concurrent._
import scala.concurrent.duration._
import ExecutionContext.Implicits.global
import Kleisli._
object Twitter {
def getTweets(name: String, apiKey: String, apiSecret: String): Future[List[Tweet]] = future {
val tweet1 = Tweet(UserDetails("test"), "first tweet")
val tweet2 = Tweet(UserDetails("test"), "second tweet")
val tweet3 = Tweet(UserDetails("test"), "third tweet")
List(tweet1, tweet2, tweet3)
}
def getTweetsOld(username: String): Config => Future[List[Tweet]] = c => {
Twitter.getTweets(username, c.key, c.secret)
}
def getTweets(username: String): ReaderT[Future, Config, List[Tweet]] = {
kleisli { c =>
getTweets(username, c.key, c.secret)
}
}
case class UserDetails(screenName: String)
case class Tweet(user: UserDetails, content: String)
}
object Neil {
def generateString(text: String, context: Int): Future[String] = future {
text.reverse
}
def generateStringOld(text: String): Config => Future[String] = c => {
generateString(text, c.context)
}
def generateString(text: String): ReaderT[Future, Config, String] = {
Kleisli { c =>
Neil.generateString(text, c.context)
}
}
}
object Actions { /*//{*/
def simpleActions() = { /*//{*/
def tweetsFuture =
Twitter.getTweets("test", "key", "secret")
def tweets = Await.result(tweetsFuture, 1.second)
println(tweets)
def randomString = Neil.generateString(
tweets.map(_.content).mkString(" "), 2)
val meh = Await.result(randomString, 1.second)
println(s"test: $meh")
} /*//}*/
def monadActions() = { /*//{*/
val apiKey = "key"
val apiSec = "secret"
val context = 2
def randString(username: String): Future[String] = for {
ts <- Twitter.getTweets(username, apiKey, apiSec)
tweetsAsText = ts.map(_.content).mkString(" ")
r <- Neil.generateString(tweetsAsText, context)
} yield r
val a = Await.result(randString("test"), 1.second)
println(a)
} /*//}*/
def testUtils() { /*//{*/
val config = Config("test", "secret", 2)
println(Util.isEvenLengthString("test"))
println(Util.isEvenLengthString("hello"))
val a = Util.keyAndSecret(config)
println(a)
} /*//}*/
def monadActions2() = { /*//{*/
val config = Config("test", "secret", 2)
val tweets = for {
tweet <- Twitter.getTweets("test")(config)
} yield tweet
println(tweets)
} /*//}*/
def monadTrans() = {
val fo: Future[Option[Int]] = Future(Some(1))
val optionT: OptionT[Future, Int] = OptionT(fo)
val foUpdated = optionT.map(_ + 9).run
println(Await.result(foUpdated, 1.second) === Some(10))
// ---- part 2
val f: String => Future[Int] = s => Future(s.length)
val reader: ReaderT[Future, String, Int] = kleisli(f)
val futResult: Future[Int] = reader.run("testing")
println(Await.result(futResult, 1.second))
}
} /*//}*/
object Util { /*//{*/
val strLen: String => Int = _.length
val isEven: Int => Boolean = _ % 2 == 0
val isEvenLengthString: String => Boolean = strLen andThen isEven
val keyFromConfig: Config => String = _.key
val secretFromConfig: Config => String = _.secret
val keyAndSecretOld: Config => (String, String) = a =>
(keyFromConfig(a), secretFromConfig(a))
val keyAndSecret: Config => (String, String) =
for {
k <- keyFromConfig
s <- secretFromConfig
} yield (k, s)
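  // e.g. keyAndSecret(Config("test", "secret", 2)) == ("test", "secret"): the Function1 (Reader)
  // monad threads the same Config through both keyFromConfig and secretFromConfig.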
} /*//}*/
case class Config(key: String, secret: String, context: Int)
object Scalaz_playground extends App {
//Actions.simpleActions()
//Actions.monadActions()
//Actions.testUtils()
//Actions.monadActions2()
Actions.monadTrans()
}
| bneil/scalaz_playground | src/main/scala/org/neilconcepts/Scalaz_playground.scala | Scala | mit | 3,743 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.shuffle.{BaseShuffleHandle, IndexShuffleBlockResolver, ShuffleWriter}
import org.apache.spark.storage.ShuffleBlockId
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.ExternalSorter
private[spark] class SortShuffleWriter[K, V, C](
shuffleBlockResolver: IndexShuffleBlockResolver,
handle: BaseShuffleHandle[K, V, C],
mapId: Int,
context: TaskContext)
extends ShuffleWriter[K, V] with Logging {
private val dep = handle.dependency
private val blockManager = SparkEnv.get.blockManager
private var sorter: ExternalSorter[K, V, _] = null
// Are we in the process of stopping? Because map tasks can call stop() with success = true
// and then call stop() with success = false if they get an exception, we want to make sure
// we don't try deleting files, etc twice.
private var stopping = false
private var mapStatus: MapStatus = null
private val writeMetrics = context.taskMetrics().shuffleWriteMetrics
/** Write a bunch of records to this task's output */
override def write(records: Iterator[Product2[K, V]]): Unit = {
sorter = if (dep.mapSideCombine) {
require(dep.aggregator.isDefined, "Map-side combine without Aggregator specified!")
new ExternalSorter[K, V, C](
context, dep.aggregator, Some(dep.partitioner), dep.keyOrdering, dep.serializer)
} else {
// In this case we pass neither an aggregator nor an ordering to the sorter, because we don't
// care whether the keys get sorted in each partition; that will be done on the reduce side
// if the operation being run is sortByKey.
new ExternalSorter[K, V, V](
context, aggregator = None, Some(dep.partitioner), ordering = None, dep.serializer)
}
sorter.insertAll(records)
// Don't bother including the time to open the merged output file in the shuffle write time,
// because it just opens a single file, so is typically too fast to measure accurately
// (see SPARK-3570).
val output = shuffleBlockResolver.getDataFile(dep.shuffleId, mapId)
val tmp = Utils.tempFileWith(output)
try {
val blockId = ShuffleBlockId(dep.shuffleId, mapId, IndexShuffleBlockResolver.NOOP_REDUCE_ID)
val partitionLengths = sorter.writePartitionedFile(blockId, tmp)
shuffleBlockResolver.writeIndexFileAndCommit(dep.shuffleId, mapId, partitionLengths, tmp)
mapStatus = MapStatus(blockManager.shuffleServerId, partitionLengths)
} finally {
if (tmp.exists() && !tmp.delete()) {
logError(s"Error while deleting temp file ${tmp.getAbsolutePath}")
}
}
}
/** Close this writer, passing along whether the map completed */
override def stop(success: Boolean): Option[MapStatus] = {
try {
if (stopping) {
return None
}
stopping = true
if (success) {
return Option(mapStatus)
} else {
// The map task failed, so delete our output data.
shuffleBlockResolver.removeDataByMap(dep.shuffleId, mapId)
return None
}
} finally {
// Clean up our sorter, which may have its own intermediate files
if (sorter != null) {
val startTime = System.nanoTime()
sorter.stop()
writeMetrics.incWriteTime(System.nanoTime - startTime)
sorter = null
}
}
}
}
private[spark] object SortShuffleWriter {
def shouldBypassMergeSort(conf: SparkConf, dep: ShuffleDependency[_, _, _]): Boolean = {
// We cannot bypass sorting if we need to do map-side aggregation.
if (dep.mapSideCombine) {
require(dep.aggregator.isDefined, "Map-side combine without Aggregator specified!")
false
} else {
val bypassMergeThreshold: Int = conf.getInt("spark.shuffle.sort.bypassMergeThreshold", 200)
dep.partitioner.numPartitions <= bypassMergeThreshold
}
}
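  // Illustration: with no map-side combine and the default threshold of 200, a dependency with
  // e.g. 150 partitions bypasses merge-sort, while one with 300 partitions does not.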
}
| gioenn/xSpark | core/src/main/scala/org/apache/spark/shuffle/sort/SortShuffleWriter.scala | Scala | apache-2.0 | 4,813 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.sql
import scala.collection.mutable
import io.snappydata.Constant
import org.parboiled2._
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.collection.Utils
import org.apache.spark.sql.hive.SnappyStoreHiveCatalog
import org.apache.spark.sql.types._
import org.apache.spark.sql.{SnappyParserConsts => Consts}
/**
* Base parsing facilities for all SnappyData SQL parsers.
*/
abstract class SnappyBaseParser(session: SnappySession) extends Parser {
val caseSensitive = session.sessionState.conf.caseSensitiveAnalysis
private[sql] final val queryHints = new mutable.HashMap[String, String]
protected def reset(): Unit = queryHints.clear()
protected final def commentBody: Rule0 = rule {
"*/" | ANY ~ commentBody
}
protected final def commentBodyOrHint: Rule0 = rule {
'+' ~ (Consts.whitespace.* ~ capture(CharPredicate.Alpha ~
Consts.identifier.*) ~ Consts.whitespace.* ~
'(' ~ capture(noneOf(Consts.hintValueEnd).*) ~ ')' ~>
((k: String, v: String) => queryHints += (k -> v.trim): Unit)). + ~
commentBody |
commentBody
}
protected final def lineCommentOrHint: Rule0 = rule {
'+' ~ (Consts.space.* ~ capture(CharPredicate.Alpha ~
Consts.identifier.*) ~ Consts.space.* ~
'(' ~ capture(noneOf(Consts.lineHintEnd).*) ~ ')' ~>
((k: String, v: String) => queryHints += (k -> v.trim): Unit)). + ~
noneOf(Consts.lineCommentEnd).* |
noneOf(Consts.lineCommentEnd).*
}
/** The recognized whitespace characters and comments. */
protected final def ws: Rule0 = rule {
quiet(
Consts.whitespace |
'-' ~ '-' ~ lineCommentOrHint |
'/' ~ '*' ~ (commentBodyOrHint | fail("unclosed comment"))
).*
}
/** All recognized delimiters including whitespace. */
final def delimiter: Rule0 = rule {
quiet(&(Consts.delimiters)) ~ ws | EOI
}
protected final def commaSep: Rule0 = rule {
',' ~ ws
}
protected final def digits: Rule1[String] = rule {
capture(CharPredicate.Digit. +) ~ ws
}
protected final def integral: Rule1[String] = rule {
capture(Consts.plusOrMinus.? ~ CharPredicate.Digit. +) ~ ws
}
protected final def scientificNotation: Rule0 = rule {
Consts.exponent ~ Consts.plusOrMinus.? ~ CharPredicate.Digit. +
}
protected final def stringLiteral: Rule1[String] = rule {
    '\'' ~ capture((noneOf("'") | "''").*) ~ '\'' ~ ws ~> ((s: String) =>
if (s.indexOf("''") >= 0) s.replace("''", "'") else s)
}
final def keyword(k: Keyword): Rule0 = rule {
atomic(ignoreCase(k.lower)) ~ delimiter
}
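  // e.g. keyword(Consts.SELECT) matches "select"/"SELECT" only when followed by a delimiter
  // (whitespace, '(', EOI, ...), so identifiers like "selected" are not mistaken for the keyword.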
/**
* Used for DataTypes. Not reserved and otherwise identical to "keyword"
* apart from the name so as to appear properly in error messages related
* to incorrect DataType definition.
*/
protected final def newDataType(t: Keyword): Rule0 = rule {
atomic(ignoreCase(t.lower)) ~ delimiter
}
final def sql: Rule1[LogicalPlan] = rule {
ws ~ start ~ (';' ~ ws).* ~ EOI
}
protected def start: Rule1[LogicalPlan]
protected final def identifier: Rule1[String] = rule {
atomic(capture(CharPredicate.Alpha ~ Consts.identifier.*)) ~
delimiter ~> { (s: String) =>
val ucase = Utils.toUpperCase(s)
test(!Consts.reservedKeywords.contains(ucase)) ~
push(if (caseSensitive) s else ucase)
} |
quotedIdentifier
}
protected final def quotedIdentifier: Rule1[String] = rule {
atomic('"' ~ capture((noneOf("\\"") | "\\"\\""). +) ~ '"') ~
ws ~> { (s: String) =>
val id = if (s.indexOf("\\"\\"") >= 0) s.replace("\\"\\"", "\\"") else s
if (caseSensitive) id else Utils.toUpperCase(id)
} |
atomic('`' ~ capture((noneOf("`") | "``"). +) ~ '`') ~ ws ~> { (s: String) =>
val id = if (s.indexOf("``") >= 0) s.replace("``", "`") else s
if (caseSensitive) id else Utils.toUpperCase(id)
}
}
/**
* A strictIdentifier is more restricted than an identifier in that neither
* any of the SQL reserved keywords nor non-reserved keywords will be
* interpreted as a strictIdentifier.
*/
protected final def strictIdentifier: Rule1[String] = rule {
atomic(capture(CharPredicate.Alpha ~ Consts.identifier.*)) ~
delimiter ~> { (s: String) =>
val ucase = Utils.toUpperCase(s)
test(!Consts.allKeywords.contains(ucase)) ~
push(if (caseSensitive) s else ucase)
} |
quotedIdentifier
}
// DataTypes
// It is not useful to see long list of "expected ARRAY or BIGINT or ..."
// for parse errors, so not making these separate rules and instead naming
// the common rule as "datatype" which is otherwise identical to "keyword"
final def ARRAY: Rule0 = newDataType(Consts.ARRAY)
final def BIGINT: Rule0 = newDataType(Consts.BIGINT)
final def BINARY: Rule0 = newDataType(Consts.BINARY)
final def BLOB: Rule0 = newDataType(Consts.BLOB)
final def BOOLEAN: Rule0 = newDataType(Consts.BOOLEAN)
final def BYTE: Rule0 = newDataType(Consts.BYTE)
final def CHAR: Rule0 = newDataType(Consts.CHAR)
final def CLOB: Rule0 = newDataType(Consts.CLOB)
final def DATE: Rule0 = newDataType(Consts.DATE)
final def DECIMAL: Rule0 = newDataType(Consts.DECIMAL)
final def DOUBLE: Rule0 = newDataType(Consts.DOUBLE)
final def FLOAT: Rule0 = newDataType(Consts.FLOAT)
final def INT: Rule0 = newDataType(Consts.INT)
final def INTEGER: Rule0 = newDataType(Consts.INTEGER)
final def LONG: Rule0 = newDataType(Consts.LONG)
final def MAP: Rule0 = newDataType(Consts.MAP)
final def NUMERIC: Rule0 = newDataType(Consts.NUMERIC)
final def REAL: Rule0 = newDataType(Consts.REAL)
final def SHORT: Rule0 = newDataType(Consts.SHORT)
final def SMALLINT: Rule0 = newDataType(Consts.SMALLINT)
final def STRING: Rule0 = newDataType(Consts.STRING)
final def STRUCT: Rule0 = newDataType(Consts.STRUCT)
final def TIMESTAMP: Rule0 = newDataType(Consts.TIMESTAMP)
final def TINYINT: Rule0 = newDataType(Consts.TINYINT)
final def VARBINARY: Rule0 = newDataType(Consts.VARBINARY)
final def VARCHAR: Rule0 = newDataType(Consts.VARCHAR)
protected final def fixedDecimalType: Rule1[DataType] = rule {
(DECIMAL | NUMERIC) ~ '(' ~ ws ~ digits ~ commaSep ~ digits ~ ')' ~ ws ~>
((precision: String, scale: String) =>
DecimalType(precision.toInt, scale.toInt))
}
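  // e.g. "decimal(10, 2)" parses to DecimalType(10, 2); a bare DECIMAL/NUMERIC falls back to
  // DecimalType.SYSTEM_DEFAULT in primitiveType below.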
protected final def primitiveType: Rule1[DataType] = rule {
STRING ~> (() => StringType) |
INTEGER ~> (() => IntegerType) |
INT ~> (() => IntegerType) |
BIGINT ~> (() => LongType) |
LONG ~> (() => LongType) |
DOUBLE ~> (() => DoubleType) |
fixedDecimalType |
DECIMAL ~> (() => DecimalType.SYSTEM_DEFAULT) |
NUMERIC ~> (() => DecimalType.SYSTEM_DEFAULT) |
DATE ~> (() => DateType) |
TIMESTAMP ~> (() => TimestampType) |
FLOAT ~> (() => FloatType) |
REAL ~> (() => FloatType) |
BOOLEAN ~> (() => BooleanType) |
CLOB ~> (() => StringType) |
BLOB ~> (() => BinaryType) |
BINARY ~> (() => BinaryType) |
VARBINARY ~> (() => BinaryType) |
SMALLINT ~> (() => ShortType) |
SHORT ~> (() => ShortType) |
TINYINT ~> (() => ByteType) |
BYTE ~> (() => ByteType)
}
protected final def charType: Rule1[DataType] = rule {
VARCHAR ~ '(' ~ ws ~ digits ~ ')' ~ ws ~> ((d: String) => StringType) |
CHAR ~ '(' ~ ws ~ digits ~ ')' ~ ws ~> ((d: String) => StringType)
}
final def dataType: Rule1[DataType] = rule {
charType | primitiveType | arrayType | mapType | structType
}
protected final def arrayType: Rule1[DataType] = rule {
ARRAY ~ '<' ~ ws ~ dataType ~ '>' ~ ws ~>
((t: DataType) => ArrayType(t))
}
protected final def mapType: Rule1[DataType] = rule {
MAP ~ '<' ~ ws ~ dataType ~ commaSep ~ dataType ~ '>' ~ ws ~>
((t1: DataType, t2: DataType) => MapType(t1, t2))
}
protected final def structField: Rule1[StructField] = rule {
identifier ~ ':' ~ ws ~ dataType ~> ((name: String, t: DataType) =>
StructField(name, t, nullable = true))
}
protected final def structType: Rule1[DataType] = rule {
STRUCT ~ '<' ~ ws ~ (structField * commaSep) ~ '>' ~ ws ~>
((f: Any) => StructType(f.asInstanceOf[Seq[StructField]].toArray))
}
protected final def columnCharType: Rule1[DataType] = rule {
VARCHAR ~ '(' ~ ws ~ digits ~ ')' ~ ws ~> ((d: String) =>
CharType(d.toInt, baseType = "VARCHAR")) |
CHAR ~ '(' ~ ws ~ digits ~ ')' ~ ws ~> ((d: String) =>
CharType(d.toInt, baseType = "CHAR")) |
STRING ~> (() => CharType(Constant.MAX_VARCHAR_SIZE, baseType = "STRING"))
}
final def columnDataType: Rule1[DataType] = rule {
columnCharType | primitiveType | arrayType | mapType | structType
}
final def tableIdentifier: Rule1[TableIdentifier] = rule {
// case-sensitivity already taken care of properly by "identifier"
(identifier ~ '.' ~ ws).? ~ identifier ~> ((schema: Any, table: String) =>
TableIdentifier(table, schema.asInstanceOf[Option[String]]))
}
final def functionIdentifier: Rule1[FunctionIdentifier] = rule {
// case-sensitivity already taken care of properly by "identifier"
(identifier ~ '.' ~ ws).? ~ identifier ~> ((schema: Any, name: String) =>
FunctionIdentifier(name, database = schema.asInstanceOf[Option[String]]))
}
}
final class Keyword private[sql] (s: String) {
val lower = Utils.toLowerCase(s)
val upper = Utils.toUpperCase(s)
}
object SnappyParserConsts {
  final val space: CharPredicate = CharPredicate(' ', '\t')
  final val whitespace: CharPredicate = CharPredicate(
    ' ', '\t', '\n', '\r', '\f')
final val delimiters: CharPredicate = whitespace ++ CharPredicate('@', '*',
'+', '-', '<', '=', '!', '>', '/', '(', ')', ',', ';', '%', '{', '}', ':',
'[', ']', '.', '&', '|', '^', '~', '#')
  final val lineCommentEnd = "\n\r\f" + EOI
  final val lineHintEnd = ")\n\r\f" + EOI
final val hintValueEnd = ")*" + EOI
final val identifier: CharPredicate = CharPredicate.AlphaNum ++
CharPredicate('_')
final val plusOrMinus: CharPredicate = CharPredicate('+', '-')
final val arithmeticOperator = CharPredicate('*', '/', '%', '&', '|', '^')
final val exponent: CharPredicate = CharPredicate('e', 'E')
final val numeric: CharPredicate = CharPredicate.Digit ++
CharPredicate('.') ++ exponent
final val numericSuffix: CharPredicate = CharPredicate('D', 'L')
final val plural: CharPredicate = CharPredicate('s', 'S')
final val trueFn: () => Boolean = () => true
final val falseFn: () => Boolean = () => false
final val reservedKeywords: mutable.Set[String] = mutable.Set[String]()
final val allKeywords: mutable.Set[String] = mutable.Set[String]()
/**
* Registering a Keyword with this method marks it a reserved keyword,
* i.e. it is interpreted as a keyword wherever it may appear and is never
* interpreted as an identifier (except if quoted).
* <p>
* Use this only for SQL reserved keywords.
*/
private[sql] def reservedKeyword(s: String): Keyword = {
val k = new Keyword(s)
reservedKeywords += k.upper
allKeywords += k.upper
k
}
/**
* Registering a Keyword with this method marks it a non-reserved keyword.
* These can be interpreted as identifiers as per the parsing rules,
* but never interpreted as a "strictIdentifier". In other words, use
* "strictIdentifier" in parsing rules where there can be an ambiguity
* between an identifier and a non-reserved keyword.
* <p>
* Use this for all SQL keywords used by grammar that are not reserved.
*/
private[sql] def nonReservedKeyword(s: String): Keyword = {
val k = new Keyword(s)
allKeywords += k.upper
k
}
// reserved keywords
final val ALL = reservedKeyword("all")
final val AND = reservedKeyword("and")
final val AS = reservedKeyword("as")
final val ASC = reservedKeyword("asc")
final val BETWEEN = reservedKeyword("between")
final val BY = reservedKeyword("by")
final val CASE = reservedKeyword("case")
final val CAST = reservedKeyword("cast")
final val CREATE = reservedKeyword("create")
final val CURRENT = reservedKeyword("current")
final val CURRENT_DATE = reservedKeyword("current_date")
final val CURRENT_TIMESTAMP = reservedKeyword("current_timestamp")
final val DELETE = reservedKeyword("delete")
final val DESC = reservedKeyword("desc")
final val DISTINCT = reservedKeyword("distinct")
final val DROP = reservedKeyword("drop")
final val ELSE = reservedKeyword("else")
final val EXCEPT = reservedKeyword("except")
final val EXISTS = reservedKeyword("exists")
final val FALSE = reservedKeyword("false")
final val FROM = reservedKeyword("from")
final val GROUP = reservedKeyword("group")
final val HAVING = reservedKeyword("having")
final val IN = reservedKeyword("in")
final val INNER = reservedKeyword("inner")
final val INSERT = reservedKeyword("insert")
final val INTERSECT = reservedKeyword("intersect")
final val INTO = reservedKeyword("into")
final val IS = reservedKeyword("is")
final val JOIN = reservedKeyword("join")
final val LEFT = reservedKeyword("left")
final val LIKE = reservedKeyword("like")
final val NOT = reservedKeyword("not")
final val NULL = reservedKeyword("null")
final val ON = reservedKeyword("on")
final val OR = reservedKeyword("or")
final val ORDER = reservedKeyword("order")
final val OUTER = reservedKeyword("outer")
final val RIGHT = reservedKeyword("right")
final val SCHEMA = reservedKeyword("schema")
final val SELECT = reservedKeyword("select")
final val SET = reservedKeyword("set")
final val TABLE = reservedKeyword("table")
final val THEN = reservedKeyword("then")
final val TO = reservedKeyword("to")
final val TRUE = reservedKeyword("true")
final val UNION = reservedKeyword("union")
final val UNIQUE = reservedKeyword("unique")
final val UPDATE = reservedKeyword("update")
final val WHEN = reservedKeyword("when")
final val WHERE = reservedKeyword("where")
final val WITH = reservedKeyword("with")
final val FUNCTIONS = reservedKeyword("functions")
final val FUNCTION = reservedKeyword("function")
// marked as internal keywords to prevent use in SQL
final val HIVE_METASTORE = reservedKeyword(
SnappyStoreHiveCatalog.HIVE_METASTORE)
final val SAMPLER_WEIGHTAGE = nonReservedKeyword(Utils.WEIGHTAGE_COLUMN_NAME)
// non-reserved keywords
final val ANTI = nonReservedKeyword("anti")
final val CACHE = nonReservedKeyword("cache")
final val CLEAR = nonReservedKeyword("clear")
final val CLUSTER = nonReservedKeyword("cluster")
final val COMMENT = nonReservedKeyword("comment")
final val DESCRIBE = nonReservedKeyword("describe")
final val DISTRIBUTE = nonReservedKeyword("distribute")
final val END = nonReservedKeyword("end")
final val EXTENDED = nonReservedKeyword("extended")
final val EXTERNAL = nonReservedKeyword("external")
final val FULL = nonReservedKeyword("full")
final val GLOBAL = nonReservedKeyword("global")
final val HASH = nonReservedKeyword("hash")
final val IF = nonReservedKeyword("if")
final val INDEX = nonReservedKeyword("index")
final val INIT = nonReservedKeyword("init")
final val INTERVAL = nonReservedKeyword("interval")
final val LAZY = nonReservedKeyword("lazy")
final val LIMIT = nonReservedKeyword("limit")
final val NATURAL = nonReservedKeyword("natural")
final val OPTIONS = nonReservedKeyword("options")
final val OVERWRITE = nonReservedKeyword("overwrite")
final val PARTITION = nonReservedKeyword("partition")
final val PUT = nonReservedKeyword("put")
final val REFRESH = nonReservedKeyword("refresh")
final val REGEXP = nonReservedKeyword("regexp")
final val RLIKE = nonReservedKeyword("rlike")
final val SEMI = nonReservedKeyword("semi")
final val SHOW = nonReservedKeyword("show")
final val SORT = nonReservedKeyword("sort")
final val START = nonReservedKeyword("start")
final val STOP = nonReservedKeyword("stop")
final val STREAM = nonReservedKeyword("stream")
final val STREAMING = nonReservedKeyword("streaming")
final val TABLES = nonReservedKeyword("tables")
final val TEMPORARY = nonReservedKeyword("temporary")
final val TRUNCATE = nonReservedKeyword("truncate")
final val UNCACHE = nonReservedKeyword("uncache")
final val USING = nonReservedKeyword("using")
final val RETURNS = nonReservedKeyword("returns")
final val FN = nonReservedKeyword("fn")
// Window analytical functions are non-reserved
final val DURATION = nonReservedKeyword("duration")
final val FOLLOWING = nonReservedKeyword("following")
final val OVER = nonReservedKeyword("over")
final val PRECEDING = nonReservedKeyword("preceding")
final val RANGE = nonReservedKeyword("range")
final val ROW = nonReservedKeyword("row")
final val ROWS = nonReservedKeyword("rows")
final val SLIDE = nonReservedKeyword("slide")
final val UNBOUNDED = nonReservedKeyword("unbounded")
final val WINDOW = nonReservedKeyword("window")
// interval units are not reserved
final val DAY = nonReservedKeyword("day")
final val HOUR = nonReservedKeyword("hour")
final val MICROSECOND = nonReservedKeyword("microsecond")
final val MILLISECOND = nonReservedKeyword("millisecond")
final val MINUTE = nonReservedKeyword("minute")
final val MONTH = nonReservedKeyword("month")
final val SECOND = nonReservedKeyword("seconds")
final val WEEK = nonReservedKeyword("week")
final val YEAR = nonReservedKeyword("year")
// cube, rollup, grouping sets are not reserved
final val CUBE = nonReservedKeyword("cube")
final val ROLLUP = nonReservedKeyword("rollup")
final val GROUPING = nonReservedKeyword("grouping")
final val SETS = nonReservedKeyword("sets")
// datatypes are not reserved
final val ARRAY = nonReservedKeyword("array")
final val BIGINT = nonReservedKeyword("bigint")
final val BINARY = nonReservedKeyword("binary")
final val BLOB = nonReservedKeyword("blob")
final val BOOLEAN = nonReservedKeyword("boolean")
final val BYTE = nonReservedKeyword("byte")
final val CHAR = nonReservedKeyword("char")
final val CLOB = nonReservedKeyword("clob")
final val DATE = nonReservedKeyword("date")
final val DECIMAL = nonReservedKeyword("decimal")
final val DOUBLE = nonReservedKeyword("double")
final val FLOAT = nonReservedKeyword("float")
final val INT = nonReservedKeyword("int")
final val INTEGER = nonReservedKeyword("integer")
final val LONG = nonReservedKeyword("long")
final val MAP = nonReservedKeyword("map")
final val NUMERIC = nonReservedKeyword("numeric")
final val REAL = nonReservedKeyword("real")
final val SHORT = nonReservedKeyword("short")
final val SMALLINT = nonReservedKeyword("smallint")
final val STRING = nonReservedKeyword("string")
final val STRUCT = nonReservedKeyword("struct")
final val TIMESTAMP = nonReservedKeyword("timestamp")
final val TINYINT = nonReservedKeyword("tinyint")
final val VARBINARY = nonReservedKeyword("varbinary")
final val VARCHAR = nonReservedKeyword("varchar")
// for AQP
final val ERROR = nonReservedKeyword("error")
final val ESTIMATE = nonReservedKeyword("estimate")
final val CONFIDENCE = nonReservedKeyword("confidence")
final val BEHAVIOR = nonReservedKeyword("behavior")
final val SAMPLE = nonReservedKeyword("sample")
final val TOPK = nonReservedKeyword("topk")
}
| vjr/snappydata | core/src/main/scala/org/apache/spark/sql/SnappyBaseParser.scala | Scala | apache-2.0 | 20,136 |
package be.angelcorp.glsl.impl
import be.angelcorp.glsl._
import be.angelcorp.glsl.ast._
import be.angelcorp.glsl.util.GlslType
import org.slf4j.LoggerFactory
import scala.reflect.macros.whitebox.Context
/**
* Generates scala symbols into the object class files to retain glsl information during runtime (and code assistance)
*/
class ScalaGenerator[C <: Context](val c: C) extends MacroUtils {
private val logger = LoggerFactory.getLogger( getClass )
import c.universe._
val serializer = new Compiler[c.type](c)
def runtimeSymbol( sym: GlslSymbol ): c.Tree =
runtimeSymbolWithImpl( sym, serializer.serialize(sym).toTree[c.type](c) )
def runtimeSymbolWithImpl( sym: GlslSymbol, impl: c.Tree ): c.Tree = {
import c.universe._
try {
val res = sym match {
case s: GlslStruct => generateStruct( s, impl )
case ib: GlslInterfaceBlock => generateInterfaceBlock( ib, impl )
case f: GlslFunction => generateFunction( f, impl )
case _ => generateSymbol( sym, impl )
}
//logger.trace( "Generated runtime symbol: " + showCode(res) )
res
} catch {
case e: Throwable =>
logger.warn("Failed to generate runtime symbol for: " + sym, e)
EmptyTree
}
}
def scalaClassSymbolType( typ: GlslType ) =
typ.toTypeTree(c)
def generateFunction( f: GlslFunction, impl: c.Tree ): c.Tree = {
val decl = serializer.declaration( f )
q"""
@be.angelcorp.glsl.util.GlslRuntimeSymbolAnnotation( $decl, $impl )
def ${TermName(f.name)}: (..${f.parameters.map( p => scalaClassSymbolType(p.typ) )}) => ${scalaClassSymbolType(f.typ)} = ???
"""
}
def generateStruct( s: GlslStruct, impl: c.Tree ): c.Tree = {
val decl = serializer.declaration( s )
val args = s.vars.map( p => ValDef(Modifiers(), TermName(p.name), Ident(p.typ.toTypeName[c.type](c)), EmptyTree) )
q"""
@be.angelcorp.glsl.util.GlslRuntimeSymbolAnnotation( $decl, $impl )
case class ${TypeName(s.name)}( ..$args )
"""
}
def generateInterfaceBlock( ib: GlslInterfaceBlock, impl: c.Tree ): c.Tree = {
val decl = serializer.declaration( ib )
val funcs = ib.vars.map( p => ValDef(Modifiers(), TermName(p.name), p.typ.toTypeTree[c.type](c), q"???") )
q"""
@be.angelcorp.glsl.util.GlslRuntimeSymbolAnnotation( $decl, $impl )
object ${TermName(ib.name)} { ..$funcs }
"""
}
def generateSymbol( sym: GlslSymbol, impl: c.Tree ): c.Tree = {
val decl = serializer.declaration( sym )
val rtype = scalaClassSymbolType(sym.typ)
q"""
@be.angelcorp.glsl.util.GlslRuntimeSymbolAnnotation( $decl, $impl )
def ${TermName(sym.name)} : $rtype = ???
"""
}
}
| AODtorusan/scala-glsl | macros/src/main/scala/be/angelcorp/glsl/impl/ScalaGenerator.scala | Scala | mit | 2,751 |
class B extends A | xeno-by/old-scalameta-sbt | sbt/src/sbt-test/source-dependencies/parent-member-change/B.scala | Scala | bsd-3-clause | 17 |
package com.arcusys.learn.liferay.update.version300
import java.sql.Connection
import com.arcusys.learn.liferay.update.version300.migrations.scorm.{ActivityStateNodeMigration, ActivityStateTreeMigration}
import com.arcusys.valamis.persistence.common.SlickProfile
import com.arcusys.valamis.persistence.impl.scorm.model.{AttemptModel, ScormUserModel}
import com.arcusys.valamis.persistence.impl.scorm.schema._
import org.scalatest.{BeforeAndAfter, FunSuite}
import scala.slick.driver.{H2Driver, JdbcProfile}
import scala.slick.jdbc.{JdbcBackend, StaticQuery}
class ActivityStateTreeMigrationTest(val driver: JdbcProfile)
extends FunSuite
with BeforeAndAfter
with ActivityStateTableComponent
with ActivityStateNodeTableComponent
with ActivityStateTreeTableComponent
with AttemptTableComponent
with ScormUserComponent
with SlickProfile {
def this() {
this(H2Driver)
}
import driver.simple._
val db = Database.forURL("jdbc:h2:mem:migrationActivityTreeTest", driver = "org.h2.Driver")
var connection: Connection = _
before {
connection = db.source.createConnection()
db.withSession { implicit s =>
StaticQuery.updateNA(
"""create table Learn_LFActivityStateTree (
id_ LONG not null primary key,
currentActivityID TEXT null,
suspendedActivityID TEXT null,
attemptID INTEGER null
);"""
).execute
StaticQuery.updateNA(
"""create table Learn_LFActivityState (
id_ LONG not null primary key,
packageID INTEGER null,
activityID VARCHAR(3000) null,
active_ BOOLEAN null,
suspended BOOLEAN null,
attemptCompleted BOOLEAN null,
attemptCompletionAmount NUMERIC(20,2),
attemptAbsoluteDuration NUMERIC(20,2),
attemptExperiencedDuration NUMERIC(20,2),
activityAbsoluteDuration NUMERIC(20,2),
activityExperiencedDuration NUMERIC(20,2),
attemptCount INTEGER null,
activityStateNodeID INTEGER null,
activityStateTreeID INTEGER null
);"""
).execute
(scormUsersTQ.ddl ++
attemptTQ.ddl ++
activityStateTreeTQ.ddl ++
activityStateNodeTQ.ddl ++
activityStateTQ.ddl).create
}
}
after {
db.withSession { implicit s =>
StaticQuery.updateNA(
"""drop table Learn_LFActivityState;
|drop table Learn_LFActivityStateTree;
""".stripMargin
).execute
(activityStateTreeTQ.ddl ++
activityStateNodeTQ.ddl ++
activityStateTQ.ddl ++
attemptTQ.ddl ++
scormUsersTQ.ddl).drop
}
connection.close()
}
val courseId = 245
test("empty source table") {
db.withSession { implicit s =>
val migration = new ActivityStateTreeMigration(db, driver)
migration.migrate()
val size =
activityStateTreeTQ.length.run
assert(0 == size)
}
}
test("migrate for tree without attempt") {
val id: Long = 324
val currentActivityID: String = "ewrwe"
val suspendedActivityID: String = "werwe"
val attemptID: Option[Long] = None
db.withSession { implicit s =>
addActivityStateTree(id,
currentActivityID,
suspendedActivityID,
attemptID)
val migration = new ActivityStateTreeMigration(db, driver)
migration.migrate()
val rows = activityStateTreeTQ.list
assert(1 == rows.length)
val g = rows.head
assert(currentActivityID === g.currentActivityId.get )
assert(suspendedActivityID === g.suspendedActivityId.get )
assert(attemptID === g.attemptId )
}
}
test("migrate for tree with attempt") {
val id: Long = 324
val currentActivityID: String = "ewrwe"
val suspendedActivityID: String = "werwe"
val attemptID: Option[Long] = Some(454)
db.withSession { implicit s =>
scormUsersTQ.insert(ScormUserModel(12,"user", None, None, None, None))
val newAttemptID = attemptTQ.returning(attemptTQ.map(_.id)).insert(AttemptModel(None, 12, 12, "err", true))
addActivityStateTree(id,
currentActivityID,
suspendedActivityID,
attemptID)
val migration = new ActivityStateTreeMigration(db, driver)
migration.migrate(attemptID.get, newAttemptID)
val rows = activityStateTreeTQ.list
assert(1 == rows.length)
val g = rows.head
assert(currentActivityID === g.currentActivityId.get )
assert(suspendedActivityID === g.suspendedActivityId.get )
assert(newAttemptID === g.attemptId.get )
}
}
private def addActivityStateTree( id: Long,
currentActivityID: String,
suspendedActivityID: String,
attemptID: Option[Long]
)(implicit s: JdbcBackend#Session): Unit = {
StaticQuery.updateNA(
s"""insert into Learn_LFActivityStateTree
(id_, currentActivityID, suspendedActivityID, attemptID)
values ($id, '$currentActivityID', '$suspendedActivityID', ${attemptID.map(_.toString).getOrElse("NULL")});"""
).execute
}
}
| igor-borisov/valamis | learn-portlet/src/test/scala/com/arcusys/learn/liferay/update/version300/ActivityStateTreeMigrationTest.scala | Scala | gpl-3.0 | 5,228 |
import io.gatling.core.Predef._
import io.gatling.http.Predef._
object PatientOverview {
def choosePatient(patientId: String, offset: Int = 0, max: Int = 10) =
exec(http("Patient Overview")
.get("/patientOverview/index?offset=" + offset + "&max=" + max))
.pause(2)
.exec(http("Choose Patient")
.get("/patient/questionnaires/" + patientId))
}
| silverbullet-dk/opentele-performance-tests | src/test/scala/user-files/simulations/processes/clinician/PatientOverview.scala | Scala | apache-2.0 | 356 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.cache.ehcache
import javax.inject.{ Inject, Provider }
import net.sf.ehcache.CacheManager
import play.api.cache.{ AsyncCacheApi, SyncCacheApi }
import play.api.cache.ehcache.CacheManagerProvider
import play.api.inject._
import play.api.test.{ PlaySpecification, WithApplication }
import play.cache.NamedCache
import scala.concurrent.duration._
import scala.concurrent.{ Future, Await }
class EhCacheApiSpec extends PlaySpecification {
sequential
"CacheApi" should {
"bind named caches" in new WithApplication(
_.configure(
"play.cache.bindCaches" -> Seq("custom")
)
) {
app.injector.instanceOf[NamedCacheController]
}
"bind already created named caches" in new WithApplication(
_.overrides(
bind[CacheManager].toProvider[CustomCacheManagerProvider]
).configure(
"play.cache.createBoundCaches" -> false,
"play.cache.bindCaches" -> Seq("custom")
)
) {
app.injector.instanceOf[NamedCacheController]
}
"get values from cache" in new WithApplication() {
val cacheApi = app.injector.instanceOf[AsyncCacheApi]
val syncCacheApi = app.injector.instanceOf[SyncCacheApi]
syncCacheApi.set("foo", "bar")
Await.result(cacheApi.getOrElseUpdate[String]("foo")(Future.successful("baz")), 1.second) must_== "bar"
syncCacheApi.getOrElseUpdate("foo")("baz") must_== "bar"
}
}
}
class CustomCacheManagerProvider @Inject() (cacheManagerProvider: CacheManagerProvider) extends Provider[CacheManager] {
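  // Drops any caches created by the default provider and registers only the "custom" cache,
  // so the test exercises binding of an already-created named cache.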
lazy val get = {
val mgr = cacheManagerProvider.get
mgr.removalAll()
mgr.addCache("custom")
mgr
}
}
class NamedCacheController @Inject() (
@NamedCache("custom") val cache: SyncCacheApi)
| wsargent/playframework | framework/src/play-ehcache/src/test/scala/play/api/cache/ehcache/EhCacheApiSpec.scala | Scala | apache-2.0 | 1,830 |
import Dependencies._
import sbt.Keys._
import sbt._
import scoverage.ScoverageSbtPlugin
object BalboaCommon {
lazy val settings: Seq[Setting[_]] = BuildSettings.projectSettings ++ Seq(
libraryDependencies <++= scalaVersion {libraries(_)},
sbtbuildinfo.BuildInfoKeys.buildInfoPackage := "com.socrata.balboa",
crossScalaVersions := Seq("2.11.8"),
ScoverageSbtPlugin.ScoverageKeys.coverageMinimum := 0
)
def libraries(implicit scalaVersion: String): Seq[ModuleID] = Seq(
// SLF4J is used directly here instead of scala-logging to allow for cross-compilation to 2.10
log4j,
slf4j_log4j,
junit,
protobuf_java,
mockito_test,
jackson_core_asl,
jackson_mapper_asl,
jopt_simple,
json4s,
socrata_thirdparty_utils,
typesafe_config
)
}
| socrata-platform/balboa | project/BalboaCommon.scala | Scala | apache-2.0 | 799 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers.oauth2
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.util.{ ExtractableRequest, MockWSRequest }
import com.mohiva.play.silhouette.impl.exceptions.{ ProfileRetrievalException, UnexpectedResponseException }
import com.mohiva.play.silhouette.impl.providers.OAuth2Provider._
import com.mohiva.play.silhouette.impl.providers.SocialProfileBuilder._
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth2.GitLabProvider._
import play.api.libs.json.Json
import play.api.test.{ FakeRequest, WithApplication }
import test.Helper
import scala.concurrent.{ ExecutionContext, Future }
/**
* Test case for the [[GitLabProvider]] class.
*/
class GitLabProviderSpec extends OAuth2ProviderSpec {
"The `withSettings` method" should {
"create a new instance with customized settings" in new WithApplication with Context {
val s = provider.withSettings { s =>
s.copy(accessTokenURL = "new-access-token-url")
}
s.settings.accessTokenURL must be equalTo "new-access-token-url"
}
}
"The `authenticate` method" should {
"fail with UnexpectedResponseException for an unexpected response" in new WithApplication with Context {
val wsRequest = mock[MockWSRequest]
val wsResponse = mock[MockWSRequest#Response]
implicit val req = FakeRequest(GET, "?" + Code + "=my.code")
wsResponse.status returns 401
wsResponse.body returns "Unauthorized"
wsRequest.withHttpHeaders(any) returns wsRequest
wsRequest.post[Map[String, Seq[String]]](any)(any) returns Future.successful(wsResponse)
httpLayer.url(oAuthSettings.accessTokenURL) returns wsRequest
stateProvider.unserialize(anyString)(any[ExtractableRequest[String]], any[ExecutionContext]) returns Future.successful(state)
stateProvider.state(any[ExecutionContext]) returns Future.successful(state)
failed[UnexpectedResponseException](provider.authenticate()) {
case e => e.getMessage must startWith(UnexpectedResponse.format(provider.id, "Unauthorized", 401))
}
}
"fail with UnexpectedResponseException if OAuth2Info can be build because of an unexpected response" in new WithApplication with Context {
val wsRequest = mock[MockWSRequest]
val wsResponse = mock[MockWSRequest#Response]
implicit val req = FakeRequest(GET, "?" + Code + "=my.code")
wsResponse.status returns 200
wsResponse.json returns Json.obj()
wsRequest.withHttpHeaders(any) returns wsRequest
wsRequest.post[Map[String, Seq[String]]](any)(any) returns Future.successful(wsResponse)
httpLayer.url(oAuthSettings.accessTokenURL) returns wsRequest
stateProvider.unserialize(anyString)(any[ExtractableRequest[String]], any[ExecutionContext]) returns Future.successful(state)
stateProvider.state(any[ExecutionContext]) returns Future.successful(state)
failed[UnexpectedResponseException](provider.authenticate()) {
case e => e.getMessage must startWith(InvalidInfoFormat.format(provider.id, ""))
}
}
"return the auth info" in new WithApplication with Context {
val wsRequest = mock[MockWSRequest]
val wsResponse = mock[MockWSRequest#Response]
implicit val req = FakeRequest(GET, "?" + Code + "=my.code")
wsResponse.status returns 200
wsResponse.json returns oAuthInfo
wsRequest.withHttpHeaders(any) returns wsRequest
wsRequest.post[Map[String, Seq[String]]](any)(any) returns Future.successful(wsResponse)
httpLayer.url(oAuthSettings.accessTokenURL) returns wsRequest
stateProvider.unserialize(anyString)(any[ExtractableRequest[String]], any[ExecutionContext]) returns Future.successful(state)
stateProvider.state(any[ExecutionContext]) returns Future.successful(state)
authInfo(provider.authenticate())(_ must be equalTo oAuthInfo.as[OAuth2Info])
}
}
"The `authenticate` method with user state" should {
"return stateful auth info" in new WithApplication with Context {
val wsRequest = mock[MockWSRequest]
val wsResponse = mock[MockWSRequest#Response]
implicit val req = FakeRequest(GET, "?" + Code + "=my.code")
wsResponse.status returns 200
wsResponse.json returns oAuthInfo
wsRequest.withHttpHeaders(any) returns wsRequest
wsRequest.post[Map[String, Seq[String]]](any)(any) returns Future.successful(wsResponse)
httpLayer.url(oAuthSettings.accessTokenURL) returns wsRequest
stateProvider.unserialize(anyString)(any[ExtractableRequest[String]], any[ExecutionContext]) returns Future.successful(state)
stateProvider.state(any[ExecutionContext]) returns Future.successful(state)
stateProvider.withHandler(any[SocialStateItemHandler]) returns stateProvider
state.items returns Set(userStateItem)
statefulAuthInfo(provider.authenticate(userStateItem))(_ must be equalTo stateAuthInfo)
}
}
"The `retrieveProfile` method" should {
"fail with ProfileRetrievalException if API returns error" in new WithApplication with Context {
val wsRequest = mock[MockWSRequest]
val wsResponse = mock[MockWSRequest#Response]
wsResponse.status returns 400
wsResponse.json returns Helper.loadJson("providers/oauth2/gitlab.error.json")
wsRequest.get() returns Future.successful(wsResponse)
httpLayer.url(API.format("my.access.token")) returns wsRequest
failed[ProfileRetrievalException](provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case e => e.getMessage must equalTo(SpecifiedProfileError.format(
provider.id,
"Bad credentials"
))
}
}
"fail with ProfileRetrievalException if an unexpected error occurred" in new WithApplication with Context {
val wsRequest = mock[MockWSRequest]
val wsResponse = mock[MockWSRequest#Response]
wsResponse.status returns 500
wsResponse.json throws new RuntimeException("")
wsRequest.get() returns Future.successful(wsResponse)
httpLayer.url(API.format("my.access.token")) returns wsRequest
failed[ProfileRetrievalException](provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case e => e.getMessage must equalTo(UnspecifiedProfileError.format(provider.id))
}
}
"use the overridden API URL" in new WithApplication with Context {
val url = "https://custom.api.url?access_token=%s"
val wsRequest = mock[MockWSRequest]
val wsResponse = mock[MockWSRequest#Response]
oAuthSettings.apiURL returns Some(url)
wsResponse.status returns 200
wsResponse.json returns Helper.loadJson("providers/oauth2/gitlab.success.json")
wsRequest.get() returns Future.successful(wsResponse)
httpLayer.url(url.format("my.access.token")) returns wsRequest
await(provider.retrieveProfile(oAuthInfo.as[OAuth2Info]))
there was one(httpLayer).url(url.format("my.access.token"))
}
"return the social profile" in new WithApplication with Context {
val wsRequest = mock[MockWSRequest]
val wsResponse = mock[MockWSRequest#Response]
wsResponse.status returns 200
wsResponse.json returns Helper.loadJson("providers/oauth2/gitlab.success.json")
wsRequest.get() returns Future.successful(wsResponse)
httpLayer.url(API.format("my.access.token")) returns wsRequest
profile(provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) { p =>
p must be equalTo CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "1"),
fullName = Some("John Smith"),
email = Some("[email protected]"),
avatarURL = Some("http://gitlab.com/uploads/user/avatar/1/index.jpg")
)
}
}
}
/**
* Defines the context for the abstract OAuth2 provider spec.
*
* @return The Context to use for the abstract OAuth2 provider spec.
*/
override protected def context: OAuth2ProviderSpecContext = new Context {}
/**
* The context.
*/
trait Context extends OAuth2ProviderSpecContext {
/**
* The OAuth2 settings.
*/
override lazy val oAuthSettings = spy(OAuth2Settings(
authorizationURL = Some("https://gitlab.com/oauth/authorize"),
accessTokenURL = "https://gitlab.com/oauth/token",
redirectURL = Some("https://www.mohiva.com"),
clientID = "my.client.id",
clientSecret = "my.client.secret",
scope = Some("api")))
/**
* The OAuth2 info returned by GitLab.
*
* @see http://vk.com/dev/auth_sites
*/
override lazy val oAuthInfo = Helper.loadJson("providers/oauth2/gitlab.access.token.json")
/**
* The stateful auth info.
*/
override lazy val stateAuthInfo = StatefulAuthInfo(oAuthInfo.as[OAuth2Info], userStateItem)
/**
* The provider to test.
*/
lazy val provider = new GitLabProvider(httpLayer, stateProvider, oAuthSettings)
}
}
| akkie/play-silhouette | silhouette/test/com/mohiva/play/silhouette/impl/providers/oauth2/GitLabProviderSpec.scala | Scala | apache-2.0 | 9,607 |
package is.hail.lir
import is.hail.asm4s._
import is.hail.utils._
import org.objectweb.asm.Opcodes._
import org.objectweb.asm._
import scala.collection.mutable
class SplitUnreachable() extends Exception()
object SplitMethod {
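  // Approximate bytecode-size budget per emitted method; regions are split until each
  // generated piece fits under this target (see splitSubregions).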
val TargetMethodSize: Int = 500
def apply(
c: Classx[_],
m: Method,
blocks: Blocks,
locals: Locals,
cfg: CFG,
liveness: Liveness,
pst: PST
): Classx[_] = {
val split = new SplitMethod(c, m, blocks, locals, cfg, liveness, pst)
split.split()
split.spillsClass
}
}
class SplitMethod(
c: Classx[_],
m: Method,
blocks: Blocks,
locals: Locals,
cfg: CFG,
liveness: Liveness,
pst: PST
) {
def nBlocks: Int = blocks.nBlocks
def nLocals: Int = locals.nLocals
private val blockPartitions = new UnionFind(nBlocks)
(0 until nBlocks).foreach { i => blockPartitions.makeSet(i) }
private var methodSize = 0
private val blockSize = new Array[Int](nBlocks)
// compute methodSize and regionSize
private def computeBlockSizes(): Unit = {
var i = 0
while (i < nBlocks) {
val size = blocks(i).approxByteCodeSize()
blockSize(i) = size
methodSize += size
i += 1
}
}
private val spillsClass = new Classx(genName("C", s"${ m.name }Spills"), "java/lang/Object", None)
private val spillsCtor = {
val ctor = spillsClass.newMethod("<init>", FastIndexedSeq(), UnitInfo)
val L = new Block()
ctor.setEntry(L)
L.append(
methodStmt(INVOKESPECIAL,
"java/lang/Object",
"<init>",
"()V",
false,
UnitInfo,
FastIndexedSeq(load(ctor.getParam(0)))))
L.append(returnx())
ctor
}
private var _spillReturned: Field = _
private var _spillReturnValue: Field = _
def spillReturned: Field = {
if (_spillReturned == null) {
_spillReturned = spillsClass.newField(genName("f", "spillReturned"), BooleanInfo)
if (m.returnTypeInfo != UnitInfo)
_spillReturnValue = spillsClass.newField(genName("f", "spillReturnValue"), m.returnTypeInfo)
}
_spillReturned
}
def spillReturnValue: Field = {
// create
spillReturned
assert(_spillReturnValue != null)
_spillReturnValue
}
def throwUnreachable(): ControlX = {
val ti = classInfo[SplitUnreachable]
val tcls = classOf[SplitUnreachable]
val c = tcls.getDeclaredConstructor()
throwx(newInstance(ti,
Type.getInternalName(tcls), "<init>", Type.getConstructorDescriptor(c), ti, FastIndexedSeq()))
}
private val spills = m.newLocal("spills", spillsClass.ti)
private val splitMethods = mutable.ArrayBuffer[Method]()
private val localsToSpill = new java.util.BitSet(nLocals)
private val fields = new Array[Field](nLocals)
private val splitReturnCalls = mutable.Set[StmtX]()
private def createSpillFields(): Unit = {
var i = 0
while (i < nLocals) {
val l = locals(i)
val spill = l match {
case p: Parameter =>
p.i != 0
case _ =>
localsToSpill.get(i)
}
if (spill)
fields(i) = spillsClass.newField(genName("f", l.name), l.ti)
i += 1
}
}
// also fixes up null switch targets
def spillLocals(method: Method): Unit = {
def localField(l: Local): Field =
l match {
case p: Parameter =>
fields(p.i)
case _ =>
locals.getIndex(l) match {
case Some(i) =>
fields(i)
case None =>
null
}
}
def getSpills(): ValueX = {
if (method eq m)
load(spills)
else
load(new Parameter(method, 1, spillsClass.ti))
}
def spill(x: X): Unit = {
x.children.foreach(spill)
x match {
case x: LoadX =>
if ((method ne m) && (x.l eq spills)) {
x.l = new Parameter(method, 1, spillsClass.ti)
} else {
val f = localField(x.l)
if (f != null)
x.replace(getField(f, getSpills()))
}
case x: IincX =>
assert(x.l ne spills)
val f = localField(x.l)
if (f != null) {
x.replace(
putField(f, getSpills(),
insn2(IADD)(
getField(f, getSpills()),
ldcInsn(x.i, IntInfo))))
}
case x: StoreX =>
assert(x.l ne spills)
val f = localField(x.l)
if (f != null) {
val v = x.children(0)
v.remove()
x.replace(putField(f, getSpills(), v))
}
case _ =>
}
}
val blocks = method.findBlocks()
for (b <- blocks) {
b.last match {
case x: SwitchX =>
var Lunreachable: Block = null
var i = 0
while (i < x.targetArity()) {
if (x.target(i) == null) {
if (Lunreachable == null) {
Lunreachable = new Block()
Lunreachable.method = method
Lunreachable.append(throwUnreachable())
}
x.setTarget(i, Lunreachable)
}
i += 1
}
case _ =>
}
var x = b.first
while (x != null) {
val n = x.next
spill(x)
x = n
}
}
}
def spillLocals(): Unit = {
createSpillFields()
for (splitM <- splitMethods) {
spillLocals(splitM)
}
spillLocals(m)
}
def fixSplitReturnCalls(): Unit = {
for (s <- splitReturnCalls) {
val method = s.containingMethod()
assert(method != null)
def getSpills(): ValueX = {
if (method eq m)
load(spills)
else
load(new Parameter(method, 1, spillsClass.ti))
}
val Lafter = new Block()
Lafter.method = method
      while (s.next != null) {
val n = s.next
n.remove()
Lafter.append(n)
}
val Lreturn = new Block()
Lreturn.method = method
if (method.returnTypeInfo != UnitInfo) {
if (method eq m)
Lreturn.append(returnx(getField(spillReturnValue, getSpills())))
else
Lreturn.append(returnx(defaultValue(method.returnTypeInfo)))
} else
Lreturn.append(returnx())
s.insertAfter(
ifx(IFNE,
getField(spillReturned, getSpills()),
Lreturn,
Lafter))
}
}
// updated while we split
private val updatedBlocks = blocks.toArray
private val blockSplitsReturn = new java.util.BitSet(nBlocks)
private def splitSlice(start: Int, end: Int): Unit = {
val Lstart = updatedBlocks(blockPartitions.find(start))
val Lend = updatedBlocks(blockPartitions.find(end))
var splitsReturn: Boolean = false
val regionBlocks = mutable.Set[Block]()
(start to end).foreach { i =>
val b = blockPartitions.find(i)
if (blockSplitsReturn.get(b))
splitsReturn = true
val L = updatedBlocks(b)
if (L != null) {
updatedBlocks(b) = null
regionBlocks += L
}
}
localsToSpill.or(liveness.liveIn(start))
for (s <- cfg.succ(end)) {
localsToSpill.or(liveness.liveIn(s))
}
// replacement block for region
val newL = new Block()
newL.method = m
// don't iterate over set that's being modified
val uses2 = Lstart.uses.toArray
for ((x, i) <- uses2) {
val xL = x.containingBlock()
assert(xL != null)
if (!regionBlocks(xL))
x.setTarget(i, newL)
}
if (m.entry == Lstart)
m.setEntry(newL)
(start to end).foreach { i =>
blockPartitions.union(start, i)
}
updatedBlocks(blockPartitions.find(start)) = newL
val returnTI = Lend.last match {
case _: GotoX => UnitInfo
case x: IfX =>
if (!regionBlocks(x.Ltrue) && !regionBlocks(x.Lfalse))
BooleanInfo
else
UnitInfo
case _: SwitchX => IntInfo
case _: ReturnX => m.returnTypeInfo
case _: ThrowX => UnitInfo
}
val splitM = c.newMethod(s"${ m.name }_region${ start }_$end", FastIndexedSeq(spillsClass.ti), returnTI)
splitMethods += splitM
splitM.setEntry(Lstart)
for (b <- regionBlocks) {
b.method = splitM
// handle split return statements
if (b ne Lend) {
b.last match {
case x: ReturnX =>
splitsReturn = true
x.remove()
b.append(putField(spillReturned, load(spills), ldcInsn(1, BooleanInfo)))
if (m.returnTypeInfo != UnitInfo) {
val v = x.children(0)
v.remove()
b.append(putField(spillReturnValue, load(spills), v))
}
if (returnTI != UnitInfo)
b.append(returnx(defaultValue(returnTI)))
else
b.append(returnx())
case _ =>
}
}
}
var splitMReturnValue: ValueX = null
{
if (returnTI == UnitInfo) {
val s = methodStmt(INVOKEVIRTUAL, splitM, Array(load(m.getParam(0)), load(spills)))
newL.append(s)
if (splitsReturn)
splitReturnCalls += s
} else {
splitMReturnValue = methodInsn(INVOKEVIRTUAL, splitM, Array(load(m.getParam(0)), load(spills)))
if (splitsReturn) {
val l = m.newLocal("splitMReturnValue", returnTI)
val s = store(l, splitMReturnValue)
newL.append(s)
splitReturnCalls += s
splitMReturnValue = load(l)
}
}
}
Lend.last match {
case x: GotoX =>
if (regionBlocks(x.L))
newL.append(throwUnreachable())
else {
x.remove()
Lend.append(returnx())
newL.append(x)
}
case _: ThrowX =>
newL.append(throwUnreachable())
case x: IfX =>
if (regionBlocks(x.Ltrue)) {
if (regionBlocks(x.Lfalse))
newL.append(throwUnreachable())
else {
newL.append(goto(x.Lfalse))
val Lreturn = new Block()
Lreturn.method = splitM
Lreturn.append(returnx())
x.setLfalse(Lreturn)
}
} else {
if (regionBlocks(x.Lfalse)) {
newL.append(goto(x.Ltrue))
val Lreturn = new Block()
Lreturn.method = splitM
Lreturn.append(returnx())
x.setLtrue(Lreturn)
} else {
newL.append(
ifx(IFNE, splitMReturnValue, x.Ltrue, x.Lfalse))
val newLtrue = new Block()
newLtrue.method = splitM
newLtrue.append(returnx(ldcInsn(1, BooleanInfo)))
x.setLtrue(newLtrue)
val newLfalse = new Block()
newLfalse.method = splitM
newLfalse.append(returnx(ldcInsn(0, BooleanInfo)))
x.setLfalse(newLfalse)
}
}
case x: SwitchX =>
// FIXME potential for optimization like if
val idx = x.children(0)
idx.remove()
val lidx = splitM.newLocal("switch_index", IntInfo)
x.insertBefore(store(lidx, idx))
x.setChild(0, load(lidx))
val Lreturn = new Block()
Lreturn.method = splitM
Lreturn.append(returnx(load(lidx)))
val newSwitch = switch(splitMReturnValue, x.Ldefault, x.Lcases)
var i = 0
while (i < x.targetArity()) {
val L = x.target(i)
if (regionBlocks(L))
// null is replaced with throw new SplitUnreachable
newSwitch.setTarget(i, null)
else
x.setTarget(i, Lreturn)
i += 1
}
newL.append(newSwitch)
case _: ReturnX =>
if (m.returnTypeInfo == UnitInfo)
newL.append(returnx())
else
newL.append(returnx(splitMReturnValue))
}
val b = blockPartitions.find(start)
blockSize(b) = newL.approxByteCodeSize()
if (splitsReturn)
blockSplitsReturn.set(b)
}
// utility class
class SizedRange(val start: Int, val end: Int, val size: Int) {
override def toString: String = s"$start-$end/$size"
}
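  // Coalesces adjacent subregions and splits the largest ones until the combined
  // size fits under the target method size; returns the resulting size.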
def splitSubregions(subr0: mutable.ArrayBuffer[SizedRange]): Int = {
var subr = subr0
var size = subr.iterator.map(_.size).sum
var changed = true
while (changed &&
size > SplitMethod.TargetMethodSize) {
changed = false
val coalescedsubr = new mutable.ArrayBuffer[SizedRange]()
var i = 0
while (i < subr.size) {
var s = subr(i).size
var j = i + 1
while (j < subr.size &&
subr(j).start == subr(i).end + 1 &&
pst.splitBlock(subr(i).end) &&
(s + subr(j).size < SplitMethod.TargetMethodSize)) {
s += subr(j).size
j += 1
}
coalescedsubr += new SizedRange(subr(i).start, subr(j - 1).end, s)
i = j
}
val sortedsubr = coalescedsubr.sortBy(_.size)
val newsubr = mutable.ArrayBuffer[SizedRange]()
i = sortedsubr.length - 1
while (i >= 0) {
val ri = sortedsubr(i)
if (ri.size > 20 &&
size > SplitMethod.TargetMethodSize) {
size -= ri.size
splitSlice(ri.start, ri.end)
val s = blockSize(blockPartitions.find(ri.start))
assert(s < 20)
size += s
newsubr += new SizedRange(ri.start, ri.end, s)
changed = true
} else
newsubr += ri
i -= 1
}
subr = newsubr.sortBy(_.start)
}
size
}
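  // Walks the regions bottom-up (children before parents), splitting oversized
  // subregions and recording each region's resulting size.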
def splitRegions(): Unit = {
val regionSize = new Array[Int](pst.nRegions)
var i = 0
while (i < pst.nRegions) {
val r = pst.regions(i)
if (r.children.nonEmpty) {
val subr = mutable.ArrayBuffer[SizedRange]()
subr ++= r.children.iterator.map { ci =>
val cr = pst.regions(ci)
assert(ci < i)
new SizedRange(cr.start, cr.end, regionSize(ci))
}
regionSize(i) = splitSubregions(subr)
} else {
assert(r.start == r.end)
regionSize(i) = blockSize(blockPartitions.find(r.start))
}
/*
if (i != pst.root && pst.loopRegion.get(i)) {
splitSlice(r.start, r.end)
regionSize(i) = blockSize(blockPartitions.find(r.start))
}
*/
i += 1
}
}
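  // Top-level driver: measure block sizes, split oversized regions, spill locals
  // that cross split boundaries, patch return propagation through split calls,
  // and finally store the incoming parameters into the spills object.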
def split(): Unit = {
computeBlockSizes()
splitRegions()
spillLocals()
fixSplitReturnCalls()
// spill parameters
var x: StmtX = store(spills, new NewInstanceX(spillsClass.ti, spillsCtor))
m.entry.prepend(x)
m.parameterTypeInfo.indices.foreach { i =>
val f = fields(i + 1)
assert(f != null)
val putParam = putField(
f,
load(spills),
load(m.getParam(i + 1)))
x.insertAfter(putParam)
x = putParam
}
}
}
| hail-is/hail | hail/src/main/scala/is/hail/lir/SplitMethod.scala | Scala | mit | 14,746 |
/*
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.request.builder
import io.gatling.commons.validation._
import io.gatling.core.{ ValidationImplicits, CoreComponents }
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session._
import io.gatling.http.cache.HttpCaches
import io.gatling.http.protocol._
import com.softwaremill.quicklens._
import org.asynchttpclient.Request
import org.asynchttpclient.Dsl._
import org.openjdk.jmh.annotations.Benchmark
object HttpRequestExpressionBuilderBenchmark extends ValidationImplicits {
private val config = GatlingConfiguration.loadForTest()
private val coreComponents = CoreComponents(
controller = null,
throttler = null,
statsEngine = null,
exit = null,
configuration = config
)
private val httpProtocol = HttpProtocolBuilder(config)
.baseURL("http://localhost:8000")
.build
private val httpCaches = new HttpCaches(config)
private val httpComponents = HttpComponents(
httpProtocol = httpProtocol,
httpEngine = null,
httpCaches = httpCaches,
responseProcessor = null
)
val Reference: Expression[Request] = _ =>
request("GET", "http://localhost:8000/ping")
.build.success
val SimpleRequest: Expression[Request] =
new Http("requestName").get("/ping")
.build(coreComponents, httpComponents, throttled = false).ahcRequest
val RequestWithStaticQueryParams: Expression[Request] =
new Http("requestName").get("/ping")
.queryParam("hello", "world")
.queryParam("foo", "bar")
.build(coreComponents, httpComponents, throttled = false).ahcRequest
val RequestWithDynamicUrl: Expression[Request] =
new Http("requestName").get("/ping?foo=${key}")
.build(coreComponents, httpComponents, throttled = false).ahcRequest
val RequestWithStaticHeaders: Expression[Request] = {
val httpProtocol = HttpProtocolBuilder(config)
.baseURL("http://localhost:8000")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("en-GB,en;q=0.5")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:35.0) Gecko/20100101 Firefox/35.0")
.build
new Http("requestName").get("/ping")
.build(
coreComponents,
httpComponents.modify(_.httpProtocol).setTo(httpProtocol),
throttled = false
).ahcRequest
}
val EmptySession: Session = Session("scenario", 0)
val NonEmptySession: Session = Session("scenario", 0, attributes = Map("key" -> "value"))
}
class HttpRequestExpressionBuilderBenchmark {
import HttpRequestExpressionBuilderBenchmark._
@Benchmark
def testReference(): Validation[Request] =
Reference(EmptySession)
@Benchmark
def testSimpleRequest(): Validation[Request] =
SimpleRequest(EmptySession)
@Benchmark
def testRequestWithStaticQueryParams(): Validation[Request] =
RequestWithStaticQueryParams(EmptySession)
@Benchmark
def testRequestWithStaticHeaders(): Validation[Request] =
RequestWithStaticHeaders(EmptySession)
@Benchmark
def testRequestWithDynamicUrl(): Validation[Request] =
RequestWithDynamicUrl(NonEmptySession)
}
| wiacekm/gatling | gatling-benchmarks/src/main/scala/io/gatling/http/request/builder/HttpRequestExpressionBuilderBenchmark.scala | Scala | apache-2.0 | 3,734 |
package com.seanshubin.schulze.persistence
import datomic.{Connection, Peer}
import ReferenceLookup._
import com.seanshubin.schulze.persistence.datomic_util.ScalaAdaptor
import ScalaAdaptor._
class DatomicPersistenceApi(connection: Connection) extends PersistenceApi {
def createElection(name: String) {
val db = connection.db()
if(countElection(db, name) > 0) return
val electionId = Peer.tempid(":db.part/user")
val election = Map(
":db/id" -> electionId,
":election/name" -> name)
transact(connection, Seq(election))
}
def deleteElection(name: String) {
val db = connection.db()
val election: Long = lookupElection(db, name)
val candidates: Seq[Long] = lookupCandidateIds(db, election)
val rankings: Seq[Long] = lookupRankingIdsByElection(db, election)
val entities: Seq[Long] = Seq(election) ++ candidates.toSeq ++ rankings.toSeq
DatomicUtil.deleteEntities(connection, entities)
}
def createVoter(name: String) {
val db = connection.db()
if(countVoter(db, name) > 0) return
val voterId = Peer.tempid(":db.part/user")
val voter = Map(
":db/id" -> voterId,
":voter/name" -> name)
transact(connection, Seq(voter))
}
def deleteVoter(name: String) {
val db = connection.db()
val voter: Long = lookupVoter(db, name)
val rankings: Seq[Long] = lookupRankingIdsByVoter(db, voter)
val entities: Seq[Long] = Seq(voter) ++ rankings.toSeq
DatomicUtil.deleteEntities(connection, entities)
}
def createCandidate(electionName: String, candidateName: String) {
val db = connection.db()
val candidateId = Peer.tempid(":db.part/user")
val election = lookupElection(db, electionName)
if(countCandidate(db, election, candidateName) > 0) return
val candidate = Map(
":db/id" -> candidateId,
":candidate/name" -> candidateName,
":candidate/election" -> election)
transact(connection, Seq(candidate))
}
def updateCandidate(electionName: String, candidateName: String, maybeDescription: Option[String]) {
val db = connection.db()
val election = lookupElection(db, electionName)
val (candidate, maybeOriginalDescription) = lookupCandidateAndDescription(db, election, candidateName)
maybeDescription match {
case Some(description) =>
val candidateMap = Map(
":db/id" -> candidate,
":candidate/description" -> description)
transact(connection, Seq(candidateMap))
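      // No new description supplied: retract the existing description fact, if any.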
case None => maybeOriginalDescription match {
case Some(originalDescription) =>
          val retraction = Seq(":db/retract", candidate, ":candidate/description", originalDescription)
          transact(connection, Seq(retraction))
case None =>
//do nothing
}
}
}
def deleteCandidate(electionName: String, candidateName: String) {
val db = connection.db()
val election: Long = lookupElection(db, electionName)
val candidate: Long = lookupCandidate(db, election, candidateName)
val rankings: Seq[Long] = lookupRankingIdsByCandidate(db, candidate)
val entities: Seq[Long] = Seq(candidate) ++ rankings.toSeq
DatomicUtil.deleteEntities(connection, entities)
}
def updateVote(electionName: String, voterName: String, rankingsByCandidateName: Map[String, Long]) {
UpdateVoteHelper.updateVote(connection, electionName, voterName, rankingsByCandidateName)
}
def resetVote(electionName: String, voterName: String) {
updateVote(electionName, voterName, Map())
}
def snapshot: PersistenceSnapshotApi = new DatomicPersistenceSnapshotApi(connection.db())
}
| SeanShubin/schulze | persistence/src/main/scala/com/seanshubin/schulze/persistence/DatomicPersistenceApi.scala | Scala | unlicense | 3,608 |
package org.aj.ner
import org.aj.ner.SNerPipeline.Pipeline
import org.scalatest.{FunSpecLike, Matchers}
/**
* Created by ajlnx on 9/24/16.
*/
class NerTest extends FunSpecLike with Matchers {
val sner = SNer()
  val input = Array[String](
    "Democratic presidential nominee Hillary Clinton.",
    "A homeowner in Los Angeles, dubbed the \"Wet Prince of Bel Air\" for reportedly using 11.8 million gallons of water last year during California's drought, remains unidentified by authorities, but the Center of Investigative Reporting has narrowed the list of possible perpetrators to seven.",
    "Republican presidential nominee Aa Bb.")
val phc = EmbeddedToken("PERSON", "Hillary Clinton")
val lla = EmbeddedToken("LOCATION", "Los Angeles")
val ola = EmbeddedToken("ORGANIZATION", "Los Angeles")
val aabb = EmbeddedToken("PERSON", "Aa Bb")
describe("StanfordCoreNLP module only to process input and identify PERSON, and LOCATION tags in a text") {
val pipeLine: Pipeline = SNerPipeline()
it("Should be able to identify EmbeddedToken(PERSON,Hillary Clinton)") {
val tokens = sner.process(pipeLine, input, Set.empty[String])
tokens.contains(phc) should equal(true)
}
it("Should be able to identify EmbeddedToken(LOCATION,Los Angeles)") {
val tokens = sner.process(pipeLine, input, Set("LOCATION"))
tokens.contains(lla) should equal(true)
}
it("Should not be able to identify Aa Bb as a PERSON") {
val tokens = sner.process(pipeLine, input, Set("PERSON"))
tokens.contains(aabb) should equal(false)
}
}
describe("StanfordCoreNLP module with RegexNER to improve accuracy, or override CoreNLP results.") {
val regexnerPathTest = Some("./model/myTokensRegex.txt")
val nerPathTest = Some("./lib/english.all.3class.distsim.crf.ser.gz")
val pipeLine = SNerPipeline(regexnerPath = regexnerPathTest)
it("Should still be able to identify EmbeddedToken(PERSON,Hillary Clinton).") {
val tokens = sner.process(pipeLine, "Democratic presidential nominee Hillary Clinton.", Set("PERSON"))
tokens.contains(phc) should equal(true)
}
it("Should be able to identify Los Angeles as EmbeddedToken(ORGANIZATION,Los Angeles), LOCATION value should be overriden via RegexNER source file.") {
val tokens = sner.process(pipeLine, input, Set("ORGANIZATION"))
tokens.contains(ola) should equal(true)
}
it("Should be able to identify Aa Bb as EmbeddedToken(PERSON,Aa Bb). RegexNER is used to improve the coverage.") {
val tokens = sner.process(pipeLine, input, Set("PERSON"))
tokens.contains(aabb) should equal(true)
}
}
}
| ajmnsk/ner | src/test/scala/org/aj/ner/NerTest.scala | Scala | mit | 2,669 |
import org.specs2.mutable._
import play.api.libs.iteratee._
import reactivemongo.api._
import reactivemongo.api.gridfs.{ReadFile, DefaultFileToSave, GridFS}
import reactivemongo.api.gridfs.Implicits._
import reactivemongo.bson._
import scala.concurrent._
import reactivemongo.api.gridfs
import scala.concurrent.duration._
class GridfsSpec extends Specification {
import Common._
sequential
lazy val gfs = GridFS(db)
lazy val file = DefaultFileToSave("somefile", Some("application/file"))
lazy val fileContent = Enumerator((1 to 100).view.map(_.toByte).toArray)
"ReactiveMongo" should {
"store a file in gridfs" in {
val actual = Await.result(gfs.save(fileContent, file), timeout)
actual.filename mustEqual "somefile"
}
"find this file in gridfs" in {
val futureFile = gfs.find(BSONDocument("filename" -> "somefile")).collect[List]()
val actual = Await.result(futureFile, timeout).head
(actual.filename mustEqual file.filename) and
(actual.uploadDate must beSome) and
(actual.contentType mustEqual file.contentType)
import scala.collection.mutable.ArrayBuilder
val res = Await.result(gfs.enumerate(actual) |>>> Iteratee.fold(ArrayBuilder.make[Byte]()) { (result, arr) =>
result ++= arr
}, timeout)
res.result mustEqual ((1 to 100).map(_.toByte).toArray)
}
"delete this file from gridfs" in {
val actual = Await.result(gfs.remove(file.id), timeout)
actual.n mustEqual 1
}
}
}
| lunatech-labs/lunatech-reactivemongo | driver/src/test/scala/GridfsSpec.scala | Scala | apache-2.0 | 1,506 |
import sbt.Keys.version
import sbt._
object Common {
val versionString = "1.46"
val scalaVersionString = "2.11.8"
val organisationString = "dvla"
val organisationNameString = "Driver & Vehicle Licensing Agency"
val nexus = "http://rep002-01.skyscape.preview-dvla.co.uk:8081/nexus/content/repositories"
val scalaOptionsSeq = Seq(
"-deprecation",
"-unchecked",
"-feature",
"-Xlint",
"-language:reflectiveCalls",
"-Xmax-classfile-name", "128"
)
val projectResolvers = Seq(
"typesafe repo" at "http://repo.typesafe.com/typesafe/releases",
"spray repo" at "http://repo.spray.io/",
"local nexus snapshots" at s"$nexus/snapshots",
"local nexus releases" at s"$nexus/releases"
)
val publishResolver: sbt.Def.Initialize[Option[sbt.Resolver]] = version { v: String =>
if (v.trim.endsWith("SNAPSHOT"))
Some("snapshots" at s"$nexus/snapshots")
else
Some("releases" at s"$nexus/releases")
}
val sbtCredentials = Credentials(Path.userHome / ".sbt/.credentials")
}
| dvla/vrm-assign-online | project/Common.scala | Scala | mit | 1,039 |
package monocle.bench
import java.util.concurrent.TimeUnit
import monocle.bench.BenchModel._
import monocle.bench.input.Nested0Input
import org.openjdk.jmh.annotations._
import shapeless._
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Benchmark)
class ShapelessLensBench extends LensBench {
val _n1 = lens[Nested0].n
val _n2 = lens[Nested1].n
val _n3 = lens[Nested2].n
val _n4 = lens[Nested3].n
val _n5 = lens[Nested4].n
val _n6 = lens[Nested5].n
val _n0_i = lens[Nested0].i
val _n3_i = lens[Nested3].i
val _n6_i = lens[Nested6].i
val _n0Ton3I = _n3_i compose _n3 compose _n2 compose _n1
val _n0Ton6I = _n6_i compose _n6 compose _n5 compose _n4 compose _n3 compose _n2 compose _n1
@Benchmark def lensGet0(in: Nested0Input) = _n0_i.get(in.n0)
@Benchmark def lensGet3(in: Nested0Input) = _n0Ton3I.get(in.n0)
@Benchmark def lensGet6(in: Nested0Input) = _n0Ton6I.get(in.n0)
@Benchmark def lensSet0(in: Nested0Input) = _n0_i.set(in.n0)(43)
@Benchmark def lensSet3(in: Nested0Input) = _n0Ton3I.set(in.n0)(43)
@Benchmark def lensSet6(in: Nested0Input) = _n0Ton6I.set(in.n0)(43)
@Benchmark def lensModify0(in: Nested0Input) = _n0_i.modify(in.n0)(_ + 1)
@Benchmark def lensModify3(in: Nested0Input) = _n0Ton3I.modify(in.n0)(_ + 1)
@Benchmark def lensModify6(in: Nested0Input) = _n0Ton6I.modify(in.n0)(_ + 1)
def lensModifyF0(in: Nested0Input): Option[Nested0] = ???
def lensModifyF3(in: Nested0Input): Option[Nested0] = ???
def lensModifyF6(in: Nested0Input): Option[Nested0] = ???
}
| aoiroaoino/Monocle | bench/src/main/scala/monocle/bench/ShapelessLensBench.scala | Scala | mit | 1,655 |
package edu.rice.habanero.benchmarks.bitonicsort
import edu.rice.habanero.actors.{ScalazActor, ScalazActorState, ScalazPool}
import edu.rice.habanero.benchmarks.philosopher.PhilosopherAkkaActorBenchmark.ExitMessage
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner, PseudoRandom}
import scala.collection.mutable.ListBuffer
/**
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> ([email protected])
*/
object BitonicSortScalazActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new BitonicSortScalazActorBenchmark)
}
private final class BitonicSortScalazActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
BitonicSortConfig.parseArgs(args)
}
def printArgInfo() {
BitonicSortConfig.printArgs()
}
def runIteration() {
val validationActor = new ValidationActor(BitonicSortConfig.N)
validationActor.start()
val adapterActor = new DataValueAdapterActor(validationActor)
adapterActor.start()
val kernelActor = new BitonicSortKernelActor(BitonicSortConfig.N, true, adapterActor)
kernelActor.start()
val sourceActor = new IntSourceActor(BitonicSortConfig.N, BitonicSortConfig.M, BitonicSortConfig.S, kernelActor)
sourceActor.start()
sourceActor.send(StartMessage())
ScalazActorState.awaitTermination()
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double): Unit = {
if (lastIteration) {
ScalazPool.shutdown()
}
}
}
private case class NextActorMessage(actor: ScalazActor[AnyRef])
private case class ValueMessage(value: Long)
private case class DataMessage(orderId: Int, value: Long)
private case class StartMessage()
private class ValueDataAdapterActor(orderId: Int, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
nextActor.send(new DataMessage(orderId, vm.value))
case dm: DataMessage =>
nextActor.send(dm)
case em: ExitMessage =>
nextActor.send(em)
exit()
}
}
}
private class DataValueAdapterActor(nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
nextActor.send(vm)
case dm: DataMessage =>
nextActor.send(new ValueMessage(dm.value))
case em: ExitMessage =>
nextActor.send(em)
exit()
}
}
}
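  /**
   * Distributes incoming values over the receivers round-robin, sending
   * `length` consecutive values to each receiver before moving to the next.
   */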
private class RoundRobinSplitterActor(name: String, length: Int, receivers: Array[ScalazActor[AnyRef]]) extends ScalazActor[AnyRef] {
private var receiverIndex = 0
private var currentRun = 0
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
receivers(receiverIndex).send(vm)
currentRun += 1
if (currentRun == length) {
receiverIndex = (receiverIndex + 1) % receivers.length
currentRun = 0
}
case em: ExitMessage =>
receivers.foreach(loopActor => loopActor.send(em))
exit()
}
}
}
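  /**
   * Buffers data from `numJoiners` upstream producers (keyed by order id) and
   * forwards it downstream in round-robin order, `length` values per producer turn.
   */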
private class RoundRobinJoinerActor(name: String, length: Int, numJoiners: Int, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
private val receivedData = Array.tabulate[ListBuffer[DataMessage]](numJoiners)(i => new ListBuffer[DataMessage]())
private var forwardIndex = 0
private var currentRun = 0
private var exitsReceived = 0
override def process(msg: AnyRef) {
msg match {
case dm: DataMessage =>
receivedData(dm.orderId).append(dm)
tryForwardMessages(dm)
case em: ExitMessage =>
exitsReceived += 1
if (exitsReceived == numJoiners) {
nextActor.send(em)
exit()
}
}
}
def tryForwardMessages(dm: DataMessage) {
while (receivedData(forwardIndex).nonEmpty) {
val dm = receivedData(forwardIndex).remove(0)
val vm = new ValueMessage(dm.value)
nextActor.send(vm)
currentRun += 1
if (currentRun == length) {
forwardIndex = (forwardIndex + 1) % numJoiners
currentRun = 0
}
}
}
}
/**
* Compares the two input keys and exchanges their order if they are not sorted.
*
* sortDirection determines if the sort is nondecreasing (UP) [true] or nonincreasing (DOWN) [false].
*/
private class CompareExchangeActor(orderId: Int, sortDirection: Boolean, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
private var k1: Long = 0
private var valueAvailable = false
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
if (!valueAvailable) {
valueAvailable = true
k1 = vm.value
} else {
valueAvailable = false
val k2 = vm.value
val (minK, maxK) = if (k1 <= k2) (k1, k2) else (k2, k1)
if (sortDirection) {
// UP sort
nextActor.send(DataMessage(orderId, minK))
nextActor.send(DataMessage(orderId, maxK))
} else {
// DOWN sort
nextActor.send(DataMessage(orderId, maxK))
nextActor.send(DataMessage(orderId, minK))
}
}
case em: ExitMessage =>
nextActor.send(em)
exit()
}
}
}
/**
* Partition the input bitonic sequence of length L into two bitonic sequences of length L/2,
* with all numbers in the first sequence <= all numbers in the second sequence if sortdir is UP (similar case for DOWN sortdir)
*
* Graphically, it is a bunch of CompareExchanges with same sortdir, clustered together in the sort network at a particular step (of some merge stage).
*/
private class PartitionBitonicSequenceActor(orderId: Int, length: Int, sortDir: Boolean, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
val halfLength = length / 2
val forwardActor = {
val actor = new ValueDataAdapterActor(orderId, nextActor)
actor.start()
actor
}
val joinerActor = {
val actor = new RoundRobinJoinerActor("Partition-" + orderId, 1, halfLength, forwardActor)
actor.start()
actor
}
val workerActors = Array.tabulate[ScalazActor[AnyRef]](halfLength)(i => {
val actor = new CompareExchangeActor(i, sortDir, joinerActor)
actor.start()
actor
})
val splitterActor = {
val actor = new RoundRobinSplitterActor("Partition-" + orderId, 1, workerActors)
actor.start()
actor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
splitterActor.send(vm)
case em: ExitMessage =>
splitterActor.send(em)
exit()
}
}
}
/**
* One step of a particular merge stage (used by all merge stages except the last)
*
* directionCounter determines which step we are in the current merge stage (which in turn is determined by <L, numSeqPartitions>)
*/
private class StepOfMergeActor(orderId: Int, length: Int, numSeqPartitions: Int, directionCounter: Int, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
val forwardActor = {
val actor = new DataValueAdapterActor(nextActor)
actor.start()
actor
}
val joinerActor = {
val actor = new RoundRobinJoinerActor("StepOfMerge-" + orderId + ":" + length, length, numSeqPartitions, forwardActor)
actor.start()
actor
}
val workerActors = Array.tabulate[ScalazActor[AnyRef]](numSeqPartitions)(i => {
// finding out the currentDirection is a bit tricky -
// the direction depends only on the subsequence number during the FIRST step.
// So to determine the FIRST step subsequence to which this sequence belongs, divide this sequence's number j by directionCounter
      // (because 'directionCounter' tells how many subsequences of the current step make up one subsequence of the FIRST step).
// Then, test if that result is even or odd to determine if currentDirection is UP or DOWN respectively.
val currentDirection = (i / directionCounter) % 2 == 0
// The last step needs special care to avoid split-joins with just one branch.
if (length > 2) {
val actor = new PartitionBitonicSequenceActor(i, length, currentDirection, joinerActor)
actor.start()
actor
} else {
// PartitionBitonicSequence of the last step (L=2) is simply a CompareExchange
val actor = new CompareExchangeActor(i, currentDirection, joinerActor)
actor.start()
actor
}
})
val splitterActor = {
val actor = new RoundRobinSplitterActor("StepOfMerge-" + orderId + ":" + length, length, workerActors)
actor.start()
actor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
splitterActor.send(vm)
case em: ExitMessage =>
splitterActor.send(em)
exit()
}
}
}
/**
* One step of the last merge stage
*
* Main difference form StepOfMerge is the direction of sort.
* It is always in the same direction - sortdir.
*/
private class StepOfLastMergeActor(length: Int, numSeqPartitions: Int, sortDirection: Boolean, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
val joinerActor = {
val actor = new RoundRobinJoinerActor("StepOfLastMerge-" + length, length, numSeqPartitions, nextActor)
actor.start()
actor
}
val workerActors = Array.tabulate[ScalazActor[AnyRef]](numSeqPartitions)(i => {
// The last step needs special care to avoid split-joins with just one branch.
if (length > 2) {
val actor = new PartitionBitonicSequenceActor(i, length, sortDirection, joinerActor)
actor.start()
actor
} else {
// PartitionBitonicSequence of the last step (L=2) is simply a CompareExchange
val actor = new CompareExchangeActor(i, sortDirection, joinerActor)
actor.start()
actor
}
})
val splitterActor = {
val actor = new RoundRobinSplitterActor("StepOfLastMerge-" + length, length, workerActors)
actor.start()
actor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
splitterActor.send(vm)
case em: ExitMessage =>
splitterActor.send(em)
exit()
}
}
}
/**
* Divide the input sequence of length N into subsequences of length P and sort each of them
* (either UP or DOWN depending on what subsequence number [0 to N/P-1] they get.
* All even subsequences are sorted UP and all odd subsequences are sorted DOWN).
* In short, a MergeStage is N/P Bitonic Sorters of order P each.
* But, this MergeStage is implemented *iteratively* as logP STEPS.
*/
private class MergeStageActor(P: Int, N: Int, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
val forwardActor = {
var loopActor: ScalazActor[AnyRef] = nextActor
      // for each of the logP steps (except the last step) of this merge stage
var i = P / 2
while (i >= 1) {
// length of each sequence for the current step - goes like P, P/2, ..., 2.
val L = P / i
// numSeqPartitions is the number of PartitionBitonicSequence-rs in this step
val numSeqPartitions = (N / P) * i
val directionCounter = i
val tempActor = new StepOfMergeActor(i, L, numSeqPartitions, directionCounter, loopActor)
tempActor.start()
loopActor = tempActor
i /= 2
}
loopActor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
forwardActor.send(vm)
case em: ExitMessage =>
forwardActor.send(em)
exit()
}
}
}
/**
* The LastMergeStage is basically one Bitonic Sorter of order N i.e.,
* it takes the bitonic sequence produced by the previous merge stages
* and applies a bitonic merge on it to produce the final sorted sequence.
*
* This is implemented iteratively as logN steps.
*/
private class LastMergeStageActor(N: Int, sortDirection: Boolean, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
val forwardActor = {
var loopActor: ScalazActor[AnyRef] = nextActor
      // for each of the logN steps (except the last step) of this merge stage
var i = N / 2
while (i >= 1) {
// length of each sequence for the current step - goes like N, N/2, ..., 2.
val L = N / i
// numSeqPartitions is the number of PartitionBitonicSequence-rs in this step
val numSeqPartitions = i
val tempActor = new StepOfLastMergeActor(L, numSeqPartitions, sortDirection, loopActor)
tempActor.start()
loopActor = tempActor
i /= 2
}
loopActor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
forwardActor.send(vm)
case em: ExitMessage =>
forwardActor.send(em)
exit()
}
}
}
/**
* The top-level kernel of bitonic-sort (iterative version) -
   * It has logN merge stages and all merge stages except the last progressively build a bitonic sequence out of the input sequence.
* The last merge stage acts on the resultant bitonic sequence to produce the final sorted sequence (sortdir determines if it is UP or DOWN).
*/
private class BitonicSortKernelActor(N: Int, sortDirection: Boolean, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
val forwardActor = {
var loopActor: ScalazActor[AnyRef] = nextActor
{
val tempActor = new LastMergeStageActor(N, sortDirection, loopActor)
tempActor.start()
loopActor = tempActor
}
var i = N / 2
while (i >= 2) {
val tempActor = new MergeStageActor(i, N, loopActor)
tempActor.start()
loopActor = tempActor
i /= 2
}
loopActor
}
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
forwardActor.send(vm)
case em: ExitMessage =>
forwardActor.send(em)
exit()
}
}
}
private class IntSourceActor(numValues: Int, maxValue: Long, seed: Long, nextActor: ScalazActor[AnyRef]) extends ScalazActor[AnyRef] {
private val random = new PseudoRandom(seed)
private val sb = new StringBuilder()
override def process(msg: AnyRef) {
msg match {
case nm: StartMessage =>
var i = 0
while (i < numValues) {
val candidate = Math.abs(random.nextLong()) % maxValue
if (BitonicSortConfig.debug) {
sb.append(candidate + " ")
}
val message = new ValueMessage(candidate)
nextActor.send(message)
i += 1
}
if (BitonicSortConfig.debug) {
println(" SOURCE: " + sb)
}
nextActor.send(ExitMessage())
exit()
}
}
}
private class ValidationActor(numValues: Int) extends ScalazActor[AnyRef] {
private var sumSoFar = 0.0
private var valuesSoFar = 0
private var prevValue = 0L
private var errorValue = (-1L, -1)
private val sb = new StringBuilder()
override def process(msg: AnyRef) {
msg match {
case vm: ValueMessage =>
valuesSoFar += 1
if (BitonicSortConfig.debug) {
sb.append(vm.value + " ")
}
if (vm.value < prevValue && errorValue._1 < 0) {
errorValue = (vm.value, valuesSoFar - 1)
}
prevValue = vm.value
sumSoFar += prevValue
case em: ExitMessage =>
if (valuesSoFar == numValues) {
if (BitonicSortConfig.debug) {
println(" OUTPUT: " + sb)
}
if (errorValue._1 >= 0) {
println(" ERROR: Value out of place: " + errorValue._1 + " at index " + errorValue._2)
} else {
println(" Elements sum: " + sumSoFar)
}
} else {
println(" ERROR: early exit triggered, received only " + valuesSoFar + " values!")
}
exit()
}
}
}
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/bitonicsort/BitonicSortScalazActorBenchmark.scala | Scala | gpl-2.0 | 16,599 |
/*
* Copyright 2016 Miroslav Janíček
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sandius.rembulan.test.fragments
import net.sandius.rembulan.test.FragmentExecTestSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class StringLibFragmentsRunSpec extends FragmentExecTestSuite {
override def bundles = Seq(StringLibFragments)
override def expectations = Seq(StringLibFragments)
override def contexts = Seq(Str, Full)
override def steps = Seq(1, Int.MaxValue)
}
| mjanicek/rembulan | rembulan-tests/src/test/scala/net/sandius/rembulan/test/fragments/StringLibFragmentsRunSpec.scala | Scala | apache-2.0 | 1,064 |
/*
* Copyright (c) 2013-2014 Telefónica Investigación y Desarrollo S.A.U.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.tid.cosmos.api.profile
import org.scalatest.FlatSpec
import org.scalatest.matchers.MustMatchers
class HandleConstraintTest extends FlatSpec with MustMatchers {
"Handle constraint" must "reject invalid unix handles" in {
HandleConstraint("not a unix handle") must be (false)
}
it must "reject blacklisted names" in {
HandleConstraint.blacklist.foreach { handle =>
HandleConstraint(handle) must be (false)
}
}
it must "accept valid unix names" in {
HandleConstraint("MrValidHandle123") must be (true)
}
}
| telefonicaid/fiware-cosmos-platform | cosmos-api/test/es/tid/cosmos/api/profile/HandleConstraintTest.scala | Scala | apache-2.0 | 1,193 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalBoolean, Linked}
import uk.gov.hmrc.ct.computations.CPQ18
case class B31(value: Option[Boolean]) extends CtBoxIdentifier with CtOptionalBoolean
object B31 extends Linked[CPQ18, B31] {
override def apply(source: CPQ18): B31 = B31(source.value)
}
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B31.scala | Scala | apache-2.0 | 942 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io.IOException
import java.security.PrivilegedExceptionAction
import java.text.DateFormat
import java.util.{Arrays, Comparator, Date, Locale}
import scala.collection.JavaConverters._
import scala.collection.immutable.Map
import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.util.control.NonFatal
import com.google.common.primitives.Longs
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path, PathFilter}
import org.apache.hadoop.fs.permission.FsAction
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.security.{Credentials, UserGroupInformation}
import org.apache.hadoop.security.token.{Token, TokenIdentifier}
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
/**
* :: DeveloperApi ::
* Contains util methods to interact with Hadoop from Spark.
*/
@DeveloperApi
class SparkHadoopUtil extends Logging {
private val sparkConf = new SparkConf(false).loadFromSystemProperties(true)
val conf: Configuration = newConfiguration(sparkConf)
UserGroupInformation.setConfiguration(conf)
/**
* Runs the given function with a Hadoop UserGroupInformation as a thread local variable
* (distributed to child threads), used for authenticating HDFS and YARN calls.
*
   * IMPORTANT NOTE: If this function is going to be called repeatedly in the same process
   * you need to look at https://issues.apache.org/jira/browse/HDFS-3545 and possibly
* do a FileSystem.closeAllForUGI in order to avoid leaking Filesystems
*/
def runAsSparkUser(func: () => Unit) {
val user = Utils.getCurrentUserName()
logDebug("running as user: " + user)
val ugi = UserGroupInformation.createRemoteUser(user)
transferCredentials(UserGroupInformation.getCurrentUser(), ugi)
ugi.doAs(new PrivilegedExceptionAction[Unit] {
def run: Unit = func()
})
}
def transferCredentials(source: UserGroupInformation, dest: UserGroupInformation) {
for (token <- source.getTokens.asScala) {
dest.addToken(token)
}
}
/**
* Appends S3-specific, spark.hadoop.*, and spark.buffer.size configurations to a Hadoop
* configuration.
*/
def appendS3AndSparkHadoopConfigurations(conf: SparkConf, hadoopConf: Configuration): Unit = {
// Note: this null check is around more than just access to the "conf" object to maintain
// the behavior of the old implementation of this code, for backwards compatibility.
if (conf != null) {
// Explicitly check for S3 environment variables
val keyId = System.getenv("AWS_ACCESS_KEY_ID")
val accessKey = System.getenv("AWS_SECRET_ACCESS_KEY")
if (keyId != null && accessKey != null) {
hadoopConf.set("fs.s3.awsAccessKeyId", keyId)
hadoopConf.set("fs.s3n.awsAccessKeyId", keyId)
hadoopConf.set("fs.s3a.access.key", keyId)
hadoopConf.set("fs.s3.awsSecretAccessKey", accessKey)
hadoopConf.set("fs.s3n.awsSecretAccessKey", accessKey)
hadoopConf.set("fs.s3a.secret.key", accessKey)
val sessionToken = System.getenv("AWS_SESSION_TOKEN")
if (sessionToken != null) {
hadoopConf.set("fs.s3a.session.token", sessionToken)
}
}
appendSparkHadoopConfigs(conf, hadoopConf)
val bufferSize = conf.get("spark.buffer.size", "65536")
hadoopConf.set("io.file.buffer.size", bufferSize)
}
}
/**
* Appends spark.hadoop.* configurations from a [[SparkConf]] to a Hadoop
* configuration without the spark.hadoop. prefix.
*/
def appendSparkHadoopConfigs(conf: SparkConf, hadoopConf: Configuration): Unit = {
// Copy any "spark.hadoop.foo=bar" spark properties into conf as "foo=bar"
for ((key, value) <- conf.getAll if key.startsWith("spark.hadoop.")) {
hadoopConf.set(key.substring("spark.hadoop.".length), value)
}
}
/**
* Appends spark.hadoop.* configurations from a Map to another without the spark.hadoop. prefix.
*/
def appendSparkHadoopConfigs(
srcMap: Map[String, String],
destMap: HashMap[String, String]): Unit = {
// Copy any "spark.hadoop.foo=bar" system properties into destMap as "foo=bar"
for ((key, value) <- srcMap if key.startsWith("spark.hadoop.")) {
destMap.put(key.substring("spark.hadoop.".length), value)
}
}
/**
   * Return an appropriate (subclass of) Configuration. Creating the config can initialize some
   * Hadoop subsystems.
*/
def newConfiguration(conf: SparkConf): Configuration = {
val hadoopConf = new Configuration()
appendS3AndSparkHadoopConfigurations(conf, hadoopConf)
hadoopConf
}
/**
* Add any user credentials to the job conf which are necessary for running on a secure Hadoop
* cluster.
*/
def addCredentials(conf: JobConf) {}
def isYarnMode(): Boolean = { false }
def getCurrentUserCredentials(): Credentials = { null }
def addCurrentUserCredentials(creds: Credentials) {}
def addSecretKeyToUserCredentials(key: String, secret: String) {}
def getSecretKeyFromUserCredentials(key: String): Array[Byte] = { null }
def loginUserFromKeytab(principalName: String, keytabFilename: String) {
UserGroupInformation.loginUserFromKeytab(principalName, keytabFilename)
}
/**
* Returns a function that can be called to find Hadoop FileSystem bytes read. If
* getFSBytesReadOnThreadCallback is called from thread r at time t, the returned callback will
* return the bytes read on r since t.
*/
private[spark] def getFSBytesReadOnThreadCallback(): () => Long = {
val f = () => FileSystem.getAllStatistics.asScala.map(_.getThreadStatistics.getBytesRead).sum
val baseline = (Thread.currentThread().getId, f())
/**
* This function may be called in both spawned child threads and parent task thread (in
* PythonRDD), and Hadoop FileSystem uses thread local variables to track the statistics.
* So we need a map to track the bytes read from the child threads and parent thread,
* summing them together to get the bytes read of this task.
*/
new Function0[Long] {
private val bytesReadMap = new mutable.HashMap[Long, Long]()
override def apply(): Long = {
bytesReadMap.synchronized {
bytesReadMap.put(Thread.currentThread().getId, f())
bytesReadMap.map { case (k, v) =>
v - (if (k == baseline._1) baseline._2 else 0)
}.sum
}
}
}
}
/**
* Returns a function that can be called to find Hadoop FileSystem bytes written. If
* getFSBytesWrittenOnThreadCallback is called from thread r at time t, the returned callback will
* return the bytes written on r since t.
*/
private[spark] def getFSBytesWrittenOnThreadCallback(): () => Long = {
val threadStats = FileSystem.getAllStatistics.asScala.map(_.getThreadStatistics)
val f = () => threadStats.map(_.getBytesWritten).sum
val baselineBytesWritten = f()
() => f() - baselineBytesWritten
}
/**
* Get [[FileStatus]] objects for all leaf children (files) under the given base path. If the
* given path points to a file, return a single-element collection containing [[FileStatus]] of
* that file.
*/
def listLeafStatuses(fs: FileSystem, basePath: Path): Seq[FileStatus] = {
listLeafStatuses(fs, fs.getFileStatus(basePath))
}
/**
* Get [[FileStatus]] objects for all leaf children (files) under the given base path. If the
* given path points to a file, return a single-element collection containing [[FileStatus]] of
* that file.
*/
def listLeafStatuses(fs: FileSystem, baseStatus: FileStatus): Seq[FileStatus] = {
def recurse(status: FileStatus): Seq[FileStatus] = {
val (directories, leaves) = fs.listStatus(status.getPath).partition(_.isDirectory)
leaves ++ directories.flatMap(f => listLeafStatuses(fs, f))
}
if (baseStatus.isDirectory) recurse(baseStatus) else Seq(baseStatus)
}
def listLeafDirStatuses(fs: FileSystem, basePath: Path): Seq[FileStatus] = {
listLeafDirStatuses(fs, fs.getFileStatus(basePath))
}
def listLeafDirStatuses(fs: FileSystem, baseStatus: FileStatus): Seq[FileStatus] = {
def recurse(status: FileStatus): Seq[FileStatus] = {
val (directories, files) = fs.listStatus(status.getPath).partition(_.isDirectory)
val leaves = if (directories.isEmpty) Seq(status) else Seq.empty[FileStatus]
leaves ++ directories.flatMap(dir => listLeafDirStatuses(fs, dir))
}
assert(baseStatus.isDirectory)
recurse(baseStatus)
}
def isGlobPath(pattern: Path): Boolean = {
pattern.toString.exists("{}[]*?\\".toSet.contains)
}
def globPath(pattern: Path): Seq[Path] = {
val fs = pattern.getFileSystem(conf)
globPath(fs, pattern)
}
def globPath(fs: FileSystem, pattern: Path): Seq[Path] = {
Option(fs.globStatus(pattern)).map { statuses =>
statuses.map(_.getPath.makeQualified(fs.getUri, fs.getWorkingDirectory)).toSeq
}.getOrElse(Seq.empty[Path])
}
def globPathIfNecessary(pattern: Path): Seq[Path] = {
if (isGlobPath(pattern)) globPath(pattern) else Seq(pattern)
}
def globPathIfNecessary(fs: FileSystem, pattern: Path): Seq[Path] = {
if (isGlobPath(pattern)) globPath(fs, pattern) else Seq(pattern)
}
/**
   * Lists all the files in a directory that start with the specified prefix and do not end with
   * the given suffix. The returned {{FileStatus}} instances are sorted by the modification times of
* the respective files.
*/
def listFilesSorted(
remoteFs: FileSystem,
dir: Path,
prefix: String,
exclusionSuffix: String): Array[FileStatus] = {
try {
val fileStatuses = remoteFs.listStatus(dir,
new PathFilter {
override def accept(path: Path): Boolean = {
val name = path.getName
name.startsWith(prefix) && !name.endsWith(exclusionSuffix)
}
})
Arrays.sort(fileStatuses, new Comparator[FileStatus] {
override def compare(o1: FileStatus, o2: FileStatus): Int = {
Longs.compare(o1.getModificationTime, o2.getModificationTime)
}
})
fileStatuses
} catch {
case NonFatal(e) =>
logWarning("Error while attempting to list files from application staging dir", e)
Array.empty
}
}
private[spark] def getSuffixForCredentialsPath(credentialsPath: Path): Int = {
val fileName = credentialsPath.getName
fileName.substring(
fileName.lastIndexOf(SparkHadoopUtil.SPARK_YARN_CREDS_COUNTER_DELIM) + 1).toInt
}
private val HADOOP_CONF_PATTERN = "(\\$\\{hadoopconf-[^\\}\\$\\s]+\\})".r.unanchored
/**
* Substitute variables by looking them up in Hadoop configs. Only variables that match the
* ${hadoopconf- .. } pattern are substituted.
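   * For example, "${hadoopconf-fs.defaultFS}" is replaced with the value of the "fs.defaultFS" key.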
*/
def substituteHadoopVariables(text: String, hadoopConf: Configuration): String = {
text match {
case HADOOP_CONF_PATTERN(matched) =>
logDebug(text + " matched " + HADOOP_CONF_PATTERN)
val key = matched.substring(13, matched.length() - 1) // remove ${hadoopconf- .. }
val eval = Option[String](hadoopConf.get(key))
.map { value =>
logDebug("Substituted " + matched + " with " + value)
text.replace(matched, value)
}
if (eval.isEmpty) {
// The variable was not found in Hadoop configs, so return text as is.
text
} else {
// Continue to substitute more variables.
substituteHadoopVariables(eval.get, hadoopConf)
}
case _ =>
logDebug(text + " didn't match " + HADOOP_CONF_PATTERN)
text
}
}
/**
* Start a thread to periodically update the current user's credentials with new credentials so
* that access to secured service does not fail.
*/
private[spark] def startCredentialUpdater(conf: SparkConf) {}
/**
* Stop the thread that does the credential updates.
*/
private[spark] def stopCredentialUpdater() {}
/**
* Return a fresh Hadoop configuration, bypassing the HDFS cache mechanism.
* This is to prevent the DFSClient from using an old cached token to connect to the NameNode.
*/
private[spark] def getConfBypassingFSCache(
hadoopConf: Configuration,
scheme: String): Configuration = {
val newConf = new Configuration(hadoopConf)
val confKey = s"fs.${scheme}.impl.disable.cache"
newConf.setBoolean(confKey, true)
newConf
}
/**
* Dump the credentials' tokens to string values.
*
* @param credentials credentials
* @return an iterator over the string values. If no credentials are passed in: an empty list
*/
private[spark] def dumpTokens(credentials: Credentials): Iterable[String] = {
if (credentials != null) {
credentials.getAllTokens.asScala.map(tokenToString)
} else {
Seq.empty
}
}
/**
* Convert a token to a string for logging.
   * If it's an abstract delegation token, attempt to unmarshall it and then
* print more details, including timestamps in human-readable form.
*
* @param token token to convert to a string
* @return a printable string value.
*/
private[spark] def tokenToString(token: Token[_ <: TokenIdentifier]): String = {
val df = DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT, Locale.US)
val buffer = new StringBuilder(128)
buffer.append(token.toString)
try {
val ti = token.decodeIdentifier
buffer.append("; ").append(ti)
ti match {
case dt: AbstractDelegationTokenIdentifier =>
// include human times and the renewer, which the HDFS tokens toString omits
buffer.append("; Renewer: ").append(dt.getRenewer)
buffer.append("; Issued: ").append(df.format(new Date(dt.getIssueDate)))
buffer.append("; Max Date: ").append(df.format(new Date(dt.getMaxDate)))
case _ =>
}
} catch {
case e: IOException =>
logDebug(s"Failed to decode $token: $e", e)
}
buffer.toString
}
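  /**
   * Checks whether the current user has the requested access to the given file,
   * following the owner/group/other permission bits of its status.
   */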
private[spark] def checkAccessPermission(status: FileStatus, mode: FsAction): Boolean = {
val perm = status.getPermission
val ugi = UserGroupInformation.getCurrentUser
if (ugi.getShortUserName == status.getOwner) {
if (perm.getUserAction.implies(mode)) {
return true
}
} else if (ugi.getGroupNames.contains(status.getGroup)) {
if (perm.getGroupAction.implies(mode)) {
return true
}
} else if (perm.getOtherAction.implies(mode)) {
return true
}
logDebug(s"Permission denied: user=${ugi.getShortUserName}, " +
s"path=${status.getPath}:${status.getOwner}:${status.getGroup}" +
s"${if (status.isDirectory) "d" else "-"}$perm")
false
}
}
object SparkHadoopUtil {
private lazy val hadoop = new SparkHadoopUtil
private lazy val yarn = try {
Utils.classForName("org.apache.spark.deploy.yarn.YarnSparkHadoopUtil")
.newInstance()
.asInstanceOf[SparkHadoopUtil]
} catch {
case e: Exception => throw new SparkException("Unable to load YARN support", e)
}
val SPARK_YARN_CREDS_TEMP_EXTENSION = ".tmp"
val SPARK_YARN_CREDS_COUNTER_DELIM = "-"
/**
* Number of records to update input metrics when reading from HadoopRDDs.
*
* Each update is potentially expensive because we need to use reflection to access the
* Hadoop FileSystem API of interest (only available in 2.5), so we should do this sparingly.
*/
private[spark] val UPDATE_INPUT_METRICS_INTERVAL_RECORDS = 1000
def get: SparkHadoopUtil = {
// Check each time to support changing to/from YARN
val yarnMode = java.lang.Boolean.parseBoolean(
System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE")))
if (yarnMode) {
yarn
} else {
hadoop
}
}
}
| UndeadBaneGitHub/spark | core/src/main/scala/org/apache/spark/deploy/SparkHadoopUtil.scala | Scala | apache-2.0 | 16,924 |
package pigpio.scaladsl
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
/**
*
*/
trait DigitalIO {
def gpioGetMode(gpio: Gpio)(implicit pigpio: PigpioLibrary): Try[PinMode]
def gpioSetMode(gpio: Gpio, mode: PinMode)(implicit pigpio: PigpioLibrary): Try[GpioResult]
def gpioRead(gpio: Gpio)(implicit pigpio: PigpioLibrary): Try[Level]
def gpioWrite(gpio: Gpio, level: Level)(implicit pigpio: PigpioLibrary): Try[GpioResult]
def gpioSetPullUpDown(gpio: Gpio, pud: GpioPull)(implicit pigpio: PigpioLibrary): Try[GpioResult]
}
object DefaultDigitalIO extends DefaultDigitalIO
trait DefaultDigitalIO extends DigitalIO {
def gpioGetMode(gpio: Gpio)(implicit pigpio: PigpioLibrary): Try[PinMode] = {
try Success(PinMode(pigpio.gpioGetMode(gpio.value)))
catch {
case NonFatal(e) => Failure(e)
}
}
def gpioSetMode(gpio: Gpio, mode: PinMode)(implicit pigpio: PigpioLibrary): Try[GpioResult] =
GpioResultOf(pigpio.gpioSetMode(gpio.value, mode.value))
def gpioSetPullUpDown(gpio: Gpio, pud: GpioPull)(implicit pigpio: PigpioLibrary): Try[GpioResult] =
GpioResultOf(pigpio.gpioSetPullUpDown(gpio.value, pud.value))
def gpioRead(gpio: Gpio)(implicit pigpio: PigpioLibrary): Try[Level] = {
try Success(Level(pigpio.gpioRead(gpio.value)))
catch {
case NonFatal(e) => Failure(e)
}
}
def gpioWrite(gpio: Gpio, level: Level)(implicit pigpio: PigpioLibrary): Try[GpioResult] =
GpioResultOf(pigpio.gpioWrite(gpio.value, level.value))
}
| jw3/pigpio-scala | src/main/scala/pigpio/scaladsl/DigitalIO.scala | Scala | apache-2.0 | 1,527 |
// Copyright (c) 2019 Ben Zimmer. All rights reserved.
// Simple Swing GUI for configuring properties.
package bdzimmer.secondary.export.view
import java.awt.Font // scalastyle:ignore illegal.imports
import java.awt.{GridLayout, BorderLayout}
import java.awt.event.{ActionListener, ActionEvent}
import javax.swing.{JFrame, JLabel, JButton, JTextField, JPanel, JComboBox, JCheckBox, SwingConstants}
import javax.swing.border.EmptyBorder
import javax.swing.event.{DocumentListener, DocumentEvent}
import bdzimmer.util.PropertiesWrapper
import bdzimmer.util.StringUtils._
import bdzimmer.secondary.export.model.ConfigurationModel._
class ConfigurationGUI(
prop: PropertiesWrapper,
reqProps: List[ConfigField],
guiTitle: String) extends JFrame {
val propFile = prop.file
val fieldFont = new Font("monospaced", Font.PLAIN, 12)
val emptyborder = new EmptyBorder(10, 10, 10, 10)
val saveStatus = new JLabel("")
val save = new JButton("Save")
save.addActionListener(new ActionListener() {
override def actionPerformed(e: ActionEvent): Unit = {
prop.prop.store(
new java.io.FileOutputStream(propFile),
"created with configuration editor")
saveStatus.setText("Saved.")
}
})
val done = new JButton("Done")
done.addActionListener(new ActionListener {
override def actionPerformed(e: ActionEvent): Unit = {
dispose()
}
})
val savePanel = new JPanel(new GridLayout(1, 4))
savePanel.add(saveStatus)
savePanel.add(new JLabel())
savePanel.add(save)
savePanel.add(done)
def setProperty(key: String, text: String): Unit = {
prop.set(key, text)
saveStatus.setText("Modified.")
}
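  // Each field factory below returns a Swing component bound to a property key;
  // any edit writes through to the in-memory properties and marks the form as modified.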
def textConfigField(key: String, default: String) = {
val res = new JTextField(
prop(key).getOrElse(default),
40
)
res.getDocument.addDocumentListener(new DocumentListener {
def changedUpdate(e: DocumentEvent): Unit = setProperty(key, res.getText)
def removeUpdate(e: DocumentEvent): Unit = setProperty(key, res.getText)
def insertUpdate(e: DocumentEvent): Unit = setProperty(key, res.getText)
})
res
}
def chooseConfigField(key: String, default: String, choices: List[String])= {
val res = new JComboBox[String](choices.toArray)
res.setFont(fieldFont)
res.setSelectedItem(prop(key).getOrElse(default))
res.addActionListener(new ActionListener {
override def actionPerformed(e: ActionEvent): Unit = {
setProperty(key, res.getSelectedItem.toString)
}
})
res
}
def boolConfigField(key: String, default: String) = {
val res = new JCheckBox()
res.setSelected(prop(key).getOrElse(default).toBooleanSafe)
res.addActionListener(new ActionListener {
override def actionPerformed(e: ActionEvent): Unit = {
setProperty(key, res.isSelected.toString)
}
})
res
}
  val columnPairs = reqProps.map {
    case t: TextConfigField => (textConfigField(t.key, t.default), t.description + ":")
    case c: ChooseConfigField => (chooseConfigField(c.key, c.default, c.choices), c.description + ":")
    case b: BoolConfigField => (boolConfigField(b.key, b.default), b.description + ":")
  } :+ (savePanel, "")
setTitle(guiTitle)
val contents = new JPanel(new BorderLayout())
contents.add({
val res = new JPanel()
res.add(new JLabel("Configuration file: " + propFile.getAbsolutePath))
res
}, BorderLayout.SOUTH)
contents.add({
val res = new JPanel(new GridLayout(columnPairs.length, 1))
res.setBorder(emptyborder)
columnPairs.foreach(x => {
val label = new JLabel(x._2)
label.setHorizontalAlignment(SwingConstants.LEFT)
res.add(label)
})
res
}, BorderLayout.WEST)
contents.add({
val res = new JPanel(new GridLayout(columnPairs.length, 1))
res.setBorder(emptyborder)
columnPairs.foreach(x => {
res.add(x._1)
})
res
}, BorderLayout.CENTER)
add(contents, BorderLayout.CENTER)
pack()
setVisible(true)
}
| bdzimmer/secondary | src/main/scala/bdzimmer/secondary/export/view/ConfigurationGUI.scala | Scala | bsd-3-clause | 4,040 |
package no.digipost.labs.security
import org.scalatra.ScalatraServlet
trait SecurityHeaders { self: ScalatraServlet =>
after() {
response.headers += Headers.CacheControl -> "no-cache, no-store, no-transform"
response.headers += Headers.XFrameOptions -> "deny"
response.headers += Headers.XContentTypeOptions -> "nosniff"
response.headers += Headers.XPermittedCrossDomainPolicies -> "master-only"
response.headers += Headers.StrictTransportSecurity -> "max-age=31536000"
response.headers += Headers.XXSSProtection -> "1; mode=block"
}
}
| digipost/labs | backend/src/main/scala/no/digipost/labs/security/SecurityHeaders.scala | Scala | apache-2.0 | 567 |
package ru.maizy.cheesecake.core.tests.utils
import org.scalatest.FlatSpecLike
import ru.maizy.cheesecake.core.utils.CollectionsUtils
import ru.maizy.cheesecake.core.tests.BaseSpec
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2016
* See LICENSE.txt for details.
*/
class CollectionsSpec extends BaseSpec with FlatSpecLike {
"tuplesToMultiMap" should "work" in {
val tuples = List(("a", 1), ("a", 2), ("b", 3))
CollectionsUtils.tuplesToMultiMap(tuples) shouldBe Map("a" -> Seq(1, 2), "b" -> Seq(3))
}
}
| maizy/cheesecake | core/src/test/scala/ru/maizy/cheesecake/core/tests/utils/CollectionsSpec.scala | Scala | apache-2.0 | 522 |
package scroll.internal
/** This package contains interfaces and traits for reflective access and role-based dispatch.
*/
package object dispatch
| max-leuthaeuser/SCROLL | core/src/main/scala/scroll/internal/dispatch/package.scala | Scala | lgpl-3.0 | 149 |
package play.filters.csrf
import play.api.libs.ws.WS.WSRequestHolder
import scala.concurrent.Future
import play.api.libs.ws.{WS, Response}
import play.api.mvc._
/**
* Specs for the Scala per action CSRF actions
*/
object ScalaCSRFActionSpec extends CSRFCommonSpecs {
def buildCsrfCheckRequest(configuration: (String, String)*) = new CsrfTester {
def apply[T](makeRequest: (WSRequestHolder) => Future[Response])(handleResponse: (Response) => T) = withServer(configuration) {
case _ => CSRFCheck(Action(Results.Ok))
} {
handleResponse(await(makeRequest(WS.url("http://localhost:" + testServerPort))))
}
}
def buildCsrfAddToken(configuration: (String, String)*) = new CsrfTester {
def apply[T](makeRequest: (WSRequestHolder) => Future[Response])(handleResponse: (Response) => T) = withServer(configuration) {
case _ => CSRFAddToken(Action { implicit req =>
CSRF.getToken(req).map { token =>
Results.Ok(token.value)
} getOrElse Results.NotFound
})
} {
handleResponse(await(makeRequest(WS.url("http://localhost:" + testServerPort))))
}
}
}
| michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/src/play-filters-helpers/src/test/scala/play/filters/csrf/ScalaCSRFActionSpec.scala | Scala | mit | 1,129 |
package scorex.network
import akka.actor.{ActorRef, Props}
import akka.testkit.TestProbe
import org.h2.mvstore.MVStore
import scorex.ActorTestingCommons
import scorex.block.Block
import scorex.block.Block._
import scorex.network.NetworkController.DataFromPeer
import scorex.settings.SettingsMock
import scorex.transaction.{BlockStorage, History}
import scala.concurrent.duration.{FiniteDuration, _}
import scala.language.{implicitConversions, postfixOps}
import scala.util.Random
class BlockchainSynchronizerSpecification extends ActorTestingCommons {
import BlockchainSynchronizer._
import scorex.network.Coordinator._
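  // History stub: a block id counts as "contained" iff its first byte is <= last.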
private def mockHistory(last: Int): History = {
val history = mock[History]
(history.contains(_: BlockId)) expects * onCall { id: BlockId => id(0) <= last } anyNumberOfTimes()
history
}
private case object BlacklistAssertion
private def setBlacklistExpectations(blacklist: Boolean): Unit = {
(peer.blacklist _).when().onCall {
      _ => if (blacklist) self ! BlacklistAssertion else fail("No blacklisting should happen in this case")
}
}
private val lastHistoryBlockId = 10
private val testHistory = mockHistory(lastHistoryBlockId)
private val initialScore = BigInt(1000)
testHistory.scoreOf _ expects * onCall {
blockId: BlockId =>
assert(BlockIdExtraction.extract(blockId) == lastHistoryBlockId, s"defined only for block id $lastHistoryBlockId")
initialScore
} noMoreThanOnce()
private val testCoordinator = TestProbe("Coordinator")
private val entireForkLoad = mockFunction[Boolean]
private def setloadEntireForkChunk(value: Boolean) = entireForkLoad expects() returns value anyNumberOfTimes
object TestSettings extends SettingsMock {
override lazy val historySynchronizerTimeout: FiniteDuration = testDuration * 2
override lazy val MaxRollback: Int = lastHistoryBlockId - 1
override lazy val retriesBeforeBlacklisted: Int = 0
override lazy val operationRetries: Int = retriesBeforeBlacklisted + 13930975
override lazy val pinToInitialPeer: Boolean = true
override lazy val loadEntireChain: Boolean = entireForkLoad()
}
private val blockScore = BigInt(100)
private trait App extends ApplicationMock {
private val testBlockStorage = mock[BlockStorage]
consensusModule.blockScore _ when * returns blockScore
override lazy val settings = TestSettings
override lazy val coordinator: ActorRef = testCoordinator.ref
override lazy val history: History = testHistory
override val blockStorage: BlockStorage = testBlockStorage
}
private val app = stub[App]
import app.basicMessagesSpecsRepo._
private def reasonableTimeInterval = (TestSettings.historySynchronizerTimeout.toMillis / 2) millis
private def validateStatus(status: Status): Unit = {
actorRef ! GetSyncStatus
expectMsg(status)
}
private def assertLatestBlockFromNonSyncPeer(): Unit = {
val peer = stub[ConnectedPeer]
val block = blockMock(lastHistoryBlockId + 3729047)
actorRef ! DataFromPeer(BlockMessageSpec.messageCode, block, peer)
testCoordinator.expectMsg(AddBlock(block, Some(peer)))
}
private def assertThatPeerGotBlacklisted(): Unit = {
setBlacklistExpectations(true)
testCoordinator.expectMsg(reasonableTimeInterval, SyncFinished.unsuccessfully)
expectMsg(BlacklistAssertion)
}
private def assertPeerNeverGotBlacklisted(): Unit = setBlacklistExpectations(false)
private def expectedGetSignaturesSpec(blockIds: Int*): Unit = expectNetworkMessage(GetSignaturesSpec, blockIds.toSeq)
private def sendBlock(block: Block): Unit = dataFromNetwork(BlockMessageSpec, block)
private def sendSignatures(blockIds: BlockId*): Unit = dataFromNetwork(SignaturesSpec, blockIds.toSeq)
protected override val actorRef = system.actorOf(Props(classOf[BlockchainSynchronizer], app))
testSafely {
validateStatus(Idle)
assertLatestBlockFromNonSyncPeer()
val t = System.currentTimeMillis()
def adjustedTimeout(correction: Float): Long = {
val withElapsedTime = TestSettings.historySynchronizerTimeout.toMillis - (System.currentTimeMillis() - t)
withElapsedTime * correction toLong
}
def aBitLessThanTimeout = adjustedTimeout(0.9f) millis
def aBitLongerThanTimeout = adjustedTimeout(1.1f) millis
testHistory.lastBlockIds _ expects TestSettings.MaxRollback returns blockIds(lastHistoryBlockId, 9) // ids come in reverse order
actorRef ! GetExtension(Map(peer -> 0))
expectedGetSignaturesSpec(lastHistoryBlockId, 9)
validateStatus(GettingExtension)
"one block is a sign to start getting blocks" in {
sendSignatures(lastHistoryBlockId, 11)
expectNetworkMessage(GetBlockSpec, 11)
validateStatus(GettingBlocks)
}
"at least one block id in response must be among requested ones" in {
val notInTheHistoryBlockId = lastHistoryBlockId + 1
val notRequestedBlockFromHistoryBeginning = 1
assertPeerNeverGotBlacklisted()
sendSignatures(notRequestedBlockFromHistoryBeginning, notInTheHistoryBlockId)
testCoordinator.expectMsg(reasonableTimeInterval, SyncFinished.unsuccessfully)
}
"become idle on timeout in GettingExtension" in {
assertPeerNeverGotBlacklisted()
testCoordinator.expectNoMsg(aBitLessThanTimeout)
testCoordinator.expectMsg(SyncFinished.unsuccessfully)
validateStatus(Idle)
}
"go to GettingExtension" - {
assertLatestBlockFromNonSyncPeer()
sendSignatures(9, lastHistoryBlockId, 11, 12, 13)
expectedGetSignaturesSpec(13, 12)
"sending same signatures twice should not lead to blacklisting" in {
assertPeerNeverGotBlacklisted()
sendSignatures(9, lastHistoryBlockId, 11, 12, 13)
}
"go to GettingExtensionTail" - {
validateStatus(GettingExtensionTail)
val validBlockIds = blockIds(13, 14, 15)
"extension tail from another peer(s) should not lead to the peers blacklisting" in {
assertPeerNeverGotBlacklisted()
dataFromNetwork(SignaturesSpec, validBlockIds, stub[ConnectedPeer])
}
"blacklist on timeout in states following GettingExtension" in {
testCoordinator.expectNoMsg(aBitLessThanTimeout)
assertThatPeerGotBlacklisted()
validateStatus(Idle)
}
"follow ledger download scenario" - {
sendSignatures(validBlockIds: _*)
expectedGetSignaturesSpec(15, 14)
sendSignatures(14, 15)
val finalBlockIdInterval = 11 to 15
validateStatus(GettingBlocks)
"react on GetExtension in the Idle state only" in {
actorRef ! GetExtension(Map(peer -> 10000))
validateStatus(GettingBlocks)
}
"blocks loading" - {
assertLatestBlockFromNonSyncPeer()
val numberOfBlocks = finalBlockIdInterval.size
def setHistoryScoreExpectations(delta: BigInt): Unit =
testHistory.score _ expects() returns (initialScore + (numberOfBlocks * blockScore) + delta) repeat (0 to numberOfBlocks)
def sendBlocks(): Unit = {
finalBlockIdInterval foreach { expectNetworkMessage(GetBlockSpec, _) }
Random.shuffle(finalBlockIdInterval) foreach { id => sendBlock(blockMock(id)) }
}
def assertThatBlocksLoaded(): Unit = {
assertPeerNeverGotBlacklisted()
testCoordinator.expectMsgPF(hint = s"$numberOfBlocks fork blocks") {
case SyncFinished(true, Some((lastCommonBlockId, blockIterator, Some(connectedPeer)))) =>
connectedPeer shouldBe peer
BlockIdExtraction.extract(lastCommonBlockId) shouldBe lastHistoryBlockId
val forkStorageBlockIds = blockIterator.map(id => InnerId(id.uniqueId)).toSeq
forkStorageBlockIds shouldBe blockIds(finalBlockIdInterval: _*).map(InnerId)
}
validateStatus(Idle)
}
"entire fork loading" - {
setloadEntireForkChunk(true)
"fork has two blocks better score" in {
setHistoryScoreExpectations(-(blockScore * 2 + 1))
sendBlocks()
assertThatBlocksLoaded()
}
}
"partial fork loading" - {
setloadEntireForkChunk(false)
"fork has lower score" in {
setHistoryScoreExpectations(1)
assertPeerNeverGotBlacklisted()
sendBlocks()
testCoordinator.expectMsg(reasonableTimeInterval, SyncFinished.unsuccessfully)
validateStatus(Idle)
}
"fork has better score" - {
setHistoryScoreExpectations(-1)
"same block twice should not reset timeout" in {
val firstSubsequentBlockId = finalBlockIdInterval.head
sendBlock(blockMock(firstSubsequentBlockId))
Thread sleep aBitLessThanTimeout.toMillis
sendBlock(blockMock(firstSubsequentBlockId))
assertThatPeerGotBlacklisted()
}
"happy path" in {
sendBlocks()
assertThatBlocksLoaded()
}
}
}
}
}
}
}
"a (sub)sequience of block ids to download" - {
implicit def toInnerIds(i: Seq[Int]): InnerIds = i.map(toInnerId)
implicit def toInnerId(i: Int): InnerId = InnerId(Array(i.toByte))
def historyContaining(blockIds: Int*): History = {
val history = mock[History]
(history.contains(_: BlockId)) expects * onCall { id: BlockId => blockIds.contains(id.head.toInt) } anyNumberOfTimes()
history
}
def test(blockIds: InnerIds, h: History, expectedLastCommon: InnerId, expected: Seq[Int]): Unit = {
val Some((commonId, tail)) = BlockchainSynchronizer.blockIdsToStartDownload(blockIds, h)
commonId shouldBe expectedLastCommon
tail should contain theSameElementsInOrderAs toInnerIds(expected)
}
"a sample sequience" in {
test(Seq(1, 2, 3, 4), historyContaining(1, 2), 2, Seq(3, 4))
}
"all blocks are in history" in {
test(Seq(1, 2, 3, 4), historyContaining(1, 2, 3, 4), 4, Seq())
}
"suspicious block id" in {
test(Seq(1, 2, 3, 4), historyContaining(1, 3), 1, Seq(2, 3, 4))
}
"first block(s) are not in history" in {
blockIdsToStartDownload(Seq(10000, 2, 3, 4), historyContaining(1, 2, 3)) shouldEqual None
}
}
}
}
| B83YPoj/Waves | src/test/scala/scorex/network/BlockchainSynchronizerSpecification.scala | Scala | apache-2.0 | 10,718 |
package io.really.coffeescript
/**
* Created by asoliman on 4/28/15.
*/
class CompilerError {
}
| reallylabs/coffeescript-compiler | src/main/scala/io/really/coffeescript/CompilerError.scala | Scala | apache-2.0 | 100 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.classification.LogisticRegressionSuite._
import org.apache.spark.ml.feature.{Instance, OffsetInstance}
import org.apache.spark.ml.feature.{LabeledPoint, RFormula}
import org.apache.spark.ml.linalg.{BLAS, DenseVector, Vector, Vectors}
import org.apache.spark.ml.param.{ParamMap, ParamsSuite}
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.random._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.FloatType
class GeneralizedLinearRegressionSuite extends MLTest with DefaultReadWriteTest {
import testImplicits._
private val seed: Int = 42
@transient var datasetGaussianIdentity: DataFrame = _
@transient var datasetGaussianLog: DataFrame = _
@transient var datasetGaussianInverse: DataFrame = _
@transient var datasetBinomial: DataFrame = _
@transient var datasetPoissonLog: DataFrame = _
@transient var datasetPoissonLogWithZero: DataFrame = _
@transient var datasetPoissonIdentity: DataFrame = _
@transient var datasetPoissonSqrt: DataFrame = _
@transient var datasetGammaInverse: DataFrame = _
@transient var datasetGammaIdentity: DataFrame = _
@transient var datasetGammaLog: DataFrame = _
override def beforeAll(): Unit = {
super.beforeAll()
import GeneralizedLinearRegressionSuite._
datasetGaussianIdentity = generateGeneralizedLinearRegressionInput(
intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
family = "gaussian", link = "identity").toDF()
datasetGaussianLog = generateGeneralizedLinearRegressionInput(
intercept = 0.25, coefficients = Array(0.22, 0.06), xMean = Array(2.9, 10.5),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
family = "gaussian", link = "log").toDF()
datasetGaussianInverse = generateGeneralizedLinearRegressionInput(
intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
family = "gaussian", link = "inverse").toDF()
datasetBinomial = {
val nPoints = 10000
val coefficients = Array(-0.57997, 0.912083, -0.371077, -0.819866, 2.688191)
val xMean = Array(5.843, 3.057, 3.758, 1.199)
val xVariance = Array(0.6856, 0.1899, 3.116, 0.581)
val testData =
generateMultinomialLogisticInput(coefficients, xMean, xVariance,
addIntercept = true, nPoints, seed)
testData.toDF()
}
datasetPoissonLog = generateGeneralizedLinearRegressionInput(
intercept = 0.25, coefficients = Array(0.22, 0.06), xMean = Array(2.9, 10.5),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
family = "poisson", link = "log").toDF()
datasetPoissonLogWithZero = Seq(
LabeledPoint(0.0, Vectors.dense(18, 1.0)),
LabeledPoint(1.0, Vectors.dense(12, 0.0)),
LabeledPoint(0.0, Vectors.dense(15, 0.0)),
LabeledPoint(0.0, Vectors.dense(13, 2.0)),
LabeledPoint(0.0, Vectors.dense(15, 1.0)),
LabeledPoint(1.0, Vectors.dense(16, 1.0))
).toDF()
datasetPoissonIdentity = generateGeneralizedLinearRegressionInput(
intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
family = "poisson", link = "identity").toDF()
datasetPoissonSqrt = generateGeneralizedLinearRegressionInput(
intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
family = "poisson", link = "sqrt").toDF()
datasetGammaInverse = generateGeneralizedLinearRegressionInput(
intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
family = "gamma", link = "inverse").toDF()
datasetGammaIdentity = generateGeneralizedLinearRegressionInput(
intercept = 2.5, coefficients = Array(2.2, 0.6), xMean = Array(2.9, 10.5),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
family = "gamma", link = "identity").toDF()
datasetGammaLog = generateGeneralizedLinearRegressionInput(
intercept = 0.25, coefficients = Array(0.22, 0.06), xMean = Array(2.9, 10.5),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, noiseLevel = 0.01,
family = "gamma", link = "log").toDF()
}
/**
* Enable the ignored test to export the dataset into CSV format,
* so we can validate the training accuracy compared with R's glm and glmnet package.
*/
ignore("export test data into CSV format") {
datasetGaussianIdentity.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianIdentity")
datasetGaussianLog.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianLog")
datasetGaussianInverse.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGaussianInverse")
datasetBinomial.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetBinomial")
datasetPoissonLog.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonLog")
datasetPoissonLogWithZero.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonLogWithZero")
datasetPoissonIdentity.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonIdentity")
datasetPoissonSqrt.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetPoissonSqrt")
datasetGammaInverse.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGammaInverse")
datasetGammaIdentity.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGammaIdentity")
datasetGammaLog.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/GeneralizedLinearRegressionSuite/datasetGammaLog")
}
test("params") {
ParamsSuite.checkParams(new GeneralizedLinearRegression)
val model = new GeneralizedLinearRegressionModel("genLinReg", Vectors.dense(0.0), 0.0)
ParamsSuite.checkParams(model)
}
test("generalized linear regression: default params") {
val glr = new GeneralizedLinearRegression
assert(glr.getLabelCol === "label")
assert(glr.getFeaturesCol === "features")
assert(glr.getPredictionCol === "prediction")
assert(glr.getFitIntercept)
assert(glr.getTol === 1E-6)
assert(!glr.isDefined(glr.weightCol))
assert(glr.getRegParam === 0.0)
assert(glr.getSolver == "irls")
assert(glr.getVariancePower === 0.0)
// TODO: Construct model directly instead of via fitting.
val model = glr.setFamily("gaussian").setLink("identity")
.fit(datasetGaussianIdentity)
MLTestingUtils.checkCopyAndUids(glr, model)
assert(model.hasSummary)
val copiedModel = model.copy(ParamMap.empty)
assert(copiedModel.hasSummary)
model.setSummary(None)
assert(!model.hasSummary)
assert(model.getFeaturesCol === "features")
assert(model.getPredictionCol === "prediction")
assert(model.intercept !== 0.0)
assert(model.hasParent)
assert(model.getFamily === "gaussian")
assert(model.getLink === "identity")
}
test("prediction on single instance") {
val glr = new GeneralizedLinearRegression
val model = glr.setFamily("gaussian").setLink("identity")
.fit(datasetGaussianIdentity)
testPredictionModelSinglePrediction(model, datasetGaussianIdentity)
}
test("generalized linear regression: gaussian family against glm") {
/*
R code:
f1 <- data$V1 ~ data$V2 + data$V3 - 1
f2 <- data$V1 ~ data$V2 + data$V3
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family="gaussian", data=data)
print(as.vector(coef(model)))
}
[1] 2.2960999 0.8087933
[1] 2.5002642 2.2000403 0.5999485
data <- read.csv("path", header=FALSE)
model1 <- glm(f1, family=gaussian(link=log), data=data, start=c(0,0))
model2 <- glm(f2, family=gaussian(link=log), data=data, start=c(0,0,0))
print(as.vector(coef(model1)))
print(as.vector(coef(model2)))
[1] 0.23069326 0.07993778
[1] 0.25001858 0.22002452 0.05998789
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family=gaussian(link=inverse), data=data)
print(as.vector(coef(model)))
}
[1] 2.3010179 0.8198976
[1] 2.4108902 2.2130248 0.6086152
*/
val expected = Seq(
Vectors.dense(0.0, 2.2960999, 0.8087933),
Vectors.dense(2.5002642, 2.2000403, 0.5999485),
Vectors.dense(0.0, 0.23069326, 0.07993778),
Vectors.dense(0.25001858, 0.22002452, 0.05998789),
Vectors.dense(0.0, 2.3010179, 0.8198976),
Vectors.dense(2.4108902, 2.2130248, 0.6086152))
import GeneralizedLinearRegression._
var idx = 0
for ((link, dataset) <- Seq(("identity", datasetGaussianIdentity), ("log", datasetGaussianLog),
("inverse", datasetGaussianInverse))) {
for (fitIntercept <- Seq(false, true)) {
val trainer = new GeneralizedLinearRegression().setFamily("gaussian").setLink(link)
.setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
val model = trainer.fit(dataset)
val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with gaussian family, " +
s"$link link and fitIntercept = $fitIntercept.")
val familyLink = FamilyAndLink(trainer)
testTransformer[(Double, Vector)](dataset, model,
"features", "prediction", "linkPrediction") {
case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
val eta = BLAS.dot(features, model.coefficients) + model.intercept
val prediction2 = familyLink.fitted(eta)
val linkPrediction2 = eta
assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
s"gaussian family, $link link and fitIntercept = $fitIntercept.")
assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
s"GLM with gaussian family, $link link and fitIntercept = $fitIntercept.")
}
idx += 1
}
}
}
test("generalized linear regression: gaussian family against glmnet") {
/*
R code:
library(glmnet)
data <- read.csv("path", header=FALSE)
label = data$V1
features = as.matrix(data.frame(data$V2, data$V3))
for (intercept in c(FALSE, TRUE)) {
for (lambda in c(0.0, 0.1, 1.0)) {
model <- glmnet(features, label, family="gaussian", intercept=intercept,
lambda=lambda, alpha=0, thresh=1E-14)
print(as.vector(coef(model)))
}
}
[1] 0.0000000 2.2961005 0.8087932
[1] 0.0000000 2.2130368 0.8309556
[1] 0.0000000 1.7176137 0.9610657
[1] 2.5002642 2.2000403 0.5999485
[1] 3.1106389 2.0935142 0.5712711
[1] 6.7597127 1.4581054 0.3994266
*/
val expected = Seq(
Vectors.dense(0.0, 2.2961005, 0.8087932),
Vectors.dense(0.0, 2.2130368, 0.8309556),
Vectors.dense(0.0, 1.7176137, 0.9610657),
Vectors.dense(2.5002642, 2.2000403, 0.5999485),
Vectors.dense(3.1106389, 2.0935142, 0.5712711),
Vectors.dense(6.7597127, 1.4581054, 0.3994266))
var idx = 0
for (fitIntercept <- Seq(false, true);
regParam <- Seq(0.0, 0.1, 1.0)) {
val trainer = new GeneralizedLinearRegression().setFamily("gaussian")
.setFitIntercept(fitIntercept).setRegParam(regParam)
val model = trainer.fit(datasetGaussianIdentity)
val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with gaussian family, " +
s"fitIntercept = $fitIntercept and regParam = $regParam.")
idx += 1
}
}
test("generalized linear regression: binomial family against glm") {
/*
R code:
f1 <- data$V1 ~ data$V2 + data$V3 + data$V4 + data$V5 - 1
f2 <- data$V1 ~ data$V2 + data$V3 + data$V4 + data$V5
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family="binomial", data=data)
print(as.vector(coef(model)))
}
[1] -0.3560284 1.3010002 -0.3570805 -0.7406762
[1] 2.8367406 -0.5896187 0.8931655 -0.3925169 -0.7996989
for (formula in c(f1, f2)) {
model <- glm(formula, family=binomial(link=probit), data=data)
print(as.vector(coef(model)))
}
[1] -0.2134390 0.7800646 -0.2144267 -0.4438358
[1] 1.6995366 -0.3524694 0.5332651 -0.2352985 -0.4780850
for (formula in c(f1, f2)) {
model <- glm(formula, family=binomial(link=cloglog), data=data)
print(as.vector(coef(model)))
}
[1] -0.2832198 0.8434144 -0.2524727 -0.5293452
[1] 1.5063590 -0.4038015 0.6133664 -0.2687882 -0.5541758
*/
val expected = Seq(
Vectors.dense(0.0, -0.3560284, 1.3010002, -0.3570805, -0.7406762),
Vectors.dense(2.8367406, -0.5896187, 0.8931655, -0.3925169, -0.7996989),
Vectors.dense(0.0, -0.2134390, 0.7800646, -0.2144267, -0.4438358),
Vectors.dense(1.6995366, -0.3524694, 0.5332651, -0.2352985, -0.4780850),
Vectors.dense(0.0, -0.2832198, 0.8434144, -0.2524727, -0.5293452),
Vectors.dense(1.5063590, -0.4038015, 0.6133664, -0.2687882, -0.5541758))
import GeneralizedLinearRegression._
var idx = 0
for ((link, dataset) <- Seq(("logit", datasetBinomial), ("probit", datasetBinomial),
("cloglog", datasetBinomial))) {
for (fitIntercept <- Seq(false, true)) {
val trainer = new GeneralizedLinearRegression().setFamily("binomial").setLink(link)
.setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
val model = trainer.fit(dataset)
val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1),
model.coefficients(2), model.coefficients(3))
assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with binomial family, " +
s"$link link and fitIntercept = $fitIntercept.")
val familyLink = FamilyAndLink(trainer)
testTransformer[(Double, Vector)](dataset, model,
"features", "prediction", "linkPrediction") {
case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
val eta = BLAS.dot(features, model.coefficients) + model.intercept
val prediction2 = familyLink.fitted(eta)
val linkPrediction2 = eta
assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
s"binomial family, $link link and fitIntercept = $fitIntercept.")
assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
s"GLM with binomial family, $link link and fitIntercept = $fitIntercept.")
}
idx += 1
}
}
}
test("generalized linear regression: poisson family against glm") {
/*
R code:
f1 <- data$V1 ~ data$V2 + data$V3 - 1
f2 <- data$V1 ~ data$V2 + data$V3
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family="poisson", data=data)
print(as.vector(coef(model)))
}
[1] 0.22999393 0.08047088
[1] 0.25022353 0.21998599 0.05998621
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family=poisson(link=identity), data=data)
print(as.vector(coef(model)))
}
[1] 2.2929501 0.8119415
[1] 2.5012730 2.1999407 0.5999107
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family=poisson(link=sqrt), data=data)
print(as.vector(coef(model)))
}
[1] 2.2958947 0.8090515
[1] 2.5000480 2.1999972 0.5999968
*/
val expected = Seq(
Vectors.dense(0.0, 0.22999393, 0.08047088),
Vectors.dense(0.25022353, 0.21998599, 0.05998621),
Vectors.dense(0.0, 2.2929501, 0.8119415),
Vectors.dense(2.5012730, 2.1999407, 0.5999107),
Vectors.dense(0.0, 2.2958947, 0.8090515),
Vectors.dense(2.5000480, 2.1999972, 0.5999968))
import GeneralizedLinearRegression._
var idx = 0
for ((link, dataset) <- Seq(("log", datasetPoissonLog), ("identity", datasetPoissonIdentity),
("sqrt", datasetPoissonSqrt))) {
for (fitIntercept <- Seq(false, true)) {
val trainer = new GeneralizedLinearRegression().setFamily("poisson").setLink(link)
.setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
val model = trainer.fit(dataset)
val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with poisson family, " +
s"$link link and fitIntercept = $fitIntercept.")
val familyLink = FamilyAndLink(trainer)
testTransformer[(Double, Vector)](dataset, model,
"features", "prediction", "linkPrediction") {
case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
val eta = BLAS.dot(features, model.coefficients) + model.intercept
val prediction2 = familyLink.fitted(eta)
val linkPrediction2 = eta
assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
s"poisson family, $link link and fitIntercept = $fitIntercept.")
assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
s"GLM with poisson family, $link link and fitIntercept = $fitIntercept.")
}
idx += 1
}
}
}
test("generalized linear regression: poisson family against glm (with zero values)") {
/*
R code:
f1 <- data$V1 ~ data$V2 + data$V3 - 1
f2 <- data$V1 ~ data$V2 + data$V3
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family="poisson", data=data)
print(as.vector(coef(model)))
}
[1] -0.0457441 -0.6833928
[1] 1.8121235 -0.1747493 -0.5815417
*/
val expected = Seq(
Vectors.dense(0.0, -0.0457441, -0.6833928),
Vectors.dense(1.8121235, -0.1747493, -0.5815417))
import GeneralizedLinearRegression._
var idx = 0
val link = "log"
val dataset = datasetPoissonLogWithZero
for (fitIntercept <- Seq(false, true)) {
val trainer = new GeneralizedLinearRegression().setFamily("poisson").setLink(link)
.setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
val model = trainer.fit(dataset)
val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with poisson family, " +
s"$link link and fitIntercept = $fitIntercept (with zero values).")
idx += 1
}
}
test("generalized linear regression: gamma family against glm") {
/*
R code:
f1 <- data$V1 ~ data$V2 + data$V3 - 1
f2 <- data$V1 ~ data$V2 + data$V3
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family="Gamma", data=data)
print(as.vector(coef(model)))
}
[1] 2.3392419 0.8058058
[1] 2.3507700 2.2533574 0.6042991
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family=Gamma(link=identity), data=data)
print(as.vector(coef(model)))
}
[1] 2.2908883 0.8147796
[1] 2.5002406 2.1998346 0.6000059
data <- read.csv("path", header=FALSE)
for (formula in c(f1, f2)) {
model <- glm(formula, family=Gamma(link=log), data=data)
print(as.vector(coef(model)))
}
[1] 0.22958970 0.08091066
[1] 0.25003210 0.21996957 0.06000215
*/
val expected = Seq(
Vectors.dense(0.0, 2.3392419, 0.8058058),
Vectors.dense(2.3507700, 2.2533574, 0.6042991),
Vectors.dense(0.0, 2.2908883, 0.8147796),
Vectors.dense(2.5002406, 2.1998346, 0.6000059),
Vectors.dense(0.0, 0.22958970, 0.08091066),
Vectors.dense(0.25003210, 0.21996957, 0.06000215))
import GeneralizedLinearRegression._
var idx = 0
for ((link, dataset) <- Seq(("inverse", datasetGammaInverse),
("identity", datasetGammaIdentity), ("log", datasetGammaLog))) {
for (fitIntercept <- Seq(false, true)) {
val trainer = new GeneralizedLinearRegression().setFamily("Gamma").setLink(link)
.setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
val model = trainer.fit(dataset)
val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with gamma family, " +
s"$link link and fitIntercept = $fitIntercept.")
val familyLink = FamilyAndLink(trainer)
testTransformer[(Double, Vector)](dataset, model,
"features", "prediction", "linkPrediction") {
case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
val eta = BLAS.dot(features, model.coefficients) + model.intercept
val prediction2 = familyLink.fitted(eta)
val linkPrediction2 = eta
assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
s"gamma family, $link link and fitIntercept = $fitIntercept.")
assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
s"GLM with gamma family, $link link and fitIntercept = $fitIntercept.")
}
idx += 1
}
}
}
test("generalized linear regression: tweedie family against glm") {
/*
R code:
library(statmod)
df <- as.data.frame(matrix(c(
1.0, 1.0, 0.0, 5.0,
0.5, 1.0, 1.0, 2.0,
1.0, 1.0, 2.0, 1.0,
2.0, 1.0, 3.0, 3.0), 4, 4, byrow = TRUE))
f1 <- V1 ~ -1 + V3 + V4
f2 <- V1 ~ V3 + V4
for (f in c(f1, f2)) {
for (lp in c(0, 1, -1))
for (vp in c(1.6, 2.5)) {
model <- glm(f, df, family = tweedie(var.power = vp, link.power = lp))
print(as.vector(coef(model)))
}
}
[1] 0.1496480 -0.0122283
[1] 0.1373567 -0.0120673
[1] 0.3919109 0.1846094
[1] 0.3684426 0.1810662
[1] 0.1759887 0.2195818
[1] 0.1108561 0.2059430
[1] -1.3163732 0.4378139 0.2464114
[1] -1.4396020 0.4817364 0.2680088
[1] -0.7090230 0.6256309 0.3294324
[1] -0.9524928 0.7304267 0.3792687
[1] 2.1188978 -0.3360519 -0.2067023
[1] 2.1659028 -0.3499170 -0.2128286
*/
val datasetTweedie = Seq(
Instance(1.0, 1.0, Vectors.dense(0.0, 5.0)),
Instance(0.5, 1.0, Vectors.dense(1.0, 2.0)),
Instance(1.0, 1.0, Vectors.dense(2.0, 1.0)),
Instance(2.0, 1.0, Vectors.dense(3.0, 3.0))
).toDF()
val expected = Seq(
Vectors.dense(0, 0.149648, -0.0122283),
Vectors.dense(0, 0.1373567, -0.0120673),
Vectors.dense(0, 0.3919109, 0.1846094),
Vectors.dense(0, 0.3684426, 0.1810662),
Vectors.dense(0, 0.1759887, 0.2195818),
Vectors.dense(0, 0.1108561, 0.205943),
Vectors.dense(-1.3163732, 0.4378139, 0.2464114),
Vectors.dense(-1.439602, 0.4817364, 0.2680088),
Vectors.dense(-0.709023, 0.6256309, 0.3294324),
Vectors.dense(-0.9524928, 0.7304267, 0.3792687),
Vectors.dense(2.1188978, -0.3360519, -0.2067023),
Vectors.dense(2.1659028, -0.349917, -0.2128286))
import GeneralizedLinearRegression._
var idx = 0
for (fitIntercept <- Seq(false, true);
linkPower <- Seq(0.0, 1.0, -1.0);
variancePower <- Seq(1.6, 2.5)) {
val trainer = new GeneralizedLinearRegression().setFamily("tweedie")
.setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
.setVariancePower(variancePower).setLinkPower(linkPower)
val model = trainer.fit(datasetTweedie)
val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with tweedie family, " +
s"linkPower = $linkPower, fitIntercept = $fitIntercept " +
s"and variancePower = $variancePower.")
val familyLink = FamilyAndLink(trainer)
testTransformer[(Double, Double, Vector)](datasetTweedie, model,
"features", "prediction", "linkPrediction") {
case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
val eta = BLAS.dot(features, model.coefficients) + model.intercept
val prediction2 = familyLink.fitted(eta)
val linkPrediction2 = eta
assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
s"tweedie family, linkPower = $linkPower, fitIntercept = $fitIntercept " +
s"and variancePower = $variancePower.")
assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
s"GLM with tweedie family, linkPower = $linkPower, fitIntercept = $fitIntercept " +
s"and variancePower = $variancePower.")
}
idx += 1
}
}
test("generalized linear regression: tweedie family against glm (default power link)") {
/*
R code:
library(statmod)
df <- as.data.frame(matrix(c(
1.0, 1.0, 0.0, 5.0,
0.5, 1.0, 1.0, 2.0,
1.0, 1.0, 2.0, 1.0,
2.0, 1.0, 3.0, 3.0), 4, 4, byrow = TRUE))
var.power <- c(0, 1, 2, 1.5)
f1 <- V1 ~ -1 + V3 + V4
f2 <- V1 ~ V3 + V4
for (f in c(f1, f2)) {
for (vp in var.power) {
model <- glm(f, df, family = tweedie(var.power = vp))
print(as.vector(coef(model)))
}
}
[1] 0.4310345 0.1896552
[1] 0.15776482 -0.01189032
[1] 0.1468853 0.2116519
[1] 0.2282601 0.2132775
[1] -0.5158730 0.5555556 0.2936508
[1] -1.2689559 0.4230934 0.2388465
[1] 2.137852 -0.341431 -0.209090
[1] 1.5953393 -0.1884985 -0.1106335
*/
val datasetTweedie = Seq(
Instance(1.0, 1.0, Vectors.dense(0.0, 5.0)),
Instance(0.5, 1.0, Vectors.dense(1.0, 2.0)),
Instance(1.0, 1.0, Vectors.dense(2.0, 1.0)),
Instance(2.0, 1.0, Vectors.dense(3.0, 3.0))
).toDF()
val expected = Seq(
Vectors.dense(0, 0.4310345, 0.1896552),
Vectors.dense(0, 0.15776482, -0.01189032),
Vectors.dense(0, 0.1468853, 0.2116519),
Vectors.dense(0, 0.2282601, 0.2132775),
Vectors.dense(-0.515873, 0.5555556, 0.2936508),
Vectors.dense(-1.2689559, 0.4230934, 0.2388465),
Vectors.dense(2.137852, -0.341431, -0.20909),
Vectors.dense(1.5953393, -0.1884985, -0.1106335))
import GeneralizedLinearRegression._
var idx = 0
for (fitIntercept <- Seq(false, true)) {
for (variancePower <- Seq(0.0, 1.0, 2.0, 1.5)) {
val trainer = new GeneralizedLinearRegression().setFamily("tweedie")
.setFitIntercept(fitIntercept).setLinkPredictionCol("linkPrediction")
.setVariancePower(variancePower)
val model = trainer.fit(datasetTweedie)
val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
assert(actual ~= expected(idx) absTol 1e-4, "Model mismatch: GLM with tweedie family, " +
s"fitIntercept = $fitIntercept and variancePower = $variancePower.")
val familyLink = FamilyAndLink(trainer)
testTransformer[(Double, Double, Vector)](datasetTweedie, model,
"features", "prediction", "linkPrediction") {
case Row(features: DenseVector, prediction1: Double, linkPrediction1: Double) =>
val eta = BLAS.dot(features, model.coefficients) + model.intercept
val prediction2 = familyLink.fitted(eta)
val linkPrediction2 = eta
assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
s"tweedie family, fitIntercept = $fitIntercept " +
s"and variancePower = $variancePower.")
assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
s"GLM with tweedie family, fitIntercept = $fitIntercept " +
s"and variancePower = $variancePower.")
}
idx += 1
}
}
}
test("generalized linear regression: intercept only") {
/*
R code:
library(statmod)
y <- c(1.0, 0.5, 0.7, 0.3)
w <- c(1, 2, 3, 4)
for (fam in list(binomial(), Gamma(), gaussian(), poisson(), tweedie(1.6))) {
model1 <- glm(y ~ 1, family = fam)
model2 <- glm(y ~ 1, family = fam, weights = w)
print(as.vector(c(coef(model1), coef(model2))))
}
[1] 0.5108256 0.1201443
[1] 1.600000 1.886792
[1] 0.625 0.530
[1] -0.4700036 -0.6348783
[1] 1.325782 1.463641
*/
val dataset = Seq(
Instance(1.0, 1.0, Vectors.zeros(0)),
Instance(0.5, 2.0, Vectors.zeros(0)),
Instance(0.7, 3.0, Vectors.zeros(0)),
Instance(0.3, 4.0, Vectors.zeros(0))
).toDF()
val expected = Seq(0.5108256, 0.1201443, 1.600000, 1.886792, 0.625, 0.530,
-0.4700036, -0.6348783, 1.325782, 1.463641)
import GeneralizedLinearRegression._
var idx = 0
for (family <- GeneralizedLinearRegression.supportedFamilyNames.sortWith(_ < _)) {
for (useWeight <- Seq(false, true)) {
val trainer = new GeneralizedLinearRegression().setFamily(family)
if (useWeight) trainer.setWeightCol("weight")
if (family == "tweedie") trainer.setVariancePower(1.6)
val model = trainer.fit(dataset)
val actual = model.intercept
assert(actual ~== expected(idx) absTol 1E-3, "Model mismatch: intercept only GLM with " +
s"useWeight = $useWeight and family = $family.")
assert(model.coefficients === new DenseVector(Array.empty[Double]))
idx += 1
}
}
// throw exception for empty model
val trainer = new GeneralizedLinearRegression().setFitIntercept(false)
withClue("Specified model is empty with neither intercept nor feature") {
intercept[IllegalArgumentException] {
trainer.fit(dataset)
}
}
}
test("generalized linear regression with weight and offset") {
/*
R code:
library(statmod)
df <- as.data.frame(matrix(c(
0.2, 1.0, 2.0, 0.0, 5.0,
0.5, 2.1, 0.5, 1.0, 2.0,
0.9, 0.4, 1.0, 2.0, 1.0,
0.7, 0.7, 0.0, 3.0, 3.0), 4, 5, byrow = TRUE))
families <- list(binomial, Gamma, gaussian, poisson, tweedie(1.5))
f1 <- V1 ~ -1 + V4 + V5
f2 <- V1 ~ V4 + V5
for (f in c(f1, f2)) {
for (fam in families) {
model <- glm(f, df, family = fam, weights = V2, offset = V3)
print(as.vector(coef(model)))
}
}
[1] 0.9419107 -0.6864404
[1] -0.2869094 0.7857710
[1] 0.5169222 -0.3344444
[1] 0.1812436 -0.6568422
[1] 0.1055254 0.2979113
[1] -0.2147117 0.9911750 -0.6356096
[1] 0.3390397 -0.3406099 0.6870259
[1] -0.05990345 0.53188982 -0.32118415
[1] -1.5616130 0.6646470 -0.3192581
[1] 0.3665034 0.1039416 0.1484616
*/
val dataset = Seq(
OffsetInstance(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),
OffsetInstance(0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),
OffsetInstance(0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),
OffsetInstance(0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))
).toDF()
val expected = Seq(
Vectors.dense(0, 0.9419107, -0.6864404),
Vectors.dense(0, -0.2869094, 0.785771),
Vectors.dense(0, 0.5169222, -0.3344444),
Vectors.dense(0, 0.1812436, -0.6568422),
Vectors.dense(0, 0.1055254, 0.2979113),
Vectors.dense(-0.2147117, 0.991175, -0.6356096),
Vectors.dense(0.3390397, -0.3406099, 0.6870259),
Vectors.dense(-0.05990345, 0.53188982, -0.32118415),
Vectors.dense(-1.561613, 0.664647, -0.3192581),
Vectors.dense(0.3665034, 0.1039416, 0.1484616))
import GeneralizedLinearRegression._
var idx = 0
for (fitIntercept <- Seq(false, true)) {
for (family <- GeneralizedLinearRegression.supportedFamilyNames.sortWith(_ < _)) {
val trainer = new GeneralizedLinearRegression().setFamily(family)
.setFitIntercept(fitIntercept).setOffsetCol("offset")
.setWeightCol("weight").setLinkPredictionCol("linkPrediction")
if (family == "tweedie") trainer.setVariancePower(1.5)
val model = trainer.fit(dataset)
val actual = Vectors.dense(model.intercept, model.coefficients(0), model.coefficients(1))
assert(actual ~= expected(idx) absTol 1e-4, s"Model mismatch: GLM with family = $family," +
s" and fitIntercept = $fitIntercept.")
val familyLink = FamilyAndLink(trainer)
testTransformer[(Double, Double, Double, Vector)](dataset, model,
"features", "offset", "prediction", "linkPrediction") {
case Row(features: DenseVector, offset: Double, prediction1: Double,
linkPrediction1: Double) =>
val eta = BLAS.dot(features, model.coefficients) + model.intercept + offset
val prediction2 = familyLink.fitted(eta)
val linkPrediction2 = eta
assert(prediction1 ~= prediction2 relTol 1E-5, "Prediction mismatch: GLM with " +
s"family = $family, and fitIntercept = $fitIntercept.")
assert(linkPrediction1 ~= linkPrediction2 relTol 1E-5, "Link Prediction mismatch: " +
s"GLM with family = $family, and fitIntercept = $fitIntercept.")
}
idx += 1
}
}
}
test("glm summary: gaussian family with weight and offset") {
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b <- c(17, 19, 23, 29)
w <- c(1, 2, 3, 4)
off <- c(2, 3, 1, 4)
df <- as.data.frame(cbind(A, b))
*/
val dataset = Seq(
OffsetInstance(17.0, 1.0, 2.0, Vectors.dense(0.0, 5.0).toSparse),
OffsetInstance(19.0, 2.0, 3.0, Vectors.dense(1.0, 7.0)),
OffsetInstance(23.0, 3.0, 1.0, Vectors.dense(2.0, 11.0)),
OffsetInstance(29.0, 4.0, 4.0, Vectors.dense(3.0, 13.0))
).toDF()
/*
R code:
model <- glm(formula = "b ~ .", family = "gaussian", data = df,
weights = w, offset = off)
summary(model)
Deviance Residuals:
1 2 3 4
0.9600 -0.6788 -0.5543 0.4800
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 5.5400 4.8040 1.153 0.455
V1 -0.9600 2.7782 -0.346 0.788
V2 1.7000 0.9798 1.735 0.333
(Dispersion parameter for gaussian family taken to be 1.92)
Null deviance: 152.10 on 3 degrees of freedom
Residual deviance: 1.92 on 1 degrees of freedom
AIC: 13.238
Number of Fisher Scoring iterations: 2
residuals(model, type = "pearson")
1 2 3 4
0.9600000 -0.6788225 -0.5542563 0.4800000
residuals(model, type = "working")
1 2 3 4
0.96 -0.48 -0.32 0.24
residuals(model, type = "response")
1 2 3 4
0.96 -0.48 -0.32 0.24
*/
val trainer = new GeneralizedLinearRegression()
.setWeightCol("weight").setOffsetCol("offset")
val model = trainer.fit(dataset)
val coefficientsR = Vectors.dense(Array(-0.96, 1.7))
val interceptR = 5.54
val devianceResidualsR = Array(0.96, -0.67882, -0.55426, 0.48)
val pearsonResidualsR = Array(0.96, -0.67882, -0.55426, 0.48)
val workingResidualsR = Array(0.96, -0.48, -0.32, 0.24)
val responseResidualsR = Array(0.96, -0.48, -0.32, 0.24)
val seCoefR = Array(2.7782, 0.9798, 4.804)
val tValsR = Array(-0.34555, 1.73506, 1.15321)
val pValsR = Array(0.78819, 0.33286, 0.45478)
val dispersionR = 1.92
val nullDevianceR = 152.1
val residualDevianceR = 1.92
val residualDegreeOfFreedomNullR = 3
val residualDegreeOfFreedomR = 1
val aicR = 13.23758
assert(model.hasSummary)
val summary = model.summary
assert(summary.isInstanceOf[GeneralizedLinearRegressionTrainingSummary])
val devianceResiduals = summary.residuals()
.select(col("devianceResiduals"))
.collect()
.map(_.getDouble(0))
val pearsonResiduals = summary.residuals("pearson")
.select(col("pearsonResiduals"))
.collect()
.map(_.getDouble(0))
val workingResiduals = summary.residuals("working")
.select(col("workingResiduals"))
.collect()
.map(_.getDouble(0))
val responseResiduals = summary.residuals("response")
.select(col("responseResiduals"))
.collect()
.map(_.getDouble(0))
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
pearsonResiduals.zip(pearsonResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
workingResiduals.zip(workingResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
responseResiduals.zip(responseResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
assert(summary.dispersion ~== dispersionR absTol 1E-3)
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3)
assert(summary.deviance ~== residualDevianceR absTol 1E-3)
assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR)
assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR)
assert(summary.aic ~== aicR absTol 1E-3)
assert(summary.solver === "irls")
val summary2: GeneralizedLinearRegressionSummary = model.evaluate(dataset)
assert(summary.predictions.columns.toSet === summary2.predictions.columns.toSet)
assert(summary.predictionCol === summary2.predictionCol)
assert(summary.rank === summary2.rank)
assert(summary.degreesOfFreedom === summary2.degreesOfFreedom)
assert(summary.residualDegreeOfFreedom === summary2.residualDegreeOfFreedom)
assert(summary.residualDegreeOfFreedomNull === summary2.residualDegreeOfFreedomNull)
assert(summary.nullDeviance === summary2.nullDeviance)
assert(summary.deviance === summary2.deviance)
assert(summary.dispersion === summary2.dispersion)
assert(summary.aic === summary2.aic)
}
test("glm summary: binomial family with weight and offset") {
/*
R code:
df <- as.data.frame(matrix(c(
0.2, 1.0, 2.0, 0.0, 5.0,
0.5, 2.1, 0.5, 1.0, 2.0,
0.9, 0.4, 1.0, 2.0, 1.0,
0.7, 0.7, 0.0, 3.0, 3.0), 4, 5, byrow = TRUE))
*/
val dataset = Seq(
OffsetInstance(0.2, 1.0, 2.0, Vectors.dense(0.0, 5.0)),
OffsetInstance(0.5, 2.1, 0.5, Vectors.dense(1.0, 2.0)),
OffsetInstance(0.9, 0.4, 1.0, Vectors.dense(2.0, 1.0)),
OffsetInstance(0.7, 0.7, 0.0, Vectors.dense(3.0, 3.0))
).toDF()
/*
R code:
model <- glm(formula = "V1 ~ V4 + V5", family = "binomial", data = df,
weights = V2, offset = V3)
summary(model)
Deviance Residuals:
1 2 3 4
0.002584 -0.003800 0.012478 -0.001796
Coefficients:
Estimate Std. Error z value Pr(>|z|)
(Intercept) -0.2147 3.5687 -0.060 0.952
V4 0.9912 1.2344 0.803 0.422
V5 -0.6356 0.9669 -0.657 0.511
(Dispersion parameter for binomial family taken to be 1)
Null deviance: 2.17560881 on 3 degrees of freedom
Residual deviance: 0.00018005 on 1 degrees of freedom
AIC: 10.245
Number of Fisher Scoring iterations: 4
residuals(model, type = "pearson")
1 2 3 4
0.002586113 -0.003799744 0.012372235 -0.001796892
residuals(model, type = "working")
1 2 3 4
0.006477857 -0.005244163 0.063541250 -0.004691064
residuals(model, type = "response")
1 2 3 4
0.0010324375 -0.0013110318 0.0060225522 -0.0009832738
*/
val trainer = new GeneralizedLinearRegression()
.setFamily("Binomial")
.setWeightCol("weight")
.setOffsetCol("offset")
val model = trainer.fit(dataset)
val coefficientsR = Vectors.dense(Array(0.99117, -0.63561))
val interceptR = -0.21471
val devianceResidualsR = Array(0.00258, -0.0038, 0.01248, -0.0018)
val pearsonResidualsR = Array(0.00259, -0.0038, 0.01237, -0.0018)
val workingResidualsR = Array(0.00648, -0.00524, 0.06354, -0.00469)
val responseResidualsR = Array(0.00103, -0.00131, 0.00602, -0.00098)
val seCoefR = Array(1.23439, 0.9669, 3.56866)
val tValsR = Array(0.80297, -0.65737, -0.06017)
val pValsR = Array(0.42199, 0.51094, 0.95202)
val dispersionR = 1.0
val nullDevianceR = 2.17561
val residualDevianceR = 0.00018
val residualDegreeOfFreedomNullR = 3
val residualDegreeOfFreedomR = 1
val aicR = 10.24453
val summary = model.summary
val devianceResiduals = summary.residuals()
.select(col("devianceResiduals"))
.collect()
.map(_.getDouble(0))
val pearsonResiduals = summary.residuals("pearson")
.select(col("pearsonResiduals"))
.collect()
.map(_.getDouble(0))
val workingResiduals = summary.residuals("working")
.select(col("workingResiduals"))
.collect()
.map(_.getDouble(0))
val responseResiduals = summary.residuals("response")
.select(col("responseResiduals"))
.collect()
.map(_.getDouble(0))
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
pearsonResiduals.zip(pearsonResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
workingResiduals.zip(workingResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
responseResiduals.zip(responseResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
assert(summary.dispersion === dispersionR)
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3)
assert(summary.deviance ~== residualDevianceR absTol 1E-3)
assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR)
assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR)
assert(summary.aic ~== aicR absTol 1E-3)
assert(summary.solver === "irls")
}
test("glm summary: poisson family with weight and offset") {
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b <- c(2, 8, 3, 9)
w <- c(1, 2, 3, 4)
off <- c(2, 3, 1, 4)
df <- as.data.frame(cbind(A, b))
*/
val dataset = Seq(
OffsetInstance(2.0, 1.0, 2.0, Vectors.dense(0.0, 5.0).toSparse),
OffsetInstance(8.0, 2.0, 3.0, Vectors.dense(1.0, 7.0)),
OffsetInstance(3.0, 3.0, 1.0, Vectors.dense(2.0, 11.0)),
OffsetInstance(9.0, 4.0, 4.0, Vectors.dense(3.0, 13.0))
).toDF()
/*
R code:
model <- glm(formula = "b ~ .", family = "poisson", data = df,
weights = w, offset = off)
summary(model)
Deviance Residuals:
1 2 3 4
-2.0480 1.2315 1.8293 -0.7107
Coefficients:
Estimate Std. Error z value Pr(>|z|)
(Intercept) -4.5678 1.9625 -2.328 0.0199
V1 -2.8784 1.1683 -2.464 0.0137
V2 0.8859 0.4170 2.124 0.0336
(Dispersion parameter for poisson family taken to be 1)
Null deviance: 22.5585 on 3 degrees of freedom
Residual deviance: 9.5622 on 1 degrees of freedom
AIC: 51.242
Number of Fisher Scoring iterations: 5
residuals(model, type = "pearson")
1 2 3 4
-1.7480418 1.3037611 2.0750099 -0.6972966
residuals(model, type = "working")
1 2 3 4
-0.6891489 0.3833588 0.9710682 -0.1096590
residuals(model, type = "response")
1 2 3 4
-4.433948 2.216974 1.477983 -1.108487
*/
val trainer = new GeneralizedLinearRegression()
.setFamily("Poisson")
.setWeightCol("weight")
.setOffsetCol("offset")
val model = trainer.fit(dataset)
val coefficientsR = Vectors.dense(Array(-2.87843, 0.88589))
val interceptR = -4.56784
val devianceResidualsR = Array(-2.04796, 1.23149, 1.82933, -0.71066)
val pearsonResidualsR = Array(-1.74804, 1.30376, 2.07501, -0.6973)
val workingResidualsR = Array(-0.68915, 0.38336, 0.97107, -0.10966)
val responseResidualsR = Array(-4.43395, 2.21697, 1.47798, -1.10849)
val seCoefR = Array(1.16826, 0.41703, 1.96249)
val tValsR = Array(-2.46387, 2.12428, -2.32757)
val pValsR = Array(0.01374, 0.03365, 0.01993)
val dispersionR = 1.0
val nullDevianceR = 22.55853
val residualDevianceR = 9.5622
val residualDegreeOfFreedomNullR = 3
val residualDegreeOfFreedomR = 1
val aicR = 51.24218
val summary = model.summary
val devianceResiduals = summary.residuals()
.select(col("devianceResiduals"))
.collect()
.map(_.getDouble(0))
val pearsonResiduals = summary.residuals("pearson")
.select(col("pearsonResiduals"))
.collect()
.map(_.getDouble(0))
val workingResiduals = summary.residuals("working")
.select(col("workingResiduals"))
.collect()
.map(_.getDouble(0))
val responseResiduals = summary.residuals("response")
.select(col("responseResiduals"))
.collect()
.map(_.getDouble(0))
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
pearsonResiduals.zip(pearsonResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
workingResiduals.zip(workingResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
responseResiduals.zip(responseResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
assert(summary.dispersion === dispersionR)
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3)
assert(summary.deviance ~== residualDevianceR absTol 1E-3)
assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR)
assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR)
assert(summary.aic ~== aicR absTol 1E-3)
assert(summary.solver === "irls")
}
test("glm summary: gamma family with weight and offset") {
/*
R code:
A <- matrix(c(0, 5, 1, 2, 2, 1, 3, 3), 4, 2, byrow = TRUE)
b <- c(1, 2, 1, 2)
w <- c(1, 2, 3, 4)
off <- c(0, 0.5, 1, 0)
df <- as.data.frame(cbind(A, b))
*/
val dataset = Seq(
OffsetInstance(1.0, 1.0, 0.0, Vectors.dense(0.0, 5.0)),
OffsetInstance(2.0, 2.0, 0.5, Vectors.dense(1.0, 2.0)),
OffsetInstance(1.0, 3.0, 1.0, Vectors.dense(2.0, 1.0)),
OffsetInstance(2.0, 4.0, 0.0, Vectors.dense(3.0, 3.0))
).toDF()
/*
R code:
model <- glm(formula = "b ~ .", family = "Gamma", data = df,
weights = w, offset = off)
summary(model)
Deviance Residuals:
1 2 3 4
-0.17095 0.19867 -0.23604 0.03241
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) -0.56474 0.23866 -2.366 0.255
V1 0.07695 0.06931 1.110 0.467
V2 0.28068 0.07320 3.835 0.162
(Dispersion parameter for Gamma family taken to be 0.1212174)
Null deviance: 2.02568 on 3 degrees of freedom
Residual deviance: 0.12546 on 1 degrees of freedom
AIC: 0.93388
Number of Fisher Scoring iterations: 4
residuals(model, type = "pearson")
1 2 3 4
-0.16134949 0.20807694 -0.22544551 0.03258777
residuals(model, type = "working")
1 2 3 4
0.135315831 -0.084390309 0.113219135 -0.008279688
residuals(model, type = "response")
1 2 3 4
-0.1923918 0.2565224 -0.1496381 0.0320653
*/
val trainer = new GeneralizedLinearRegression()
.setFamily("Gamma")
.setWeightCol("weight")
.setOffsetCol("offset")
val model = trainer.fit(dataset)
val coefficientsR = Vectors.dense(Array(0.07695, 0.28068))
val interceptR = -0.56474
val devianceResidualsR = Array(-0.17095, 0.19867, -0.23604, 0.03241)
val pearsonResidualsR = Array(-0.16135, 0.20808, -0.22545, 0.03259)
val workingResidualsR = Array(0.13532, -0.08439, 0.11322, -0.00828)
val responseResidualsR = Array(-0.19239, 0.25652, -0.14964, 0.03207)
val seCoefR = Array(0.06931, 0.0732, 0.23866)
val tValsR = Array(1.11031, 3.83453, -2.3663)
val pValsR = Array(0.46675, 0.16241, 0.25454)
val dispersionR = 0.12122
val nullDevianceR = 2.02568
val residualDevianceR = 0.12546
val residualDegreeOfFreedomNullR = 3
val residualDegreeOfFreedomR = 1
val aicR = 0.93388
val summary = model.summary
val devianceResiduals = summary.residuals()
.select(col("devianceResiduals"))
.collect()
.map(_.getDouble(0))
val pearsonResiduals = summary.residuals("pearson")
.select(col("pearsonResiduals"))
.collect()
.map(_.getDouble(0))
val workingResiduals = summary.residuals("working")
.select(col("workingResiduals"))
.collect()
.map(_.getDouble(0))
val responseResiduals = summary.residuals("response")
.select(col("responseResiduals"))
.collect()
.map(_.getDouble(0))
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
pearsonResiduals.zip(pearsonResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
workingResiduals.zip(workingResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
responseResiduals.zip(responseResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
assert(summary.dispersion ~== dispersionR absTol 1E-3)
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3)
assert(summary.deviance ~== residualDevianceR absTol 1E-3)
assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR)
assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR)
assert(summary.aic ~== aicR absTol 1E-3)
assert(summary.solver === "irls")
}
test("glm summary: tweedie family with weight and offset") {
/*
R code:
df <- as.data.frame(matrix(c(
1.0, 1.0, 1.0, 0.0, 5.0,
0.5, 2.0, 3.0, 1.0, 2.0,
1.0, 3.0, 2.0, 2.0, 1.0,
0.0, 4.0, 0.0, 3.0, 3.0), 4, 5, byrow = TRUE))
*/
val dataset = Seq(
OffsetInstance(1.0, 1.0, 1.0, Vectors.dense(0.0, 5.0)),
OffsetInstance(0.5, 2.0, 3.0, Vectors.dense(1.0, 2.0)),
OffsetInstance(1.0, 3.0, 2.0, Vectors.dense(2.0, 1.0)),
OffsetInstance(0.0, 4.0, 0.0, Vectors.dense(3.0, 3.0))
).toDF()
/*
R code:
library(statmod)
model <- glm(V1 ~ V4 + V5, data = df, weights = V2, offset = V3,
family = tweedie(var.power = 1.6, link.power = 0.0))
summary(model)
Deviance Residuals:
1 2 3 4
0.8917 -2.1396 1.2252 -1.7946
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) -0.03047 3.65000 -0.008 0.995
V4 -1.14577 1.41674 -0.809 0.567
V5 -0.36585 0.97065 -0.377 0.771
(Dispersion parameter for Tweedie family taken to be 6.334961)
Null deviance: 12.784 on 3 degrees of freedom
Residual deviance: 10.095 on 1 degrees of freedom
AIC: NA
Number of Fisher Scoring iterations: 18
residuals(model, type = "pearson")
1 2 3 4
1.1472554 -1.4642569 1.4935199 -0.8025842
residuals(model, type = "working")
1 2 3 4
1.3624928 -0.8322375 0.9894580 -1.0000000
residuals(model, type = "response")
1 2 3 4
0.57671828 -2.48040354 0.49735052 -0.01040646
*/
val trainer = new GeneralizedLinearRegression()
.setFamily("tweedie")
.setVariancePower(1.6)
.setLinkPower(0.0)
.setWeightCol("weight")
.setOffsetCol("offset")
val model = trainer.fit(dataset)
val coefficientsR = Vectors.dense(Array(-1.14577, -0.36585))
val interceptR = -0.03047
val devianceResidualsR = Array(0.89171, -2.13961, 1.2252, -1.79463)
val pearsonResidualsR = Array(1.14726, -1.46426, 1.49352, -0.80258)
val workingResidualsR = Array(1.36249, -0.83224, 0.98946, -1)
val responseResidualsR = Array(0.57672, -2.4804, 0.49735, -0.01041)
val seCoefR = Array(1.41674, 0.97065, 3.65)
val tValsR = Array(-0.80873, -0.37691, -0.00835)
val pValsR = Array(0.56707, 0.77053, 0.99468)
val dispersionR = 6.33496
val nullDevianceR = 12.78358
val residualDevianceR = 10.09488
val residualDegreeOfFreedomNullR = 3
val residualDegreeOfFreedomR = 1
val summary = model.summary
val devianceResiduals = summary.residuals()
.select(col("devianceResiduals"))
.collect()
.map(_.getDouble(0))
val pearsonResiduals = summary.residuals("pearson")
.select(col("pearsonResiduals"))
.collect()
.map(_.getDouble(0))
val workingResiduals = summary.residuals("working")
.select(col("workingResiduals"))
.collect()
.map(_.getDouble(0))
val responseResiduals = summary.residuals("response")
.select(col("responseResiduals"))
.collect()
.map(_.getDouble(0))
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
pearsonResiduals.zip(pearsonResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
workingResiduals.zip(workingResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
responseResiduals.zip(responseResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
assert(summary.dispersion ~== dispersionR absTol 1E-3)
assert(summary.nullDeviance ~== nullDevianceR absTol 1E-3)
assert(summary.deviance ~== residualDevianceR absTol 1E-3)
assert(summary.residualDegreeOfFreedom === residualDegreeOfFreedomR)
assert(summary.residualDegreeOfFreedomNull === residualDegreeOfFreedomNullR)
assert(summary.solver === "irls")
}
test("glm handle collinear features") {
val collinearInstances = Seq(
Instance(1.0, 1.0, Vectors.dense(1.0, 2.0)),
Instance(2.0, 1.0, Vectors.dense(2.0, 4.0)),
Instance(3.0, 1.0, Vectors.dense(3.0, 6.0)),
Instance(4.0, 1.0, Vectors.dense(4.0, 8.0))
).toDF()
val trainer = new GeneralizedLinearRegression()
val model = trainer.fit(collinearInstances)
    // collinear features prevent the underlying WLS from solving analytically, so summary statistics are unavailable
intercept[UnsupportedOperationException] {
model.summary.coefficientStandardErrors
}
intercept[UnsupportedOperationException] {
model.summary.pValues
}
intercept[UnsupportedOperationException] {
model.summary.tValues
}
}
test("read/write") {
def checkModelData(
model: GeneralizedLinearRegressionModel,
model2: GeneralizedLinearRegressionModel): Unit = {
assert(model.intercept === model2.intercept)
assert(model.coefficients.toArray === model2.coefficients.toArray)
}
val glr = new GeneralizedLinearRegression()
testEstimatorAndModelReadWrite(glr, datasetPoissonLog,
GeneralizedLinearRegressionSuite.allParamSettings,
GeneralizedLinearRegressionSuite.allParamSettings, checkModelData)
}
test("should support all NumericType labels and weights, and not support other types") {
val glr = new GeneralizedLinearRegression().setMaxIter(1)
MLTestingUtils.checkNumericTypes[
GeneralizedLinearRegressionModel, GeneralizedLinearRegression](
glr, spark, isClassification = false) { (expected, actual) =>
assert(expected.intercept === actual.intercept)
assert(expected.coefficients === actual.coefficients)
}
}
test("glm accepts Dataset[LabeledPoint]") {
val context = spark
import context.implicits._
new GeneralizedLinearRegression()
.setFamily("gaussian")
.fit(datasetGaussianIdentity.as[LabeledPoint])
}
test("glm summary: feature name") {
// dataset1 with no attribute
val dataset1 = Seq(
Instance(2.0, 1.0, Vectors.dense(0.0, 5.0)),
Instance(8.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(3.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(9.0, 4.0, Vectors.dense(3.0, 13.0)),
Instance(2.0, 5.0, Vectors.dense(2.0, 3.0))
).toDF()
// dataset2 with attribute
val datasetTmp = Seq(
(2.0, 1.0, 0.0, 5.0),
(8.0, 2.0, 1.0, 7.0),
(3.0, 3.0, 2.0, 11.0),
(9.0, 4.0, 3.0, 13.0),
(2.0, 5.0, 2.0, 3.0)
).toDF("y", "w", "x1", "x2")
val formula = new RFormula().setFormula("y ~ x1 + x2")
val dataset2 = formula.fit(datasetTmp).transform(datasetTmp)
val expectedFeature = Seq(Array("features_0", "features_1"), Array("x1", "x2"))
var idx = 0
for (dataset <- Seq(dataset1, dataset2)) {
val model = new GeneralizedLinearRegression().fit(dataset)
model.summary.featureNames.zip(expectedFeature(idx))
.foreach{ x => assert(x._1 === x._2) }
idx += 1
}
}
test("glm summary: coefficient with statistics") {
/*
R code:
A <- matrix(c(0, 1, 2, 3, 2, 5, 7, 11, 13, 3), 5, 2)
b <- c(2, 8, 3, 9, 2)
df <- as.data.frame(cbind(A, b))
model <- glm(formula = "b ~ .", data = df)
summary(model)
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 0.7903 4.0129 0.197 0.862
V1 0.2258 2.1153 0.107 0.925
V2 0.4677 0.5815 0.804 0.506
*/
val dataset = Seq(
Instance(2.0, 1.0, Vectors.dense(0.0, 5.0)),
Instance(8.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(3.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(9.0, 4.0, Vectors.dense(3.0, 13.0)),
Instance(2.0, 5.0, Vectors.dense(2.0, 3.0))
).toDF()
val expectedFeature = Seq(Array("features_0", "features_1"),
Array("(Intercept)", "features_0", "features_1"))
val expectedEstimate = Seq(Vectors.dense(0.2884, 0.538),
Vectors.dense(0.7903, 0.2258, 0.4677))
val expectedStdError = Seq(Vectors.dense(1.724, 0.3787),
Vectors.dense(4.0129, 2.1153, 0.5815))
var idx = 0
for (fitIntercept <- Seq(false, true)) {
val trainer = new GeneralizedLinearRegression()
.setFamily("gaussian")
.setFitIntercept(fitIntercept)
val model = trainer.fit(dataset)
val coefficientsWithStatistics = model.summary.coefficientsWithStatistics
coefficientsWithStatistics.map(_._1).zip(expectedFeature(idx)).foreach { x =>
assert(x._1 === x._2, "Feature name mismatch in coefficientsWithStatistics") }
assert(Vectors.dense(coefficientsWithStatistics.map(_._2)) ~= expectedEstimate(idx)
absTol 1E-3, "Coefficients mismatch in coefficientsWithStatistics")
assert(Vectors.dense(coefficientsWithStatistics.map(_._3)) ~= expectedStdError(idx)
absTol 1E-3, "Standard error mismatch in coefficientsWithStatistics")
idx += 1
}
}
test("generalized linear regression: regularization parameter") {
/*
R code:
a1 <- c(0, 1, 2, 3)
a2 <- c(5, 2, 1, 3)
b <- c(1, 0, 1, 0)
data <- as.data.frame(cbind(a1, a2, b))
df <- suppressWarnings(createDataFrame(data))
for (regParam in c(0.0, 0.1, 1.0)) {
model <- spark.glm(df, b ~ a1 + a2, regParam = regParam)
print(as.vector(summary(model)$aic))
}
[1] 12.88188
[1] 12.92681
[1] 13.32836
*/
val dataset = Seq(
LabeledPoint(1, Vectors.dense(5, 0)),
LabeledPoint(0, Vectors.dense(2, 1)),
LabeledPoint(1, Vectors.dense(1, 2)),
LabeledPoint(0, Vectors.dense(3, 3))
).toDF()
val expected = Seq(12.88188, 12.92681, 13.32836)
var idx = 0
for (regParam <- Seq(0.0, 0.1, 1.0)) {
val trainer = new GeneralizedLinearRegression()
.setRegParam(regParam)
.setLabelCol("label")
.setFeaturesCol("features")
val model = trainer.fit(dataset)
val actual = model.summary.aic
      assert(actual ~= expected(idx) absTol 1e-4,
        s"Model mismatch: GLM with regParam = $regParam.")
idx += 1
}
}
test("evaluate with labels that are not doubles") {
    // Evaluate with a dataset whose labels are not doubles to verify correct casting
val dataset = Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(19.0, 1.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 1.0, Vectors.dense(2.0, 11.0)),
Instance(29.0, 1.0, Vectors.dense(3.0, 13.0))
).toDF()
val trainer = new GeneralizedLinearRegression()
.setMaxIter(1)
val model = trainer.fit(dataset)
assert(model.hasSummary)
val summary = model.summary
    val floatLabelDataset = dataset.select(col(model.getLabelCol).cast(FloatType),
      col(model.getFeaturesCol))
    val evalSummary = model.evaluate(floatLabelDataset)
// The calculations below involve pattern matching with Label as a double
assert(evalSummary.nullDeviance === summary.nullDeviance)
assert(evalSummary.deviance === summary.deviance)
assert(evalSummary.aic === summary.aic)
}
}
object GeneralizedLinearRegressionSuite {
/**
* Mapping from all Params to valid settings which differ from the defaults.
* This is useful for tests which need to exercise all Params, such as save/load.
* This excludes input columns to simplify some tests.
*/
val allParamSettings: Map[String, Any] = Map(
"family" -> "poisson",
"link" -> "log",
"fitIntercept" -> true,
"maxIter" -> 2, // intentionally small
"tol" -> 0.8,
"regParam" -> 0.01,
"predictionCol" -> "myPrediction",
"variancePower" -> 1.0)
def generateGeneralizedLinearRegressionInput(
intercept: Double,
coefficients: Array[Double],
xMean: Array[Double],
xVariance: Array[Double],
nPoints: Int,
seed: Int,
noiseLevel: Double,
family: String,
link: String): Seq[LabeledPoint] = {
val rnd = new Random(seed)
def rndElement(i: Int) = {
(rnd.nextDouble() - 0.5) * math.sqrt(12.0 * xVariance(i)) + xMean(i)
}
val (generator, mean) = family match {
case "gaussian" => (new StandardNormalGenerator, 0.0)
case "poisson" => (new PoissonGenerator(1.0), 1.0)
case "gamma" => (new GammaGenerator(1.0, 1.0), 1.0)
}
generator.setSeed(seed)
(0 until nPoints).map { _ =>
val features = Vectors.dense(coefficients.indices.map(rndElement).toArray)
val eta = BLAS.dot(Vectors.dense(coefficients), features) + intercept
val mu = link match {
case "identity" => eta
case "log" => math.exp(eta)
case "sqrt" => math.pow(eta, 2.0)
case "inverse" => 1.0 / eta
}
val label = mu + noiseLevel * (generator.nextValue() - mean)
// Return LabeledPoints with DenseVector
LabeledPoint(label, features)
}
}
}
| brad-kaiser/spark | mllib/src/test/scala/org/apache/spark/ml/regression/GeneralizedLinearRegressionSuite.scala | Scala | apache-2.0 | 69,237 |
/*
* Copyright (C) 2013-2014 by Michael Hombre Brinkmann
*/
package net.twibs.util
import com.ibm.icu.util.ULocale
import net.twibs.testutil.TwibsTest
class LocaleUtilsTest extends TwibsTest {
test("Locale lookup") {
val locales = ULocale.GERMAN :: ULocale.US :: ULocale.UK :: Nil
LocaleUtils.lookupLocale(locales, ULocale.GERMAN) should be(ULocale.GERMAN)
LocaleUtils.lookupLocale(locales, ULocale.GERMANY) should be(ULocale.GERMAN)
LocaleUtils.lookupLocale(locales, ULocale.US) should be(ULocale.US)
LocaleUtils.lookupLocale(locales, ULocale.UK) should be(ULocale.UK)
LocaleUtils.lookupLocale(locales, ULocale.ENGLISH) should be(ULocale.GERMAN)
}
} | hombre/twibs | twibs-util-test/src/test/scala/net/twibs/util/LocaleUtilsTest.scala | Scala | apache-2.0 | 683 |
package funsets
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/**
* This class is a test suite for the methods in object FunSets. To run
* the test suite, you can either:
* - run the "test" command in the SBT console
* - right-click the file in eclipse and chose "Run As" - "JUnit Test"
*/
@RunWith(classOf[JUnitRunner])
class FunSetSuite extends FunSuite {
/**
* Link to the scaladoc - very clear and detailed tutorial of FunSuite
*
* http://doc.scalatest.org/1.9.1/index.html#org.scalatest.FunSuite
*
* Operators
* - test
* - ignore
* - pending
*/
/**
* Tests are written using the "test" operator and the "assert" method.
*/
test("string take") {
val message = "hello, world"
assert(message.take(5) == "hello")
}
/**
* For ScalaTest tests, there exists a special equality operator "===" that
* can be used inside "assert". If the assertion fails, the two values will
* be printed in the error message. Otherwise, when using "==", the test
* error message will only say "assertion failed", without showing the values.
*
* Try it out! Change the values so that the assertion fails, and look at the
* error message.
*/
test("adding ints") {
assert(1 + 2 === 3)
}
import FunSets._
test("contains is implemented") {
assert(contains(x => true, 100))
}
/**
* When writing tests, one would often like to re-use certain values for multiple
* tests. For instance, we would like to create an Int-set and have multiple test
* about it.
*
* Instead of copy-pasting the code for creating the set into every test, we can
* store it in the test class using a val:
*
* val s1 = singletonSet(1)
*
* However, what happens if the method "singletonSet" has a bug and crashes? Then
* the test methods are not even executed, because creating an instance of the
* test class fails!
*
* Therefore, we put the shared values into a separate trait (traits are like
* abstract classes), and create an instance inside each test method.
*
*/
trait TestSets {
def foo = ((x: Int) => x < 12)
def bar = ((x: Int) => x > 7)
def evens = ((x: Int) => x % 2 == 0)
def odds = ((x: Int) => x % 2 == 1)
def plusOne = (x: Int) => x + 1
def plusTwo = (x: Int) => x + 2
def positive = (x: Int) => x >= 0
def negative = (x: Int) => x < 0
val s1 = singletonSet(1)
val s2 = singletonSet(2)
val s3 = singletonSet(3)
val s4 = foo
val s5 = bar
    val us1s2 = union(s1, s2)
val us4s5 = union(s4, s5)
val is4s5 = intersect(s4, s5)
val ds4s5 = diff(s4, s5)
val ds5s4 = diff(s5, s4)
}
  /**
   * This test was initially disabled (by using "ignore") because the method
   * "singletonSet" was not yet implemented and the test would have failed.
   *
   * Once "singletonSet" was implemented, "ignore" was exchanged for "test".
   */
//ignore("singletonSet(1) contains 1") {
test("singletonSet(1) contains 1") {
/**
* We create a new instance of the "TestSets" trait, this gives us access
* to the values "s1" to "s3".
*/
new TestSets {
/**
* The string argument of "assert" is a message that is printed in case
* the test fails. This helps identifying which assertion failed.
*/
assert(contains(s1, 1), "Singleton")
}
}
test("union contains all elements") {
new TestSets {
val s = union(s1, s2)
assert(contains(s, 1), "Union 1")
assert(contains(s, 2), "Union 2")
assert(!contains(s, 3), "Union 3")
}
}
test("intersect contains the correct elements") {
new TestSets {
val s = intersect(s4, s5)
assert(contains(s, 10), "Intersection 10")
assert(contains(s, 11), "Intersection 11")
assert(!contains(s, 7), "Intersection 7")
}
}
test("diff contains the correct elements") {
new TestSets {
val s = diff(s4, s5)
assert(contains(s, 3), "diff 3")
assert(contains(s, 4), "diff 4")
assert(!contains(s, 10), "diff 10")
}
}
test("filter f by g") {
new TestSets {
val s = filter(bar, (x: Int) => x % 2 == 0)
assert(contains(s, 10), "filter bar by even for 10")
assert(!contains(s, 11), "filter bar by even for 11")
      assert(!contains(s, 9), "filter bar by even for 9, should be false, because 9 is odd")
      assert(!contains(s, 4), "filter bar by even for 4, should be false, because 4 is not > 7")
}
}
test("Exists even number in bar") {
new TestSets {
      val evenExists = exists(bar, (x: Int) => x % 2 == 0)
      assert(evenExists, "even members of bar exist")
}
}
test("ForAll s1, are they greater than zero?") {
new TestSets {
def fortyplus = ((x: Int) => x > 39)
      assert(forall(fortyplus, positive), "all numbers >= 40 are positive")
      assert(!forall(fortyplus, negative), "not all numbers >= 40 are negative")
}
}
test("map addOne over Set evens") {
new TestSets {
val s = map(evens, plusOne)
assert(contains(s, 101), "if 100 is in base set, 101 is in mapped set")
assert(!contains(s, 10), "10 is not an even number plus one!")
}
}
test("Map square to singleton set of 3") {
new TestSets {
val s = map(s3, (x: Int) => x * x)
assert(contains(s, 9), "nine is a square of 3")
assert(!contains(s, 3), "three is not is a square of 3")
}
}
}
| gvamos/MilanOpera | done/funsets/src/test/scala/funsets/FunSetSuite.scala | Scala | gpl-2.0 | 5,645 |
package com.blinkbox.books.storageservice
import akka.actor._
import akka.util.Timeout
import com.blinkbox.books.config.Configuration
import com.blinkbox.books.logging.{DiagnosticExecutionContext, Loggers}
import com.blinkbox.books.spray.Directives._
import com.blinkbox.books.spray.{HealthCheckHttpService, HttpServer, v2}
import com.blinkbox.books.spray.v2.Implicits.throwableMarshaller
import com.blinkbox.books.storageservice.util.{StoreMappingUtils, Token}
import com.typesafe.scalalogging.StrictLogging
import spray.can.Http
import spray.http.StatusCodes._
import spray.http.Uri.Path
import spray.http._
import spray.httpx.unmarshalling._
import spray.routing._
import spray.routing.directives.DebuggingDirectives
import spray.util.NotImplementedException
import scala.concurrent.ExecutionContext
case class StorageServiceRoutes(appConfig: AppConfig, qms: StorageService, actorRefFactory: ActorRefFactory)(implicit context: ExecutionContext) extends HttpService
with v2.JsonSupport with StrictLogging {
val mappingUri = "mappings"
val resourcesUri = "resources"
val localUrl = appConfig.api.localUrl
val mappingRoute = path(mappingUri) {
get {
complete(StatusCodes.OK, qms.mappings)
}
}
val setResourcesRoute = {
implicit val formUnmarshaller = FormDataUnmarshallers.multipartFormDataUnmarshaller(strict = false)
path(resourcesUri) {
post {
entity(as[MultipartFormData]) { form =>
val extractor = FormFieldExtractor(form)
val data = extractor.field("data").as[Array[Byte]]
val label = extractor.field("label").as[String]
val storeResult = for {
d <- data.right
l <- label.right
} yield
qms.storeAsset(l, d)
storeResult match {
case Right(result) =>
onSuccess(result)(complete(Accepted, _))
case Left(e) =>
e match {
case ContentExpected => complete(BadRequest, "Please provide content for the fields label and data")
case _ => complete(InternalServerError)
}
}
}
}
}
}
val getResourcesRoute = {
path(resourcesUri / Rest) { token =>
get {
complete(qms.getTokenStatus(Token(token)))
}
}
}
private def exceptionHandler = ExceptionHandler {
case e: NotImplementedException =>
failWith(new IllegalRequestException(BadRequest, "Unhandled error, no Storage Providers Found"))
case e: IllegalArgumentException =>
failWith(new IllegalRequestException(BadRequest, e.getMessage))
}
val routes = monitor(logger, throwableMarshaller){
handleExceptions(exceptionHandler) {
neverCache {
DebuggingDirectives.logRequest("get-user") {
rootPath(Path(localUrl.getPath)) {
mappingRoute ~ setResourcesRoute ~ getResourcesRoute
}
}
}
}
}
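  // Sketch of the HTTP surface defined above (paths relative to `localUrl`; verbs and status
  // codes taken from the routes, everything else illustrative):
  //
  //   GET  /mappings            -> 200 OK with the configured storage mappings
  //   POST /resources           -> 202 Accepted; multipart form with fields "label" and "data"
  //   GET  /resources/<token>   -> 200 OK with the status of the given token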
}
class WebService(appConfig: AppConfig, qms: StorageService) extends HttpServiceActor {
implicit val executionContext = DiagnosticExecutionContext(actorRefFactory.dispatcher)
val healthService = new HealthCheckHttpService {
override implicit val actorRefFactory = WebService.this.actorRefFactory
override val basePath = Path./
}
val routes = new StorageServiceRoutes(appConfig, qms, actorRefFactory).routes
override def receive: Actor.Receive = runRoute(routes ~ healthService.routes)
}
object Boot extends App with Configuration with Loggers with StrictLogging {
val appConfig = AppConfig(config)
logger.info("Starting QuarterMaster StorageServer")
implicit val system = ActorSystem("storage-service", config)
implicit val executionContext = DiagnosticExecutionContext(system.dispatcher)
implicit val timeout = Timeout(appConfig.api.timeout)
val service = system.actorOf(Props(new WebService(appConfig, new StorageService(appConfig))))
val localUrl = appConfig.api.localUrl
HttpServer(Http.Bind(service, localUrl.getHost, port = localUrl.getPort))
}
| blinkboxbooks/storage-service | src/main/scala/com/blinkbox/books/storageservice/API.scala | Scala | mit | 4,027 |
/**
* Copyright (C) 2015 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.builder
import scala.scalajs.js.JSApp
// Scala.js starting point for Form Builder
object FormBuilderApp extends JSApp {
override def main(): Unit = {
println("Starting Form Builder...")
def initializeOnDomReady(): Unit = {
StaticUpload
}
$(initializeOnDomReady _)
}
} | ajw625/orbeon-forms | builder/src/builder/scala/org/orbeon/builder/FormBuilderApp.scala | Scala | lgpl-2.1 | 991 |
/*
* Copyright 2019 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.acinq.eclair
import org.scalatest.funsuite.AnyFunSuite
import scodec.bits._
class UInt64Spec extends AnyFunSuite {
test("handle values from 0 to 2^64-1") {
val a = UInt64(hex"0xffffffffffffffff")
val b = UInt64(hex"0xfffffffffffffffe")
val c = UInt64(42)
val z = UInt64(0)
val l = UInt64(Long.MaxValue)
val l1 = UInt64(hex"8000000000000000") // Long.MaxValue + 1
assert(a > b)
assert(a.toBigInt > b.toBigInt)
assert(b < a)
assert(b.toBigInt < a.toBigInt)
assert(l.toBigInt < l1.toBigInt)
assert(z < a && z < b && z < c && z < l && c < l && l < l1 && l < b && l < a)
assert(a == a)
assert(a == UInt64.MaxValue)
assert(l.toByteVector == hex"7fffffffffffffff")
assert(l.toString == Long.MaxValue.toString)
assert(l.toBigInt == BigInt(Long.MaxValue))
assert(l1.toByteVector == hex"8000000000000000")
assert(l1.toString == "9223372036854775808")
assert(l1.toBigInt == BigInt("9223372036854775808"))
assert(a.toByteVector === hex"ffffffffffffffff")
assert(a.toString === "18446744073709551615") // 2^64 - 1
assert(a.toBigInt === BigInt("18446744073709551615"))
assert(b.toByteVector === hex"fffffffffffffffe")
assert(b.toString === "18446744073709551614")
assert(b.toBigInt === BigInt("18446744073709551614"))
assert(c.toByteVector === hex"00000000000002a")
assert(c.toString === "42")
assert(c.toBigInt === BigInt("42"))
assert(z.toByteVector === hex"000000000000000")
assert(z.toString === "0")
assert(z.toBigInt === BigInt("0"))
assert(UInt64(hex"ff").toByteVector == hex"0000000000000ff")
assert(UInt64(hex"800").toByteVector == hex"000000000000800")
}
test("use unsigned comparison when comparing millisatoshis to uint64") {
assert(UInt64(123) <= MilliSatoshi(123) && UInt64(123) >= MilliSatoshi(123))
assert(UInt64(123) < MilliSatoshi(1234))
assert(UInt64(1234) > MilliSatoshi(123))
assert(UInt64(hex"ffffffffffffffff") > MilliSatoshi(123))
assert(UInt64(hex"ffffffffffffffff") > MilliSatoshi(-123))
assert(UInt64(hex"7ffffffffffffffe") < MilliSatoshi(Long.MaxValue)) // 7ffffffffffffffe == Long.MaxValue - 1
assert(UInt64(hex"7fffffffffffffff") <= MilliSatoshi(Long.MaxValue) && UInt64(hex"7fffffffffffffff") >= MilliSatoshi(Long.MaxValue)) // 7fffffffffffffff == Long.MaxValue
assert(UInt64(hex"8000000000000000") > MilliSatoshi(Long.MaxValue)) // 8000000000000000 == Long.MaxValue + 1
assert(UInt64(1) > MilliSatoshi(-1))
assert(UInt64(0) > MilliSatoshi(Long.MinValue))
}
} | ACINQ/eclair | eclair-core/src/test/scala/fr/acinq/eclair/UInt64Spec.scala | Scala | apache-2.0 | 3,183 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.rest.kubernetes
import java.io.{File, FileInputStream, FileOutputStream, InputStreamReader}
import java.security.{KeyStore, PrivateKey}
import java.security.cert.Certificate
import java.util.UUID
import com.google.common.base.Charsets
import org.bouncycastle.asn1.pkcs.PrivateKeyInfo
import org.bouncycastle.cert.X509CertificateHolder
import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter
import org.bouncycastle.openssl.{PEMKeyPair, PEMParser}
import org.bouncycastle.openssl.jcajce.JcaPEMKeyConverter
import scala.collection.mutable
import org.apache.spark.SparkException
import org.apache.spark.util.Utils
private[spark] object PemsToKeyStoreConverter {
/**
* Loads the given key-cert pair into a temporary keystore file. Returns the File pointing
* to where the keyStore was written to disk.
*/
def convertPemsToTempKeyStoreFile(
keyPemFile: File,
certPemFile: File,
keyAlias: String,
keyStorePassword: String,
keyPassword: String,
keyStoreType: Option[String]): File = {
require(keyPemFile.isFile, s"Key PEM file provided at ${keyPemFile.getAbsolutePath}" +
" does not exist or is not a file.")
require(certPemFile.isFile, s"Cert PEM file provided at ${certPemFile.getAbsolutePath}" +
" does not exist or is not a file.")
val privateKey = parsePrivateKeyFromPemFile(keyPemFile)
val certificates = parseCertificatesFromPemFile(certPemFile)
val resolvedKeyStoreType = keyStoreType.getOrElse(KeyStore.getDefaultType)
val keyStore = KeyStore.getInstance(resolvedKeyStoreType)
keyStore.load(null, null)
keyStore.setKeyEntry(
keyAlias,
privateKey,
keyPassword.toCharArray,
certificates)
val keyStoreDir = Utils.createTempDir("temp-keystores")
val keyStoreFile = new File(keyStoreDir, s"keystore-${UUID.randomUUID()}.$resolvedKeyStoreType")
Utils.tryWithResource(new FileOutputStream(keyStoreFile)) { storeStream =>
keyStore.store(storeStream, keyStorePassword.toCharArray)
}
keyStoreFile
}
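  // Illustrative usage sketch: the file paths, alias and passwords below are hypothetical
  // placeholders, shown only to demonstrate how the conversion above is typically invoked.
  //
  //   val keyStoreFile = PemsToKeyStoreConverter.convertPemsToTempKeyStoreFile(
  //     keyPemFile = new File("/path/to/tls.key"),
  //     certPemFile = new File("/path/to/tls.crt"),
  //     keyAlias = "key",
  //     keyStorePassword = "changeit",
  //     keyPassword = "changeit",
  //     keyStoreType = Some("JKS"))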
def convertCertPemToTrustStore(
certPemFile: File,
trustStoreType: Option[String]): KeyStore = {
require(certPemFile.isFile, s"Cert PEM file provided at ${certPemFile.getAbsolutePath}" +
" does not exist or is not a file.")
val trustStore = KeyStore.getInstance(trustStoreType.getOrElse(KeyStore.getDefaultType))
trustStore.load(null, null)
parseCertificatesFromPemFile(certPemFile).zipWithIndex.foreach { case (cert, index) =>
trustStore.setCertificateEntry(s"certificate-$index", cert)
}
trustStore
}
def convertCertPemToTempTrustStoreFile(
certPemFile: File,
trustStorePassword: String,
trustStoreType: Option[String]): File = {
val trustStore = convertCertPemToTrustStore(certPemFile, trustStoreType)
val tempTrustStoreDir = Utils.createTempDir(namePrefix = "temp-trustStore")
val tempTrustStoreFile = new File(tempTrustStoreDir,
s"trustStore.${trustStoreType.getOrElse(KeyStore.getDefaultType)}")
Utils.tryWithResource(new FileOutputStream(tempTrustStoreFile)) {
trustStore.store(_, trustStorePassword.toCharArray)
}
tempTrustStoreFile
}
private def withPemParsedFromFile[T](pemFile: File)(f: (PEMParser => T)): T = {
Utils.tryWithResource(new FileInputStream(pemFile)) { pemStream =>
Utils.tryWithResource(new InputStreamReader(pemStream, Charsets.UTF_8)) { pemReader =>
Utils.tryWithResource(new PEMParser(pemReader))(f)
}
}
}
private def parsePrivateKeyFromPemFile(keyPemFile: File): PrivateKey = {
withPemParsedFromFile(keyPemFile) { keyPemParser =>
val converter = new JcaPEMKeyConverter
keyPemParser.readObject() match {
case privateKey: PrivateKeyInfo =>
converter.getPrivateKey(privateKey)
case keyPair: PEMKeyPair =>
converter.getPrivateKey(keyPair.getPrivateKeyInfo)
case _ =>
throw new SparkException(s"Key file provided at ${keyPemFile.getAbsolutePath}" +
s" is not a key pair or private key PEM file.")
}
}
}
private def parseCertificatesFromPemFile(certPemFile: File): Array[Certificate] = {
withPemParsedFromFile(certPemFile) { certPemParser =>
val certificates = mutable.Buffer[Certificate]()
var pemObject = certPemParser.readObject()
while (pemObject != null) {
pemObject match {
case certificate: X509CertificateHolder =>
val converter = new JcaX509CertificateConverter
certificates += converter.getCertificate(certificate)
case _ =>
}
pemObject = certPemParser.readObject()
}
if (certificates.isEmpty) {
throw new SparkException(s"No certificates found in ${certPemFile.getAbsolutePath}")
}
certificates.toArray
}
}
}
| kimoonkim/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/rest/kubernetes/PemsToKeyStoreConverter.scala | Scala | apache-2.0 | 5,698 |
/*
* MUSIT is a museum database to archive natural and cultural history data.
* Copyright (C) 2016 MUSIT Norway, part of www.uio.no (University of Oslo)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package repositories.storage.old_dao.event
import models.storage.event.EventTypeRegistry.TopLevelEvents._
import models.storage.event.dto._
import models.storage.event.{ActorRole, EventType, ObjectRole, PlaceRole}
import no.uio.musit.models.{EventId, MuseumId, ObjectId, StorageNodeDatabaseId}
import no.uio.musit.test.MusitSpecWithAppPerSuite
import no.uio.musit.test.matchers.MusitResultValues
import org.scalatest.Inspectors._
import utils.testhelpers.{EventGenerators_Old, NodeGenerators}
/**
* Test specs for the EventDao.
*/
class EventDaoSpec
extends MusitSpecWithAppPerSuite
with EventGenerators_Old
with NodeGenerators
with MusitResultValues {
// This is mutable to allow keeping track of the last inserted eventId.
private var latestEventId: EventId = _
"The EventDao" when {
"processing controls with sub-controls and observations" should {
"succeed when inserting a Control" in {
val mid = MuseumId(2)
val ctrl = createControl(defaultBuilding.id)
latestEventId = addControl(mid, ctrl).futureValue
latestEventId mustBe an[EventId]
latestEventId mustBe EventId(1L)
}
"return the Control associated with the provided Id" in {
val mid = MuseumId(2)
val ctrl = createControl(defaultBuilding.id)
val ctrlId = addControl(mid, ctrl).futureValue
val res = eventDao.getEvent(mid, latestEventId).futureValue
res.successValue.value match {
case base: BaseEventDto =>
val c = DtoConverters.CtrlConverters.controlFromDto(base)
c.eventType mustBe EventType.fromEventTypeId(ControlEventType.id)
c.registeredBy mustBe Some(defaultActorId)
c.registeredDate must not be None
c.temperature mustBe ctrl.temperature
c.alcohol mustBe ctrl.alcohol
c.cleaning mustBe ctrl.cleaning
c.pest mustBe ctrl.pest
c.relativeHumidity mustBe ctrl.relativeHumidity
c.mold mustBe ctrl.mold
c.gas mustBe ctrl.gas
c.hypoxicAir mustBe ctrl.hypoxicAir
c.lightingCondition mustBe ctrl.lightingCondition
case _ =>
fail("Expected dto to be of type BaseEventDto")
}
}
}
"processing observations" should {
"succeed when inserting an observation" in {
val mid = MuseumId(2)
val obs = createObservation(defaultBuilding.id)
val eventId = addObservation(mid, obs).futureValue
latestEventId = eventId
eventId mustBe an[EventId]
}
"return the Observation associated with the provided Id" in {
val mid = MuseumId(2)
val obs = createObservation(defaultBuilding.id)
val obsId = addObservation(mid, obs).futureValue
val res = eventDao.getEvent(mid, latestEventId).futureValue
res.successValue.value match {
case base: BaseEventDto =>
val o = DtoConverters.ObsConverters.observationFromDto(base)
o.eventType mustBe EventType.fromEventTypeId(ObservationEventType.id)
o.registeredBy mustBe Some(defaultActorId)
o.registeredDate must not be None
o.alcohol mustBe obs.alcohol
o.cleaning mustBe obs.cleaning
o.gas mustBe obs.gas
o.hypoxicAir mustBe obs.hypoxicAir
o.lightingCondition mustBe obs.lightingCondition
o.mold mustBe obs.mold
o.pest mustBe obs.pest
            o.relativeHumidity mustBe obs.relativeHumidity
o.temperature mustBe obs.temperature
case _ =>
fail("Expected dto to be of type BaseEventDto")
}
}
}
"processing environment requirements" should {
val envReq = createEnvRequirement(defaultBuilding.id)
"succeed when inserting an Environment Requirement" in {
val mid = MuseumId(2)
val erDto = DtoConverters.EnvReqConverters.envReqToDto(envReq)
val eventId = eventDao.insertEvent(mid, erDto).futureValue
latestEventId = eventId
eventId mustBe an[EventId]
}
"return the Environment Requirement event with the provided ID" in {
val mid = MuseumId(2)
val res = eventDao.getEvent(mid, latestEventId).futureValue
res.successValue.value match {
case ext: ExtendedDto =>
val er = DtoConverters.EnvReqConverters.envReqFromDto(ext)
er.eventType mustBe envReq.eventType
er.note mustBe envReq.note
er.registeredBy mustBe Some(defaultActorId)
er.registeredDate must not be None
er.light mustBe envReq.light
er.temperature mustBe envReq.temperature
er.hypoxicAir mustBe envReq.hypoxicAir
er.airHumidity mustBe envReq.airHumidity
er.cleaning mustBe envReq.cleaning
case _ =>
fail("Expected dto to be of type ExtendedDto")
}
}
}
"processing Move events" should {
"succeed when moving an object" in {
val mid = MuseumId(2)
val moveObj = createMoveObject(
from = Some(StorageNodeDatabaseId(1)),
to = StorageNodeDatabaseId(2)
)
val dto = DtoConverters.MoveConverters.moveObjectToDto(moveObj)
val eventId = eventDao.insertEvent(mid, dto).futureValue
latestEventId = eventId
eventId mustBe an[EventId]
}
"return the move object event" in {
val mid = MuseumId(2)
val res = eventDao.getEvent(mid, latestEventId, recursive = false).futureValue
val theDto = res.successValue.value
theDto mustBe a[BaseEventDto]
val br = theDto.asInstanceOf[BaseEventDto]
val baseRoleActor = EventRoleActor.toActorRole(br.relatedActors.head)
val baseRolePlace = EventRolePlace.toPlaceRole(br.relatedPlaces.head)
val baseRoleObj = EventRoleObject.toObjectRole(br.relatedObjects.head)
br.eventTypeId mustBe MoveObjectType.id
baseRoleActor mustBe ActorRole(1, defaultActorId)
baseRoleObj mustBe ObjectRole(1, ObjectId(1))
baseRolePlace mustBe PlaceRole(1, StorageNodeDatabaseId(2))
br.valueLong mustBe Some(1L)
}
"succeed when moving a storage node" in {
val mid = MuseumId(2)
val moveNode = createMoveNode(
from = Some(StorageNodeDatabaseId(1)),
to = StorageNodeDatabaseId(2)
)
val dto = DtoConverters.MoveConverters.moveNodeToDto(moveNode)
val eventId = eventDao.insertEvent(mid, dto).futureValue
latestEventId = eventId
      eventId mustBe an[EventId]
}
"return the move node event" in {
val mid = MuseumId(2)
val res = eventDao.getEvent(mid, latestEventId, recursive = false).futureValue
val theDto = res.successValue.value
theDto mustBe a[BaseEventDto]
val br = theDto.asInstanceOf[BaseEventDto]
val baseRoleActor = EventRoleActor.toActorRole(br.relatedActors.head)
val baseRolePlace = EventRolePlace.toPlaceRole(br.relatedPlaces.head)
val baseRoleObj = EventRoleObject.toObjectRole(br.relatedObjects.head)
br.eventTypeId mustBe MoveNodeType.id
baseRoleActor mustBe ActorRole(1, defaultActorId)
baseRolePlace mustBe PlaceRole(1, StorageNodeDatabaseId(2))
baseRoleObj mustBe ObjectRole(1, ObjectId(1))
br.valueLong mustBe Some(1L)
}
}
"fetching events for a node" should {
"return all control events" in {
val ctrl1 = createControl(defaultBuilding.id)
val ctrl2 = createControl(defaultBuilding.id)
val ctrl3 = createControl(defaultBuilding.id)
val ctrlId1 = addControl(defaultMuseumId, ctrl1).futureValue
val ctrlId2 = addControl(defaultMuseumId, ctrl2).futureValue
val ctrlId3 = addControl(defaultMuseumId, ctrl3).futureValue
val controls = eventDao
.getEventsForNode(
mid = defaultMuseumId,
id = defaultBuilding.id.get,
eventType = ControlEventType
)
.futureValue
controls must not be empty
controls.size mustBe 5
forAll(controls) { c =>
c.eventTypeId mustBe ControlEventType.id
c.relatedObjects.head.objectId.underlying mustBe defaultBuilding.id.value.underlying
}
}
"return all observation events" in {
val mid = MuseumId(2)
val obs1 = createObservation(defaultRoom.id)
val obs2 = createObservation(defaultRoom.id)
val obsId1 = addObservation(mid, obs1).futureValue
val obsId2 = addObservation(mid, obs2).futureValue
val observations = eventDao
.getEventsForNode(
mid,
defaultRoom.id.get,
ObservationEventType
)
.futureValue
observations must not be empty
observations.size mustBe 2
forAll(observations) { o =>
o.eventTypeId mustBe ObservationEventType.id
o.relatedObjects.head.objectId.underlying mustBe defaultRoom.id.value.underlying
}
}
}
}
}
| kpmeen/musit | service_storagefacility/test/repositories/storage/old_dao/event/EventDaoSpec.scala | Scala | gpl-2.0 | 10,112 |
package org.template.recommendation
import org.apache.predictionio.controller.IEngineFactory
import org.apache.predictionio.controller.Engine
case class Query(user: String, num: Int, creationYear: Option[Int] = None)
case class PredictedResult(itemScores: Array[ItemScore])
// HOWTO: added movie creation year to predicted result.
case class ItemScore(item: String, score: Double, creationYear: Option[Int])
object RecommendationEngine extends IEngineFactory {
def apply() =
new Engine(classOf[DataSource],
classOf[Preparator],
Map("als" → classOf[ALSAlgorithm]),
classOf[Serving])
}
| alex9311/PredictionIO | examples/scala-parallel-recommendation/custom-query/src/main/scala/Engine.scala | Scala | apache-2.0 | 615 |
package org.apress.prospark
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{ Milliseconds, Seconds, StreamingContext }
import org.apache.hadoop.io.{ Text, LongWritable, IntWritable }
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.spark.streaming.dstream.DStream
import org.apache.hadoop.mapred.TextOutputFormat
import org.apache.hadoop.mapreduce.lib.output.{ TextOutputFormat => NewTextOutputFormat }
import org.apache.spark.streaming.dstream.PairDStreamFunctions
import org.apache.log4j.LogManager
import org.json4s._
import org.json4s.native.JsonMethods._
import java.text.SimpleDateFormat
import java.util.Date
object RedditMappingApp {
def main(args: Array[String]) {
if (args.length != 2) {
System.err.println(
"Usage: RedditMappingApp <appname> <input_path>")
System.exit(1)
}
val Seq(appName, inputPath) = args.toSeq
val LOG = LogManager.getLogger(this.getClass)
val conf = new SparkConf()
.setAppName(appName)
.setJars(SparkContext.jarOfClass(this.getClass).toSeq)
val ssc = new StreamingContext(conf, Seconds(1))
LOG.info("Started at %d".format(ssc.sparkContext.startTime))
val comments = ssc.fileStream[LongWritable, Text, TextInputFormat](inputPath, (f: Path) => true, newFilesOnly = false).map(pair => pair._2.toString)
val sdf = new SimpleDateFormat("yyyy-MM-dd")
val tsKey = "created_utc"
val secs = 1000L
val keyedByDay = comments.map(rec => {
val ts = (parse(rec) \\ tsKey).values
(sdf.format(new Date(ts.toString.toLong * secs)), rec)
})
val keyedByDayPart = comments.mapPartitions(iter => {
var ret = List[(String, String)]()
while (iter.hasNext) {
val rec = iter.next
val ts = (parse(rec) \\ tsKey).values
ret.::=(sdf.format(new Date(ts.toString.toLong * secs)), rec)
}
ret.iterator
})
val wordTokens = comments.map(rec => {
((parse(rec) \\ "body")).values.toString.split(" ")
})
val wordTokensFlat = comments.flatMap(rec => {
((parse(rec) \\ "body")).values.toString.split(" ")
})
val filterSubreddit = comments.filter(rec =>
(parse(rec) \\ "subreddit").values.toString.equals("AskReddit"))
val sortedByAuthor = comments.transform(rdd =>
(rdd.sortBy(rec => (parse(rec) \\ "author").values.toString)))
ssc.start()
ssc.awaitTermination()
}
} | ZubairNabi/prosparkstreaming | Chap3/src/main/scala/org/apress/prospark/L3-DStreamMapping.scala | Scala | apache-2.0 | 2,501 |
package pureconfig.module.catseffect2
import scala.language.higherKinds
import scala.reflect.ClassTag
import cats.effect.{Blocker, ContextShift, Sync}
import pureconfig.module.catseffect2
import pureconfig.{ConfigReader, ConfigSource}
package object syntax {
implicit class CatsEffectConfigSource(private val cs: ConfigSource) extends AnyVal {
@inline
final def loadF[F[_]: Sync: ContextShift, A: ConfigReader](blocker: Blocker)(implicit ct: ClassTag[A]): F[A] =
catseffect2.loadF(cs, blocker)
}
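  // Illustrative usage sketch: `AppConfig` and the derivation import are hypothetical and assume
  // the pureconfig-generic module is on the classpath; only `loadF` above is provided here.
  //
  //   import cats.effect.{Blocker, ContextShift, IO}
  //   import pureconfig.generic.auto._
  //
  //   case class AppConfig(host: String, port: Int)
  //
  //   def load(blocker: Blocker)(implicit cs: ContextShift[IO]): IO[AppConfig] =
  //     ConfigSource.default.loadF[IO, AppConfig](blocker)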
}
| pureconfig/pureconfig | modules/cats-effect2/src/main/scala/pureconfig/module/catseffect2/syntax/package.scala | Scala | mpl-2.0 | 522 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.sources
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.io.CsvInputFormat
import org.apache.flink.api.java.{DataSet, ExecutionEnvironment}
import org.apache.flink.types.Row
import org.apache.flink.api.java.io.RowCsvInputFormat
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.core.fs.Path
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.table.api.TableException
import scala.collection.mutable
/**
* A [[BatchTableSource]] and [[StreamTableSource]] for simple CSV files with a
* (logically) unlimited number of fields.
*
* @param path The path to the CSV file.
* @param fieldNames The names of the table fields.
* @param fieldTypes The types of the table fields.
* @param fieldDelim The field delimiter, "," by default.
* @param rowDelim The row delimiter, "\\n" by default.
* @param quoteCharacter An optional quote character for String values, null by default.
* @param ignoreFirstLine Flag to ignore the first line, false by default.
* @param ignoreComments An optional prefix to indicate comments, null by default.
  * @param lenient Flag to skip records with parse errors instead of failing, false by default.
*/
class CsvTableSource(
private val path: String,
private val fieldNames: Array[String],
private val fieldTypes: Array[TypeInformation[_]],
private val fieldDelim: String = CsvInputFormat.DEFAULT_FIELD_DELIMITER,
private val rowDelim: String = CsvInputFormat.DEFAULT_LINE_DELIMITER,
private val quoteCharacter: Character = null,
private val ignoreFirstLine: Boolean = false,
private val ignoreComments: String = null,
private val lenient: Boolean = false)
extends BatchTableSource[Row]
with StreamTableSource[Row]
with ProjectableTableSource[Row] {
/**
* A [[BatchTableSource]] and [[StreamTableSource]] for simple CSV files with a
* (logically) unlimited number of fields.
*
* @param path The path to the CSV file.
* @param fieldNames The names of the table fields.
* @param fieldTypes The types of the table fields.
*/
def this(path: String, fieldNames: Array[String], fieldTypes: Array[TypeInformation[_]]) =
this(path, fieldNames, fieldTypes, CsvInputFormat.DEFAULT_FIELD_DELIMITER,
CsvInputFormat.DEFAULT_LINE_DELIMITER, null, false, null, false)
if (fieldNames.length != fieldTypes.length) {
throw TableException("Number of field names and field types must be equal.")
}
private val returnType = new RowTypeInfo(fieldTypes, fieldNames)
private var selectedFields: Array[Int] = fieldTypes.indices.toArray
/**
* Returns the data of the table as a [[DataSet]] of [[Row]].
*
* NOTE: This method is for internal use only for defining a [[TableSource]].
* Do not use it in Table API programs.
*/
override def getDataSet(execEnv: ExecutionEnvironment): DataSet[Row] = {
execEnv.createInput(createCsvInput(), returnType)
}
/** Returns the [[RowTypeInfo]] for the return type of the [[CsvTableSource]]. */
override def getReturnType: RowTypeInfo = returnType
/**
* Returns the data of the table as a [[DataStream]] of [[Row]].
*
* NOTE: This method is for internal use only for defining a [[TableSource]].
* Do not use it in Table API programs.
*/
override def getDataStream(streamExecEnv: StreamExecutionEnvironment): DataStream[Row] = {
streamExecEnv.createInput(createCsvInput(), returnType)
}
/** Returns a copy of [[TableSource]] with ability to project fields */
override def projectFields(fields: Array[Int]): CsvTableSource = {
val (newFields, newFieldNames, newFieldTypes) = if (fields.nonEmpty) {
(fields, fields.map(fieldNames(_)), fields.map(fieldTypes(_)))
} else {
      // When reporting the number of records only, we must still read some column to get the row count.
// (e.g. SQL: select count(1) from csv_table)
// We choose the first column here.
(Array(0), Array(fieldNames.head), Array[TypeInformation[_]](fieldTypes.head))
}
val source = new CsvTableSource(path,
newFieldNames,
newFieldTypes,
fieldDelim,
rowDelim,
quoteCharacter,
ignoreFirstLine,
ignoreComments,
lenient)
source.selectedFields = newFields
source
}
private def createCsvInput(): RowCsvInputFormat = {
val inputFormat = new RowCsvInputFormat(
new Path(path),
fieldTypes,
rowDelim,
fieldDelim,
selectedFields)
inputFormat.setSkipFirstLineAsHeader(ignoreFirstLine)
inputFormat.setLenient(lenient)
if (quoteCharacter != null) {
inputFormat.enableQuotedStringParsing(quoteCharacter)
}
if (ignoreComments != null) {
inputFormat.setCommentPrefix(ignoreComments)
}
inputFormat
}
override def equals(other: Any): Boolean = other match {
case that: CsvTableSource => returnType == that.returnType &&
path == that.path &&
fieldDelim == that.fieldDelim &&
rowDelim == that.rowDelim &&
quoteCharacter == that.quoteCharacter &&
ignoreFirstLine == that.ignoreFirstLine &&
ignoreComments == that.ignoreComments &&
lenient == that.lenient
case _ => false
}
override def hashCode(): Int = {
returnType.hashCode()
}
}
object CsvTableSource {
/**
* A builder for creating [[CsvTableSource]] instances.
*
* For example:
*
* {{{
   *   val source: CsvTableSource = CsvTableSource.builder()
* .path("/path/to/your/file.csv")
* .field("myfield", Types.STRING)
* .field("myfield2", Types.INT)
* .build()
* }}}
*
*/
class Builder {
private val schema: mutable.LinkedHashMap[String, TypeInformation[_]] =
mutable.LinkedHashMap[String, TypeInformation[_]]()
private var quoteCharacter: Character = _
private var path: String = _
private var fieldDelim: String = CsvInputFormat.DEFAULT_FIELD_DELIMITER
private var lineDelim: String = CsvInputFormat.DEFAULT_LINE_DELIMITER
private var isIgnoreFirstLine: Boolean = false
private var commentPrefix: String = _
private var lenient: Boolean = false
/**
* Sets the path to the CSV file. Required.
*
* @param path the path to the CSV file
*/
def path(path: String): Builder = {
this.path = path
this
}
/**
* Sets the field delimiter, "," by default.
*
* @param delim the field delimiter
*/
def fieldDelimiter(delim: String): Builder = {
this.fieldDelim = delim
this
}
/**
* Sets the line delimiter, "\\n" by default.
*
* @param delim the line delimiter
*/
def lineDelimiter(delim: String): Builder = {
this.lineDelim = delim
this
}
/**
* Adds a field with the field name and the type information. Required.
     * This method can be called multiple times. The call order of this method also
     * defines the order of the fields in a row.
*
* @param fieldName the field name
* @param fieldType the type information of the field
*/
def field(fieldName: String, fieldType: TypeInformation[_]): Builder = {
if (schema.contains(fieldName)) {
throw new IllegalArgumentException(s"Duplicate field name $fieldName.")
}
schema += (fieldName -> fieldType)
this
}
/**
* Sets a quote character for String values, null by default.
*
* @param quote the quote character
*/
def quoteCharacter(quote: Character): Builder = {
this.quoteCharacter = quote
this
}
/**
* Sets a prefix to indicate comments, null by default.
*
* @param prefix the prefix to indicate comments
*/
def commentPrefix(prefix: String): Builder = {
this.commentPrefix = prefix
this
}
/**
     * Ignore the first line. By default the first line is not skipped.
*/
def ignoreFirstLine(): Builder = {
this.isIgnoreFirstLine = true
this
}
/**
     * Skip records with parse errors instead of failing. By default an exception is thrown.
*/
def ignoreParseErrors(): Builder = {
this.lenient = true
this
}
/**
     * Applies the current values and constructs a newly-created [[CsvTableSource]].
*
* @return a newly-created [[CsvTableSource]].
*/
def build(): CsvTableSource = {
if (path == null) {
throw new IllegalArgumentException("Path must be defined.")
}
if (schema.isEmpty) {
throw new IllegalArgumentException("Fields can not be empty.")
}
new CsvTableSource(
path,
schema.keys.toArray,
schema.values.toArray,
fieldDelim,
lineDelim,
quoteCharacter,
isIgnoreFirstLine,
commentPrefix,
lenient)
}
}
/**
* Return a new builder that builds a [[CsvTableSource]].
*
* For example:
*
* {{{
* val source: CsvTableSource = CsvTableSource
* .builder()
* .path("/path/to/your/file.csv")
* .field("myfield", Types.STRING)
* .field("myfield2", Types.INT)
* .build()
* }}}
* @return a new builder to build a [[CsvTableSource]]
*/
def builder(): Builder = new Builder
}
| haohui/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/sources/CsvTableSource.scala | Scala | apache-2.0 | 10,318 |
package mot.dump
import mot.protocol.Frame
import java.net.ServerSocket
import java.net.InetSocketAddress
import mot.util.Util.FunctionToRunnable
import java.net.Socket
import java.net.SocketException
import com.typesafe.scalalogging.slf4j.StrictLogging
import scala.util.control.NonFatal
import java.io.PrintStream
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.LinkedBlockingQueue
import scala.collection.JavaConversions._
import java.text.SimpleDateFormat
import java.util.TimeZone
import java.util.concurrent.atomic.AtomicLong
import scala.io.Source
import java.io.OutputStream
import java.io.IOException
import java.nio.charset.StandardCharsets.UTF_8
import java.util.concurrent.TimeUnit
import mot.impl.Connection
import mot.util.Util
import java.net.InetAddress
case class Listener(bufferSize: Int) {
val queue = new LinkedBlockingQueue[Event](bufferSize)
val overflows = new AtomicLong
}
final class Dumper(dumperPort: Int) extends StrictLogging {
def dump(event: Event): Unit = {
// Optimize the very common case of no listeners
if (currentListeners.isEmpty)
return
// Avoid wrapping in Scala iterators, which add overhead
val it = currentListeners.keys
while (it.hasMoreElements) {
val listener = it.nextElement()
val success = listener.queue.offer(event)
if (!success)
listener.overflows.incrementAndGet()
}
}
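  // Design note: dump() never blocks the producing connection. Each listener owns a bounded
  // queue; offer() drops the event and increments `overflows` when a consumer falls behind.
  // A consumer obtained via listen() drains its queue itself, roughly (illustrative sketch,
  // `dumper` standing for an instance of this class):
  //
  //   val listener = dumper.listen(bufferSize = 10000)
  //   try {
  //     val event = listener.queue.poll(200, TimeUnit.MILLISECONDS)
  //     if (event != null) { /* filter and print the event */ }
  //   } finally dumper.unlisten(listener)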
val currentListeners = new ConcurrentHashMap[Listener, Boolean]
val serverSocket = new ServerSocket
val acceptorThread = new Thread(acceptLoop _, "mot-dump-acceptor")
@volatile var closed = false
def start() = {
serverSocket.bind(new InetSocketAddress(InetAddress.getByName(null) /* loopback interface */, dumperPort))
acceptorThread.start()
}
def stop() {
closed = true
Util.closeSocket(serverSocket)
acceptorThread.join()
}
def listen(bufferSize: Int) = {
val listener = new Listener(bufferSize)
currentListeners.put(listener, true)
listener
}
def unlisten(listener: Listener): Unit = {
currentListeners.remove(listener)
}
def acceptLoop() {
try {
while (true) {
val socket = serverSocket.accept()
new Thread(() => processClient(socket), "mot-dump-handler-for-" + socket.getRemoteSocketAddress).start()
}
} catch {
case e: IOException if closed => // closing, exception expected
}
}
val parser = new DumpFilterParser
class ArgumentError(msg: String) extends Exception(msg)
def processClient(socket: Socket) = {
socket.setSoTimeout(100) // localhost should be fast
val is = socket.getInputStream
val os = socket.getOutputStream
try {
// read lines until empty one
val lines = Source.fromInputStream(is).getLines.takeWhile(!_.isEmpty).toSeq
val params = parseParameters(lines)
val showBody = params.get("body").map(_.toBoolean).getOrElse(false)
val showAttributes = params.get("attributes").map(_.toBoolean).getOrElse(false)
val showBodyLength = params.get("length").map(_.toInt).getOrElse(1024)
val bufferSize = params.get("buffer-size").map(_.toInt).getOrElse(10000)
val filterOpt = params.get("filter").map { str =>
parser.parseAll(str) match {
case parser.Success(result, next) => result
case parser.NoSuccess((msg, next)) => throw new ArgumentError(s"Error parsing expression: $msg")
}
}
val filter = filterOpt.getOrElse(Filters.All)
val listener = listen(bufferSize)
try {
@volatile var finished = false
def eofReader() = try {
          // Closing the input stream is the client's signal to stop the dump; handling EOF here
          // gives the server the opportunity to send a summary at the end.
val c = is.read()
if (c == -1)
finished = true
else
logger.error("Unexpected byte in input stream: " + c)
} catch {
case e: IOException => logger.error("Unexpected error reading input stream: " + e.getMessage)
}
socket.setSoTimeout(0) // everything read, now wait forever for EOF
new Thread(eofReader _, "mot-dump-eof-reader-for-" + socket.getRemoteSocketAddress).start()
val sdf = new SimpleDateFormat("HH:mm:ss.SSS'Z'")
sdf.setTimeZone(TimeZone.getTimeZone("UTC"))
var dumped = 0L
var processed = 0L
while (!finished && !closed) {
val event = listener.queue.poll(200, TimeUnit.MILLISECONDS)
if (event != null) {
if (filter.filter(event)) {
event.print(os, sdf, showBody, showBodyLength, showAttributes)
dumped += 1
}
processed += 1
}
}
// EOF received, print summary
val summary =
s"$processed events occured and processed during capture (regardless of current filter)\n" +
s"${listener.overflows} dropped because the buffer was too small\n" +
s"$dumped events passed the filter and were dumped\n"
os.write(summary.getBytes(UTF_8))
} finally {
unlisten(listener)
}
} catch {
case e: ArgumentError =>
logger.info(s"Invalid dump filter: " + e.getMessage)
os.write(e.getMessage.getBytes(UTF_8))
os.write('\n')
case e: SocketException =>
logger.info(s"Client ${socket.getRemoteSocketAddress} gone (${e.getMessage})")
case NonFatal(e) =>
logger.error("Error dumping messages", e)
try {
val ps = new PrintStream(os)
e.printStackTrace(ps)
ps.flush()
} catch {
case NonFatal(e) => logger.error("Could not send message in catch block", e)
}
} finally {
Util.closeSocket(socket)
}
}
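  /** Parses `key=value` lines into a map, failing on any line without a '='. */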
def parseParameters(lines: Seq[String]) = {
val pairs = for (line <- lines) yield {
val parts = line.split("=", 2).toSeq
if (parts.size < 2)
throw new Exception("Invalid line: " + line)
val Seq(key, value) = parts
(key, value)
}
pairs.toMap
}
} | marianobarrios/mot | src/main/scala/mot/dump/Dumper.scala | Scala | bsd-2-clause | 6,125 |
package tests.rescala.fullmv.mirrors
import java.util.concurrent.atomic.AtomicReference
import org.scalatest.funsuite.AnyFunSuite
import rescala.fullmv._
import DistributedFullMVApi.{FullMVEngine, FullMVTurnLocalClone, SerializationGraphTracking, FullMVTurnImpl}
import rescala.parrp.Backoff
import tests.rescala.testtools.Spawn
import scala.annotation.tailrec
import scala.concurrent.TimeoutException
import scala.concurrent.duration.Duration
import scala.util.{Failure, Random, Success}
class LockMirrorStressTest extends AnyFunSuite {
test("stress") {
val numWorkers = 4
val hosts = Array.tabulate(numWorkers)(i => new FullMVEngine(Duration.Zero, "stress-" + i))
val turns = Array.tabulate(numWorkers) { i =>
val turn = hosts(i).newTurn()
turn.beginExecuting()
new AtomicReference(turn)
}
val duration = 10000
println(s"starting lock stress test " + (if (duration == 0) "until key press"
else s"for ${duration / 1000} seconds..."))
var running: Boolean = true
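    // Each worker repeatedly either retires its own turn and starts a fresh one (1 in 5 chance),
    // or clones a random worker's current turn onto its own host and tries to lock it against its
    // own turn, backing off and retrying until the lock is acquired and released or the test ends.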
val threads = Array.tabulate(numWorkers)(i =>
Spawn {
try {
val random = new Random()
var ownTurn = turns(i).get
var iterations = 0L
while (running) {
if (random.nextInt(5) == 0) {
ownTurn.completeExecuting()
ownTurn = hosts(i).newTurn()
ownTurn.beginExecuting()
turns(i).set(ownTurn)
} else {
val pick = random.nextInt(numWorkers)
val backoff = new Backoff(maxBackoff = 100L * 1000L * 1000L)
@tailrec def reTryLock(): Unit = {
if (running) {
val turnOnLocalHost = FullMVTurnLocalClone(turns(pick).get, hosts(i))
if (turnOnLocalHost.phase < TurnPhase.Completed) {
SerializationGraphTracking.tryLock(turnOnLocalHost, ownTurn, UnlockedUnknown) match {
case LockedSameSCC(lock) => lock.asyncUnlock()
case _ => backoff.backoff(); reTryLock()
}
} else reTryLock()
}
}
reTryLock()
}
iterations += 1
}
iterations
} catch {
case t: Throwable =>
running = false
throw t
}
}
)
val timeout = System.currentTimeMillis() + duration
while (running && (if (duration == 0) System.in.available() == 0 else System.currentTimeMillis() < timeout)) {
Thread.sleep(50)
}
if (!running)
println(s"Premature termination after ${(duration - (timeout - System.currentTimeMillis())) / 1000} seconds")
running = false
val finalTimeout = System.currentTimeMillis() + 500
val scores = threads.map(_.awaitTry(math.max(0, finalTimeout - System.currentTimeMillis())))
println("lock stress test thread results:")
println("\t" + scores.zipWithIndex.map { case (count, idx) => idx + ": " + count }.mkString("\n\t"))
scores.find {
case Failure(ex: TimeoutException) => false
case Failure(_) => true
case Success(_) => false
}.asInstanceOf[Option[Failure[_]]].foreach {
case Failure(ex) =>
ex.printStackTrace()
}
scores.foldLeft(Option(0L)) {
case (None, _) => None
case (Some(score), Failure(_)) => None
case (Some(score), Success(moreScore)) => Some(score + moreScore)
} match {
case None =>
println("no total and stats due to failures. state snapshot:")
println(turns.zipWithIndex.map {
case (t, idx) =>
val turn = t.get
s"$idx: $turn with ${turn.asInstanceOf[FullMVTurnImpl].subsumableLock.get}"
}.mkString("\n"))
fail("there were errors")
case Some(sum) =>
println(s"lock stress test totaled $sum iterations (individual scores: ${scores.mkString(", ")}")
turns.foreach(_.get.completeExecuting())
hosts.foreach(host => println(s"$host orphan stats: ${host.cacheStatus}"))
println(" == Orphan listing == ")
hosts.foreach { host =>
if (!host.instances.isEmpty || !host.lockHost.instances.isEmpty) {
println(s"orphans on $host:")
val it1 = host.instances.values().iterator()
while (it1.hasNext) println("\t" + it1.next())
val it2 = host.lockHost.instances.values().iterator()
while (it2.hasNext) println("\t" + it2.next())
}
}
assert(hosts.map(host => host.instances.size() + host.lockHost.instances.size()).sum === 0)
}
}
}
| guidosalva/REScala | Code/Extensions/MultiversionDistributed/multiversion/src/test/scala/tests/rescala/fullmv/mirrors/LockMirrorStressTest.scala | Scala | apache-2.0 | 4,777 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.containerpool.test
import java.time.Instant
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import org.junit.runner.RunWith
import org.scalatest.BeforeAndAfterAll
import org.scalatest.FlatSpecLike
import org.scalatest.Matchers
import org.scalatest.junit.JUnitRunner
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.FSM
import akka.actor.FSM.CurrentState
import akka.actor.FSM.SubscribeTransitionCallBack
import akka.actor.FSM.Transition
import akka.stream.scaladsl.Source
import akka.testkit.ImplicitSender
import akka.testkit.TestKit
import akka.util.ByteString
import common.LoggedFunction
import common.StreamLogging
import scala.concurrent.ExecutionContext
import spray.json._
import spray.json.DefaultJsonProtocol._
import whisk.common.Logging
import whisk.common.TransactionId
import whisk.core.connector.ActivationMessage
import whisk.core.containerpool._
import whisk.core.containerpool.logging.LogCollectingException
import whisk.core.entity._
import whisk.core.entity.ExecManifest.RuntimeManifest
import whisk.core.entity.ExecManifest.ImageName
import whisk.core.entity.size._
import whisk.http.Messages
@RunWith(classOf[JUnitRunner])
class ContainerProxyTests
extends TestKit(ActorSystem("ContainerProxys"))
with ImplicitSender
with FlatSpecLike
with Matchers
with BeforeAndAfterAll
with StreamLogging {
override def afterAll = TestKit.shutdownActorSystem(system)
val timeout = 5.seconds
val log = logging
// Common entities to pass to the tests. We don't really care what's inside
// those for the behavior testing here, as none of the contents will really
// reach a container anyway. We merely assert that passing and extraction of
// the values is done properly.
val exec = CodeExecAsString(RuntimeManifest("actionKind", ImageName("testImage")), "testCode", None)
val memoryLimit = 256.MB
val invocationNamespace = EntityName("invocationSpace")
val action = ExecutableWhiskAction(EntityPath("actionSpace"), EntityName("actionName"), exec)
val message = ActivationMessage(
TransactionId.testing,
action.fullyQualifiedName(true),
action.rev,
Identity(Subject(), invocationNamespace, AuthKey(), Set()),
ActivationId(),
invocationNamespace.toPath,
InstanceId(0),
blocking = false,
content = None)
/*
* Helpers for assertions and actor lifecycles
*/
/** Imitates a StateTimeout in the FSM */
def timeout(actor: ActorRef) = actor ! FSM.StateTimeout
/** Registers the transition callback and expects the first message */
def registerCallback(c: ActorRef) = {
c ! SubscribeTransitionCallBack(testActor)
expectMsg(CurrentState(c, Uninitialized))
}
/** Pre-warms the given state-machine, assumes good cases */
def preWarm(machine: ActorRef) = {
machine ! Start(exec, memoryLimit)
expectMsg(Transition(machine, Uninitialized, Starting))
expectPreWarmed(exec.kind)
expectMsg(Transition(machine, Starting, Started))
}
/** Run the common action on the state-machine, assumes good cases */
def run(machine: ActorRef, currentState: ContainerState) = {
machine ! Run(action, message)
expectMsg(Transition(machine, currentState, Running))
expectWarmed(invocationNamespace.name, action)
expectMsg(Transition(machine, Running, Ready))
}
/** Expect a NeedWork message with prewarmed data */
def expectPreWarmed(kind: String) = expectMsgPF() {
    case NeedWork(PreWarmedData(_, `kind`, `memoryLimit`)) => true
}
/** Expect a NeedWork message with warmed data */
def expectWarmed(namespace: String, action: ExecutableWhiskAction) = {
val test = EntityName(namespace)
expectMsgPF() {
case NeedWork(WarmedData(_, `test`, `action`, _)) => true
}
}
/** Expect the container to pause successfully */
def expectPause(machine: ActorRef) = {
expectMsg(Transition(machine, Ready, Pausing))
expectMsg(Transition(machine, Pausing, Paused))
}
/** Creates an inspectable version of the ack method, which records all calls in a buffer */
def createAcker = LoggedFunction { (_: TransactionId, activation: WhiskActivation, _: Boolean, _: InstanceId) =>
activation.annotations.get("limits") shouldBe Some(action.limits.toJson)
activation.annotations.get("path") shouldBe Some(action.fullyQualifiedName(false).toString.toJson)
activation.annotations.get("kind") shouldBe Some(action.exec.kind.toJson)
Future.successful(())
}
/** Creates an inspectable factory */
def createFactory(response: Future[Container]) = LoggedFunction {
(_: TransactionId, _: String, _: ImageName, _: Boolean, _: ByteSize) =>
response
}
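  /** Creates an inspectable log collector that completes every call with the given response */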
def createCollector(response: Future[ActivationLogs] = Future.successful(ActivationLogs(Vector.empty))) =
LoggedFunction {
(transid: TransactionId,
user: Identity,
activation: WhiskActivation,
container: Container,
action: ExecutableWhiskAction) =>
response
}
def createStore = LoggedFunction { (transid: TransactionId, activation: WhiskActivation) =>
Future.successful(())
}
behavior of "ContainerProxy"
/*
* SUCCESSFUL CASES
*/
it should "create a container given a Start message" in within(timeout) {
val container = new TestContainer
val factory = createFactory(Future.successful(container))
val store = createStore
val machine =
childActorOf(
ContainerProxy.props(factory, createAcker, store, createCollector(), InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
preWarm(machine)
factory.calls should have size 1
val (tid, name, _, _, memory) = factory.calls(0)
tid shouldBe TransactionId.invokerWarmup
name should fullyMatch regex """wsk\d+_\d+_prewarm_actionKind"""
memory shouldBe memoryLimit
}
it should "run a container which has been started before, write an active ack, write to the store, pause and remove the container" in within(
timeout) {
val container = new TestContainer
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val collector = createCollector()
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
preWarm(machine)
run(machine, Started)
// Timeout causes the container to pause
timeout(machine)
expectPause(machine)
// Another pause causes the container to be removed
timeout(machine)
expectMsg(ContainerRemoved)
expectMsg(Transition(machine, Paused, Removing))
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 1
collector.calls should have size 1
container.suspendCount shouldBe 1
container.destroyCount shouldBe 1
acker.calls should have size 1
store.calls should have size 1
}
}
it should "run an action and continue with a next run without pausing the container" in within(timeout) {
val container = new TestContainer
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val collector = createCollector()
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
preWarm(machine)
run(machine, Started)
// Note that there are no intermediate state changes
run(machine, Ready)
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 2
collector.calls should have size 2
container.suspendCount shouldBe 0
acker.calls should have size 2
store.calls should have size 2
}
}
it should "run an action after pausing the container" in within(timeout) {
val container = new TestContainer
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val collector = createCollector()
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
preWarm(machine)
run(machine, Started)
timeout(machine)
expectPause(machine)
run(machine, Paused)
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 2
collector.calls should have size 2
container.suspendCount shouldBe 1
container.resumeCount shouldBe 1
acker.calls should have size 2
store.calls should have size 2
}
}
it should "successfully run on an uninitialized container" in within(timeout) {
val container = new TestContainer
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val collector = createCollector()
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
run(machine, Uninitialized)
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 1
collector.calls should have size 1
acker.calls should have size 1
store.calls should have size 1
}
}
/*
* ERROR CASES
*/
it should "complete the transaction and abort if container creation fails" in within(timeout) {
val container = new TestContainer
val factory = createFactory(Future.failed(new Exception()))
val acker = createAcker
val store = createStore
val collector = createCollector()
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
machine ! Run(action, message)
expectMsg(Transition(machine, Uninitialized, Running))
expectMsg(ContainerRemoved)
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 0
container.runCount shouldBe 0
collector.calls should have size 0 // gather no logs
container.destroyCount shouldBe 0 // no destroying possible as no container could be obtained
acker.calls should have size 1
acker.calls(0)._2.response should be a 'whiskError
store.calls should have size 1
}
}
it should "complete the transaction and destroy the container on a failed init" in within(timeout) {
val container = new TestContainer {
override def initialize(initializer: JsObject,
timeout: FiniteDuration)(implicit transid: TransactionId): Future[Interval] = {
initializeCount += 1
Future.failed(InitializationError(Interval.zero, ActivationResponse.applicationError("boom")))
}
}
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val collector = createCollector()
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
machine ! Run(action, message)
expectMsg(Transition(machine, Uninitialized, Running))
expectMsg(ContainerRemoved) // The message is sent as soon as the container decides to destroy itself
expectMsg(Transition(machine, Running, Removing))
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 0 // should not run the action
collector.calls should have size 1
container.destroyCount shouldBe 1
acker.calls(0)._2.response shouldBe ActivationResponse.applicationError("boom")
store.calls should have size 1
}
}
it should "complete the transaction and destroy the container on a failed run" in within(timeout) {
val container = new TestContainer {
override def run(parameters: JsObject, environment: JsObject, timeout: FiniteDuration)(
implicit transid: TransactionId): Future[(Interval, ActivationResponse)] = {
runCount += 1
Future.successful((Interval.zero, ActivationResponse.applicationError("boom")))
}
}
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val collector = createCollector()
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
machine ! Run(action, message)
expectMsg(Transition(machine, Uninitialized, Running))
expectMsg(ContainerRemoved) // The message is sent as soon as the container decides to destroy itself
expectMsg(Transition(machine, Running, Removing))
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 1
collector.calls should have size 1
container.destroyCount shouldBe 1
acker.calls(0)._2.response shouldBe ActivationResponse.applicationError("boom")
store.calls should have size 1
}
}
it should "complete the transaction and destroy the container if log reading failed" in {
val container = new TestContainer
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val partialLogs = Vector("this log line made it", Messages.logFailure)
val collector =
createCollector(Future.failed(LogCollectingException(ActivationLogs(partialLogs))))
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
machine ! Run(action, message)
expectMsg(Transition(machine, Uninitialized, Running))
expectMsg(ContainerRemoved) // The message is sent as soon as the container decides to destroy itself
expectMsg(Transition(machine, Running, Removing))
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 1
collector.calls should have size 1
container.destroyCount shouldBe 1
acker.calls should have size 1
acker.calls(0)._2.response shouldBe ActivationResponse.success()
store.calls should have size 1
store.calls(0)._2.logs shouldBe ActivationLogs(partialLogs)
}
}
it should "complete the transaction and destroy the container if log reading failed terminally" in {
val container = new TestContainer
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val collector = createCollector(Future.failed(new Exception))
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
machine ! Run(action, message)
expectMsg(Transition(machine, Uninitialized, Running))
expectMsg(ContainerRemoved) // The message is sent as soon as the container decides to destroy itself
expectMsg(Transition(machine, Running, Removing))
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 1
collector.calls should have size 1
container.destroyCount shouldBe 1
acker.calls should have size 1
acker.calls(0)._2.response shouldBe ActivationResponse.success()
store.calls should have size 1
store.calls(0)._2.logs shouldBe ActivationLogs(Vector(Messages.logFailure))
}
}
it should "resend the job to the parent if resuming a container fails" in within(timeout) {
val container = new TestContainer {
override def resume()(implicit transid: TransactionId) = {
resumeCount += 1
Future.failed(new RuntimeException())
}
}
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, createCollector(), InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
run(machine, Uninitialized) // first run an activation
timeout(machine) // times out Ready state so container suspends
expectPause(machine)
val runMessage = Run(action, message)
machine ! runMessage
expectMsg(Transition(machine, Paused, Running))
expectMsg(ContainerRemoved) // The message is sent as soon as the container decides to destroy itself
expectMsg(Transition(machine, Running, Removing))
expectMsg(runMessage)
awaitAssert {
factory.calls should have size 1
container.runCount shouldBe 1
container.suspendCount shouldBe 1
container.resumeCount shouldBe 1
container.destroyCount shouldBe 1
}
}
it should "remove the container if suspend fails" in within(timeout) {
val container = new TestContainer {
override def suspend()(implicit transid: TransactionId) = {
suspendCount += 1
Future.failed(new RuntimeException())
}
}
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, createCollector(), InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
run(machine, Uninitialized)
timeout(machine) // times out Ready state so container suspends
expectMsg(Transition(machine, Ready, Pausing))
expectMsg(ContainerRemoved) // The message is sent as soon as the container decides to destroy itself
expectMsg(Transition(machine, Pausing, Removing))
awaitAssert {
factory.calls should have size 1
container.suspendCount shouldBe 1
container.destroyCount shouldBe 1
}
}
/*
* DELAYED DELETION CASES
*/
// this test represents a Remove message whenever you are in the "Running" state. Therefore, testing
// a Remove while /init should suffice to guarantee test coverage here.
it should "delay a deletion message until the transaction is completed successfully" in within(timeout) {
val initPromise = Promise[Interval]
val container = new TestContainer {
override def initialize(initializer: JsObject,
timeout: FiniteDuration)(implicit transid: TransactionId): Future[Interval] = {
initializeCount += 1
initPromise.future
}
}
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val collector = createCollector()
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
// Start running the action
machine ! Run(action, message)
expectMsg(Transition(machine, Uninitialized, Running))
// Schedule the container to be removed
machine ! Remove
// Finish /init, note that /run and log-collecting happens nonetheless
initPromise.success(Interval.zero)
expectWarmed(invocationNamespace.name, action)
expectMsg(Transition(machine, Running, Ready))
// Remove the container after the transaction finished
expectMsg(ContainerRemoved)
expectMsg(Transition(machine, Ready, Removing))
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 1
collector.calls should have size 1
container.suspendCount shouldBe 0 // skips pausing the container
container.destroyCount shouldBe 1
acker.calls should have size 1
store.calls should have size 1
}
}
// this tests a Run message in the "Removing" state. The contract between the pool and state-machine
  // is that only one Run is to be sent until a "NeedWork" comes back. If we send a NeedWork but no work is
// there, we might run into the final timeout which will schedule a removal of the container. There is a
// time window though, in which the pool doesn't know of that decision yet. We handle the collision by
// sending the Run back to the pool so it can reschedule.
it should "send back a Run message which got sent before the container decided to remove itself" in within(timeout) {
val destroyPromise = Promise[Unit]
val container = new TestContainer {
override def destroy()(implicit transid: TransactionId): Future[Unit] = {
destroyCount += 1
destroyPromise.future
}
}
val factory = createFactory(Future.successful(container))
val acker = createAcker
val store = createStore
val collector = createCollector()
val machine =
childActorOf(ContainerProxy.props(factory, acker, store, collector, InstanceId(0), pauseGrace = timeout))
registerCallback(machine)
run(machine, Uninitialized)
timeout(machine)
expectPause(machine)
timeout(machine)
// We don't know of this timeout, so we schedule a run.
machine ! Run(action, message)
// State-machine shuts down nonetheless.
expectMsg(ContainerRemoved)
expectMsg(Transition(machine, Paused, Removing))
// Pool gets the message again.
expectMsg(Run(action, message))
awaitAssert {
factory.calls should have size 1
container.initializeCount shouldBe 1
container.runCount shouldBe 1
collector.calls should have size 1
container.suspendCount shouldBe 1
container.resumeCount shouldBe 1
container.destroyCount shouldBe 1
acker.calls should have size 1
store.calls should have size 1
}
}
/**
* Implements all the good cases of a perfect run to facilitate error case overriding.
*/
class TestContainer extends Container {
protected val id = ContainerId("testcontainer")
protected val addr = ContainerAddress("0.0.0.0")
protected implicit val logging: Logging = log
protected implicit val ec: ExecutionContext = system.dispatcher
var suspendCount = 0
var resumeCount = 0
var destroyCount = 0
var initializeCount = 0
var runCount = 0
var logsCount = 0
def suspend()(implicit transid: TransactionId): Future[Unit] = {
suspendCount += 1
Future.successful(())
}
def resume()(implicit transid: TransactionId): Future[Unit] = {
resumeCount += 1
Future.successful(())
}
override def destroy()(implicit transid: TransactionId): Future[Unit] = {
destroyCount += 1
super.destroy()
}
override def initialize(initializer: JsObject, timeout: FiniteDuration)(
implicit transid: TransactionId): Future[Interval] = {
initializeCount += 1
initializer shouldBe action.containerInitializer
timeout shouldBe action.limits.timeout.duration
Future.successful(Interval.zero)
}
override def run(parameters: JsObject, environment: JsObject, timeout: FiniteDuration)(
implicit transid: TransactionId): Future[(Interval, ActivationResponse)] = {
runCount += 1
environment.fields("api_key") shouldBe message.user.authkey.toJson
environment.fields("namespace") shouldBe invocationNamespace.toJson
environment.fields("action_name") shouldBe message.action.qualifiedNameWithLeadingSlash.toJson
environment.fields("activation_id") shouldBe message.activationId.toJson
val deadline = Instant.ofEpochMilli(environment.fields("deadline").convertTo[String].toLong)
val maxDeadline = Instant.now.plusMillis(timeout.toMillis)
// The deadline should be in the future but must be smaller than or equal
// a freshly computed deadline, as they get computed slightly after each other
deadline should (be <= maxDeadline and be >= Instant.now)
Future.successful((Interval.zero, ActivationResponse.success()))
}
def logs(limit: ByteSize, waitForSentinel: Boolean)(implicit transid: TransactionId): Source[ByteString, Any] = ???
}
}
| paulcastro/openwhisk | tests/src/test/scala/whisk/core/containerpool/test/ContainerProxyTests.scala | Scala | apache-2.0 | 25,085 |
package com.outr.arango
import cats.effect.IO
import com.outr.arango.core.{ArangoDBCollection, CreateResult, DeleteResult, NotFoundException}
import fabric.Value
import fabric.rw._
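/**
 * Key-value store backed by an ArangoDB collection: each entry is persisted as a
 * `StoreValue(_key, value)` document, with values (de)serialized through fabric's ReaderWriter.
 *
 * Usage sketch (key and value are illustrative):
 * {{{
 *   val store = DatabaseStore(collection)
 *   store.update("counter", 5)          // IO[CreateResult[Int]]
 *   store.get[Int]("counter")           // IO[Option[Int]]
 *   store[Int]("counter", _ => 0)       // IO[Int], falls back to 0 instead of NotFoundException
 * }}}
 */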
case class DatabaseStore(collection: ArangoDBCollection) {
def get[T: ReaderWriter](key: String): IO[Option[T]] = collection
.get(id(key))
.map(_.map(_.as[StoreValue].value.as[T]))
def apply[T: ReaderWriter](key: String,
default: String => T = (key: String) => throw NotFoundException(key)): IO[T] = get[T](key)
.map(_.getOrElse(default(key)))
def update[T: ReaderWriter](key: String, value: T): IO[CreateResult[T]] = collection
.upsert(StoreValue(key, value.toValue).toValue)
.map(_.convert(_.as[T]))
def delete(key: String): IO[DeleteResult[Value]] = collection.delete(id(key))
def id[T](key: String): Id[T] = Id[T](key, collection.name)
case class StoreValue(_key: String, value: Value)
object StoreValue {
implicit val rw: ReaderWriter[StoreValue] = ccRW
}
} | outr/scarango | driver/src/main/scala/com/outr/arango/DatabaseStore.scala | Scala | mit | 1,020 |
/*
* Copyright 2012 OneCalendar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dao
import com.mongodb.casbah.Imports._
import configuration.injection.MongoConnectionPool
import configuration.injection.MongoProp.MongoDbName
import fr.scala.util.collection.CollectionsUtils
import models._
import play.api.Logger
object EventDao extends CollectionsUtils
with EventDaoTrait
with MongoOperations
with EventTypeClass
with MongoConnectionPool {
private val log = Logger("EventDao")
private val PREVIEW_SIZE = 3
def deleteByOriginalStream(originalStream: String)(implicit dbName: MongoDbName, now: () => Long) = {
val query = ( "end" $gt now() ) ++ ( "originalStream" -> originalStream )
log.debug("query deleteByOriginalStreal %s".format(query))
delete(query)
}
def saveEvent(event: Event)(implicit dbName: MongoDbName) = save(event)
def findByTag(tags: List[String])(implicit dbName: MongoDbName): List[Event] = {
val query = "tags" $in tags.map(_.toUpperCase)
log.debug("query find by tag %s".format(query))
find[Event](query)
}
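  /**
   * Counts all upcoming events matching the tags and returns the first PREVIEW_SIZE of them
   * ordered by begin date.
   */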
def findPreviewByTag(tags: List[String])(implicit dbName: MongoDbName, now: () => Long): SearchPreview = {
val query = ( "tags" $in tags.map(_.toUpperCase) ) ++ ( "begin" $gt now() )
val c = count[Event](query)
val sortByBeginDate = MongoDBObject("begin" -> 1)
SearchPreview(c, find[Event](query, sortByBeginDate, PREVIEW_SIZE))
}
def findAll()(implicit dbName: MongoDbName): List[Event] = find[Event](MongoDBObject())
def listTags()(implicit dbName: MongoDbName, now: () => Long): List[String] = {
val query = "begin" $gt now()
retrieveMongoCollection(EventMongoModel.collectionName).distinct("tags", query).toList.asInstanceOf[List[String]]
}
def countFutureEvents()(implicit dbName: MongoDbName, now: () => Long): Long = {
val query = "begin" $gt now()
count(query)
}
} | mdia/OneCalendar | app/dao/EventDao.scala | Scala | apache-2.0 | 2,531 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package swave.compat.akka
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import scala.concurrent.duration._
import org.scalatest.{BeforeAndAfterAll, FreeSpec, Inside, Matchers}
import swave.core._
import swave.core.util._
class AkkaCompatSpec extends FreeSpec with Matchers with Inside with BeforeAndAfterAll {
implicit val env = StreamEnv()
implicit val system = ActorSystem()
implicit val materializer = ActorMaterializer()
"Akka compatibility should work as expected" - {
"Source.toSpout" in {
Source(1 to 10).toSpout.drainToList(100).await() shouldEqual (1 to 10)
}
"Spout.toAkkaSource" in {
Spout(1 to 10).toAkkaSource.runWith(Sink.seq).await() shouldEqual (1 to 10)
}
"Flow.toPipe" in {
val flow = Flow[Int].map(_ * -1)
Spout(1 to 10).via(flow.toPipe).drainToList(100).await() shouldEqual (-1 to -10 by -1)
}
"Pipe.toAkkaFlow" in {
val pipe = Pipe[Int].map(_ * -1)
Source(1 to 10).via(pipe.toAkkaFlow).runWith(Sink.seq).await() shouldEqual (-1 to -10 by -1)
}
"Sink.toDrain" in {
val sink = Sink.seq[Int]
Spout(1 to 10).drainTo(sink.toDrain).await() shouldEqual (1 to 10)
}
"Drain.toAkkaSink" in {
val drain = Drain.seq[Int](100)
Source(1 to 10).runWith(drain.toAkkaSink).await() shouldEqual (1 to 10)
}
}
override val invokeBeforeAllAndAfterAllEvenIfNoTestsAreExpected = true
override protected def afterAll(): Unit = {
val envTermination = env.shutdown()
system.terminate().await(2.seconds)
envTermination.awaitTermination(2.seconds)
}
}
| sirthias/swave | compat-akka/src/test/scala/swave/compat/akka/AkkaCompatSpec.scala | Scala | mpl-2.0 | 1,870 |
package bad.robot.radiate.ui
object FrameRate {
val videoFramesPerSecond: FrameRate = framesPerSecond(24)
private[ui] def framesPerSecond(framesPerSecond: Int): FrameRate = {
new FrameRate(framesPerSecond)
}
}
class FrameRate(framesPerSecond: Int) {
  if (framesPerSecond < 1) throw new IllegalArgumentException("frame rates less than 1 seem a little low")
  if (framesPerSecond > 60) throw new IllegalArgumentException("frame rates over 60 frames per second seem a little high")
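  // Milliseconds between frames, using integer division: e.g. 24 fps -> 1000 / 24 = 41 ms.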
private[ui] def asFrequencyInMillis: Int = 1000 / framesPerSecond
} | tobyweston/radiate | src/main/scala/bad/robot/radiate/ui/FrameRate.scala | Scala | apache-2.0 | 566 |
package controllers.api
import play.api.mvc._
import models._
import settings.Global
import org.joda.time.DateTime
object ViewsApiController extends BaseApiController {
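  /** Returns per-day view counts for the last month, keyed by the formatted view date. */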
def getViews = Action { implicit request =>
val monthViews = Global.views.search(ViewQuery(start_date = Option(new DateTime().minusMonths(1)), end_at = Option(new DateTime())))
    jsonResponse(monthViews.map(v => ViewJson(v.created_at.toString("dd-MMM-YY"), 1)).groupBy(_.viewed_at).
map{ case (dt: String, views: Iterable[ViewJson]) => ViewJson(dt, views.size)}.toList)
}
}
case class ViewJson(viewed_at: String, views: Int)
| grahamar/Giles | app/controllers/api/ViewsApiController.scala | Scala | apache-2.0 | 616 |
/*
* Copyright (c) 2013 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
import sbt._
object Dependencies {
val resolutionRepos = Seq(
"Akka Repository" at "http://repo.akka.io/releases/",
"Spray Repository" at "http://repo.spray.cc/"
)
object V {
val spark = "1.5.0"
val specs2 = "1.13" // -> "1.13" when we bump to Scala 2.10.0
val guava = "11.0.1"
// Add versions for your additional libraries here...
}
object Libraries {
val sparkCore = "org.apache.spark" %% "spark-core" % V.spark % "provided"
val sparkMllib = "org.apache.spark" %% "spark-mllib" % V.spark % "provided"
val sparkSql = "org.apache.spark" %% "spark-sql" % V.spark % "provided"
// Add additional libraries from mvnrepository.com (SBT syntax) here...
// Scala (test only)
val specs2 = "org.specs2" % "specs2_2.10" % V.specs2 % "test"
val guava = "com.google.guava" % "guava" % V.guava % "test"
}
}
| devagorilla/spark-code-challenge | project/Dependencies.scala | Scala | apache-2.0 | 1,746 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.wiki.pages
import com.netflix.atlas.core.model.StyleVocabulary
import com.netflix.atlas.core.stacklang.Vocabulary
import com.netflix.atlas.core.stacklang.Word
import com.netflix.atlas.wiki.StackWordPage
case object VSpan extends StackWordPage {
val vocab: Vocabulary = StyleVocabulary
val word: Word = vocab.words.find(_.name == "vspan").get
override def signature: String =
s"""
|```
|TimeSeriesExpr -- StyleExpr
|```
""".stripMargin
override def summary: String =
"""
|Change the line style to be a vertical span. In this mode any non-zero datapoints on the
|line will be shown as a span. This is frequently used to visualize when an alert would
|have fired.
|
|See the [line style examples](Line-Styles) page for more information.
""".stripMargin.trim
}
| brharrington/atlas | atlas-wiki/src/main/scala/com/netflix/atlas/wiki/pages/VSpan.scala | Scala | apache-2.0 | 1,465 |
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Ported from Apache Jakarta Commons Validator,
* http://commons.apache.org/validator/
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.validation.vspec.constraint
import org.junit.Test
class IsbnConstraintTest {
import ConstraintAssertions._
import IsbnConstraintTest._
val constraint = new IsbnConstraint
@Test
def invalid_isbn_numbers() {
assertInvalid(constraint,
"", "1", "12345678901234","dsasdsadsads",
"535365", "I love sparrows!", "--1 930110 99 5",
"1 930110 99 5--", "1 930110-99 5-", INVALID_ISBN)
}
@Test
def valid_isbn_numbers() {
assertValid(constraint,
null, VALID_ISBN_RAW, VALID_ISBN_DASHES,
VALID_ISBN_SPACES, VALID_ISBN_X, VALID_ISBN_x)
}
}
private object IsbnConstraintTest {
val VALID_ISBN_RAW = "1930110995"
val VALID_ISBN_DASHES = "1-930110-99-5"
val VALID_ISBN_SPACES = "1 930110 99 5"
val VALID_ISBN_X = "0-201-63385-X"
val VALID_ISBN_x = "0-201-63385-x"
val INVALID_ISBN = "068-556-98-45"
}
| sptz45/coeus | src/test/scala/com/tzavellas/coeus/validation/vspec/constraint/IsbnConstraintTest.scala | Scala | apache-2.0 | 1,133 |
package org.fs.utility
import org.junit.runner.RunWith
import org.scalatest.Spec
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class RichGeneralImplicitsSpec extends Spec {
object `rich throwable -` {
import RichGeneralImplicits.RichThrowable
def `stackTraceString ` = {
val t = new Throwable("My message")
val s = t.stackTraceString
val lines = s.lines.toSeq
assert(lines.size > 1)
assert(lines(0) == "java.lang.Throwable: My message")
assert(lines(1) startsWith s"\\tat ${classOf[RichGeneralImplicitsSpec].getCanonicalName}")
}
}
object `rich long -` {
import RichGeneralImplicits.RichLong
def `hhMmSsString ` = {
val h = 123L
val m = 53L
val s = 18L
val ms = 336L
val long = (((h * 60) + m) * 60 + s) * 1000 + ms
assert(long.hhMmSsString == s"$h:$m:$s")
}
}
}
| frozenspider/fs-common-utils | src/test/scala/org/fs/utility/RichGeneralImplicitsSpec.scala | Scala | mit | 896 |
package org.scalatra
import org.scalatra.test.specs2.ScalatraSpec
class EnvironmentFilter extends ScalatraFilter {
get("/*/environment") {
environment
}
get("/*/is-development-mode") {
isDevelopmentMode
}
}
class EnvironmentFilterSpec extends ScalatraSpec {
def is =
"The dev filter should" ^
"return 'development' as the environment" ! env("dev", "DEVELOPMENT") ^
"be development mode" ! isDevMode("dev", expected = true) ^
p ^
"The prod filter should" ^
"return 'production' as the environment" ! env("prod", "production") ^
"not be development mode" ! isDevMode("prod", expected = false) ^
end
val devFilterHolder = addFilter(classOf[EnvironmentFilter], "/dev/*")
val prodFilterHolder = addFilter(classOf[EnvironmentFilter], "/prod/*")
prodFilterHolder.setInitParameter(EnvironmentKey, "production")
def env(environment: String, expected: String) =
get("/%s/environment".format(environment)) {
body must be equalTo (expected)
}
def isDevMode(environment: String, expected: Boolean) =
get("/%s/is-development-mode".format(environment)) {
body must be equalTo (expected.toString)
}
}
| dozed/scalatra | core/src/test/scala/org/scalatra/EnvironmentSpec.scala | Scala | bsd-2-clause | 1,195 |
package com.truecar.mleap.core.regression
import com.truecar.mleap.core.linalg.Vector
import com.truecar.mleap.core.tree.{DecisionTree, Node}
/**
* Created by hwilkins on 11/8/15.
*/
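/**
 * Decision tree regression model: a prediction is made by routing the feature vector from the
 * root node down to a leaf and returning that leaf's predicted value.
 */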
case class DecisionTreeRegression(rootNode: Node, numFeatures: Int) extends DecisionTree {
def predict(features: Vector): Double = {
rootNode.predictImpl(features).prediction
}
}
| TrueCar/mleap | mleap-core/src/main/scala/com/truecar/mleap/core/regression/DecisionTreeRegression.scala | Scala | apache-2.0 | 374 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala.stream.table
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.source.SourceFunction
import org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.table.api.TableEnvironment
import org.apache.flink.table.api.java.utils.UserDefinedAggFunctions.WeightedAvg
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.scala.stream.table.OverWindowITCase.RowTimeSourceFunction
import org.apache.flink.table.api.scala.stream.utils.{StreamITCase, StreamingWithStateTestBase}
import org.apache.flink.table.functions.aggfunctions.CountAggFunction
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.Test
import scala.collection.mutable
class OverWindowITCase extends StreamingWithStateTestBase {
@Test
def testProcTimeUnBoundedPartitionedRowOver(): Unit = {
val data = List(
(1L, 1, "Hello"),
(2L, 2, "Hello"),
(3L, 3, "Hello"),
(4L, 4, "Hello"),
(5L, 5, "Hello"),
(6L, 6, "Hello"),
(7L, 7, "Hello World"),
(8L, 8, "Hello World"),
(20L, 20, "Hello World"))
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setParallelism(1)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.testResults = mutable.MutableList()
StreamITCase.clear
val stream = env.fromCollection(data)
val table = stream.toTable(tEnv, 'a, 'b, 'c, 'proctime.proctime)
val countFun = new CountAggFunction
val weightAvgFun = new WeightedAvg
val windowedTable = table
.window(
Over partitionBy 'c orderBy 'proctime preceding UNBOUNDED_ROW as 'w)
.select('c, countFun('b) over 'w as 'mycount, weightAvgFun('a, 'b) over 'w as 'wAvg)
.select('c, 'mycount, 'wAvg)
val results = windowedTable.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink)
env.execute()
val expected = Seq(
"Hello World,1,7", "Hello World,2,7", "Hello World,3,14",
"Hello,1,1", "Hello,2,1", "Hello,3,2", "Hello,4,3", "Hello,5,3", "Hello,6,4")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testRowTimeUnBoundedPartitionedRangeOver(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
val tEnv = TableEnvironment.getTableEnvironment(env)
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
env.setStateBackend(getStateBackend)
StreamITCase.testResults = mutable.MutableList()
StreamITCase.clear
env.setParallelism(1)
val data = Seq(
Left(14000005L, (1, 1L, "Hi")),
Left(14000000L, (2, 1L, "Hello")),
Left(14000002L, (1, 1L, "Hello")),
Left(14000002L, (1, 2L, "Hello")),
Left(14000002L, (1, 3L, "Hello world")),
Left(14000003L, (2, 2L, "Hello world")),
Left(14000003L, (2, 3L, "Hello world")),
Right(14000020L),
Left(14000021L, (1, 4L, "Hello world")),
Left(14000022L, (1, 5L, "Hello world")),
Left(14000022L, (1, 6L, "Hello world")),
Left(14000022L, (1, 7L, "Hello world")),
Left(14000023L, (2, 4L, "Hello world")),
Left(14000023L, (2, 5L, "Hello world")),
Right(14000030L)
)
val table = env
.addSource(new RowTimeSourceFunction[(Int, Long, String)](data))
.toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime)
val countFun = new CountAggFunction
val weightAvgFun = new WeightedAvg
val windowedTable = table
.window(Over partitionBy 'a orderBy 'rowtime preceding UNBOUNDED_RANGE following
CURRENT_RANGE as 'w)
.select(
'a, 'b, 'c,
'b.sum over 'w,
countFun('b) over 'w,
'b.avg over 'w,
'b.max over 'w,
'b.min over 'w,
weightAvgFun('b, 'a) over 'w)
val result = windowedTable.toAppendStream[Row]
result.addSink(new StreamITCase.StringSink)
env.execute()
val expected = mutable.MutableList(
"1,1,Hello,6,3,2,3,1,2",
"1,2,Hello,6,3,2,3,1,2",
"1,3,Hello world,6,3,2,3,1,2",
"1,1,Hi,7,4,1,3,1,1",
"2,1,Hello,1,1,1,1,1,1",
"2,2,Hello world,6,3,2,3,1,2",
"2,3,Hello world,6,3,2,3,1,2",
"1,4,Hello world,11,5,2,4,1,2",
"1,5,Hello world,29,8,3,7,1,3",
"1,6,Hello world,29,8,3,7,1,3",
"1,7,Hello world,29,8,3,7,1,3",
"2,4,Hello world,15,5,3,5,1,3",
"2,5,Hello world,15,5,3,5,1,3"
)
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testProcTimeBoundedPartitionedRangeOver(): Unit = {
val data = List(
(1, 1L, 0, "Hallo", 1L),
(2, 2L, 1, "Hallo Welt", 2L),
(2, 3L, 2, "Hallo Welt wie", 1L),
(3, 4L, 3, "Hallo Welt wie gehts?", 2L),
(3, 5L, 4, "ABC", 2L),
(3, 6L, 5, "BCD", 3L),
(4, 7L, 6, "CDE", 2L),
(4, 8L, 7, "DEF", 1L),
(4, 9L, 8, "EFG", 1L),
(4, 10L, 9, "FGH", 2L),
(5, 11L, 10, "GHI", 1L),
(5, 12L, 11, "HIJ", 3L),
(5, 13L, 12, "IJK", 3L),
(5, 14L, 13, "JKL", 2L),
(5, 15L, 14, "KLM", 2L))
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStateBackend(getStateBackend)
val tEnv = TableEnvironment.getTableEnvironment(env)
env.setParallelism(1)
StreamITCase.testResults = mutable.MutableList()
val stream = env.fromCollection(data)
val table = stream.toTable(tEnv, 'a, 'b, 'c, 'd, 'e, 'proctime.proctime)
val windowedTable = table
.window(Over partitionBy 'a orderBy 'proctime preceding 4.rows following CURRENT_ROW as 'w)
.select('a, 'c.sum over 'w, 'c.min over 'w)
val result = windowedTable.toAppendStream[Row]
result.addSink(new StreamITCase.StringSink)
env.execute()
val expected = mutable.MutableList(
"1,0,0",
"2,1,1",
"2,3,1",
"3,3,3",
"3,7,3",
"3,12,3",
"4,6,6",
"4,13,6",
"4,21,6",
"4,30,6",
"5,10,10",
"5,21,10",
"5,33,10",
"5,46,10",
"5,60,10")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testRowTimeBoundedPartitionedRowOver(): Unit = {
val data = Seq(
Left((1L, (1L, 1, "Hello"))),
Left((2L, (2L, 2, "Hello"))),
Left((1L, (1L, 1, "Hello"))),
Left((2L, (2L, 2, "Hello"))),
Left((2L, (2L, 2, "Hello"))),
Left((1L, (1L, 1, "Hello"))),
Left((3L, (7L, 7, "Hello World"))),
Left((1L, (7L, 7, "Hello World"))),
Left((1L, (7L, 7, "Hello World"))),
Right(2L),
Left((3L, (3L, 3, "Hello"))),
Left((4L, (4L, 4, "Hello"))),
Left((5L, (5L, 5, "Hello"))),
Left((6L, (6L, 6, "Hello"))),
Left((20L, (20L, 20, "Hello World"))),
Right(6L),
Left((8L, (8L, 8, "Hello World"))),
Left((7L, (7L, 7, "Hello World"))),
Right(20L))
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setParallelism(1)
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
env.setStateBackend(getStateBackend)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.clear
val table = env.addSource[(Long, Int, String)](
new RowTimeSourceFunction[(Long, Int, String)](data))
.toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime)
val windowedTable = table
.window(Over partitionBy 'c orderBy 'rowtime preceding 2.rows following CURRENT_ROW as 'w)
.select('c, 'a, 'a.count over 'w, 'a.sum over 'w)
val result = windowedTable.toAppendStream[Row]
result.addSink(new StreamITCase.StringSink)
env.execute()
val expected = mutable.MutableList(
"Hello,1,1,1", "Hello,1,2,2", "Hello,1,3,3",
"Hello,2,3,4", "Hello,2,3,5", "Hello,2,3,6",
"Hello,3,3,7", "Hello,4,3,9", "Hello,5,3,12",
"Hello,6,3,15",
"Hello World,7,1,7", "Hello World,7,2,14", "Hello World,7,3,21",
"Hello World,7,3,21", "Hello World,8,3,22", "Hello World,20,3,35")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testRowTimeBoundedPartitionedRangeOver(): Unit = {
val data = Seq(
Left((1500L, (1L, 15, "Hello"))),
Left((1600L, (1L, 16, "Hello"))),
Left((1000L, (1L, 1, "Hello"))),
Left((2000L, (2L, 2, "Hello"))),
Right(1000L),
Left((2000L, (2L, 2, "Hello"))),
Left((2000L, (2L, 3, "Hello"))),
Left((3000L, (3L, 3, "Hello"))),
Right(2000L),
Left((4000L, (4L, 4, "Hello"))),
Right(3000L),
Left((5000L, (5L, 5, "Hello"))),
Right(5000L),
Left((6000L, (6L, 6, "Hello"))),
Left((6500L, (6L, 65, "Hello"))),
Right(7000L),
Left((9000L, (6L, 9, "Hello"))),
Left((9500L, (6L, 18, "Hello"))),
Left((9000L, (6L, 9, "Hello"))),
Right(10000L),
Left((10000L, (7L, 7, "Hello World"))),
Left((11000L, (7L, 17, "Hello World"))),
Left((11000L, (7L, 77, "Hello World"))),
Right(12000L),
Left((14000L, (7L, 18, "Hello World"))),
Right(14000L),
Left((15000L, (8L, 8, "Hello World"))),
Right(17000L),
Left((20000L, (20L, 20, "Hello World"))),
Right(19000L))
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
env.setStateBackend(getStateBackend)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.clear
val table = env.addSource[(Long, Int, String)](
new RowTimeSourceFunction[(Long, Int, String)](data))
.toTable(tEnv, 'a, 'b, 'c, 'rowtime.rowtime)
val windowedTable = table
.window(
Over partitionBy 'c orderBy 'rowtime preceding 1.seconds following CURRENT_RANGE as 'w)
.select('c, 'b, 'a.count over 'w, 'a.sum over 'w)
val result = windowedTable.toAppendStream[Row]
result.addSink(new StreamITCase.StringSink)
env.execute()
val expected = mutable.MutableList(
"Hello,1,1,1", "Hello,15,2,2", "Hello,16,3,3",
"Hello,2,6,9", "Hello,3,6,9", "Hello,2,6,9",
"Hello,3,4,9",
"Hello,4,2,7",
"Hello,5,2,9",
"Hello,6,2,11", "Hello,65,2,12",
"Hello,9,2,12", "Hello,9,2,12", "Hello,18,3,18",
"Hello World,7,1,7", "Hello World,17,3,21", "Hello World,77,3,21", "Hello World,18,1,7",
"Hello World,8,2,15",
"Hello World,20,1,20")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
}
object OverWindowITCase {
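  /**
   * Scripted test source: `Left((timestamp, element))` entries are emitted with the given
   * event-time timestamp, while `Right(watermark)` entries emit a watermark.
   */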
class RowTimeSourceFunction[T](
dataWithTimestampList: Seq[Either[(Long, T), Long]]) extends SourceFunction[T] {
override def run(ctx: SourceContext[T]): Unit = {
dataWithTimestampList.foreach {
case Left(t) => ctx.collectWithTimestamp(t._2, t._1)
case Right(w) => ctx.emitWatermark(new Watermark(w))
}
}
override def cancel(): Unit = ???
}
}
| WangTaoTheTonic/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/scala/stream/table/OverWindowITCase.scala | Scala | apache-2.0 | 11,838 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import java.io.File
import java.util.Arrays
import kafka.server._
import kafka.utils.TestUtils
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.junit.{After, Before}
import scala.collection.mutable.{ArrayBuffer, Buffer}
import java.util.Properties
import org.apache.kafka.common.KafkaException
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.utils.Time
/**
* A test harness that brings up some number of broker nodes
*/
abstract class KafkaServerTestHarness extends ZooKeeperTestHarness {
var instanceConfigs: Seq[KafkaConfig] = null
var servers: Buffer[KafkaServer] = new ArrayBuffer
var brokerList: String = null
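  // Parallel to `servers`: tracks which brokers are currently running (see killBroker/restartDeadBrokers).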
var alive: Array[Boolean] = null
val kafkaPrincipalType = KafkaPrincipal.USER_TYPE
/**
* Implementations must override this method to return a set of KafkaConfigs. This method will be invoked for every
* test and should not reuse previous configurations unless they select their ports randomly when servers are started.
*/
def generateConfigs: Seq[KafkaConfig]
/**
* Override this in case ACLs or security credentials must be set before `servers` are started.
*
* This is required in some cases because of the topic creation in the setup of `IntegrationTestHarness`. If the ACLs
* are only set later, tests may fail. The failure could manifest itself as a cluster action
* authorization exception when processing an update metadata request (controller -> broker) or in more obscure
* ways (e.g. __consumer_offsets topic replication fails because the metadata cache has no brokers as a previous
* update metadata request failed due to an authorization exception).
*
* The default implementation of this method is a no-op.
*/
def configureSecurityBeforeServersStart() {}
/**
* Override this in case Tokens or security credentials needs to be created after `servers` are started.
* The default implementation of this method is a no-op.
*/
def configureSecurityAfterServersStart() {}
def configs: Seq[KafkaConfig] = {
if (instanceConfigs == null)
instanceConfigs = generateConfigs
instanceConfigs
}
def serverForId(id: Int): Option[KafkaServer] = servers.find(s => s.config.brokerId == id)
def boundPort(server: KafkaServer): Int = server.boundPort(listenerName)
protected def securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT
protected def listenerName: ListenerName = ListenerName.forSecurityProtocol(securityProtocol)
protected def trustStoreFile: Option[File] = None
protected def serverSaslProperties: Option[Properties] = None
protected def clientSaslProperties: Option[Properties] = None
protected def brokerTime(brokerId: Int): Time = Time.SYSTEM
@Before
override def setUp() {
super.setUp()
if (configs.isEmpty)
throw new KafkaException("Must supply at least one server config.")
// default implementation is a no-op, it is overridden by subclasses if required
configureSecurityBeforeServersStart()
// Add each broker to `servers` buffer as soon as it is created to ensure that brokers
// are shutdown cleanly in tearDown even if a subsequent broker fails to start
for (config <- configs)
servers += TestUtils.createServer(config, time = brokerTime(config.brokerId))
brokerList = TestUtils.bootstrapServers(servers, listenerName)
alive = new Array[Boolean](servers.length)
Arrays.fill(alive, true)
// default implementation is a no-op, it is overridden by subclasses if required
configureSecurityAfterServersStart()
}
@After
override def tearDown() {
if (servers != null) {
TestUtils.shutdownServers(servers)
}
super.tearDown()
}
/**
* Create a topic in ZooKeeper.
* Wait until the leader is elected and the metadata is propagated to all brokers.
* Return the leader for each partition.
*/
def createTopic(topic: String, numPartitions: Int = 1, replicationFactor: Int = 1,
topicConfig: Properties = new Properties): scala.collection.immutable.Map[Int, Int] =
TestUtils.createTopic(zkClient, topic, numPartitions, replicationFactor, servers, topicConfig)
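  // For example (hypothetical topic name), a test could create a three-partition topic and
  // inspect the leader elected for each partition:
  //   val leaders: Map[Int, Int] = createTopic("test-topic", numPartitions = 3)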
/**
* Create a topic in ZooKeeper using a customized replica assignment.
* Wait until the leader is elected and the metadata is propagated to all brokers.
* Return the leader for each partition.
*/
def createTopic(topic: String, partitionReplicaAssignment: collection.Map[Int, Seq[Int]]): scala.collection.immutable.Map[Int, Int] =
TestUtils.createTopic(zkClient, topic, partitionReplicaAssignment, servers)
/**
* Pick a broker at random and kill it if it isn't already dead
* Return the id of the broker killed
*/
def killRandomBroker(): Int = {
val index = TestUtils.random.nextInt(servers.length)
killBroker(index)
index
}
def killBroker(index: Int) {
if(alive(index)) {
servers(index).shutdown()
servers(index).awaitShutdown()
alive(index) = false
}
}
/**
* Restart any dead brokers
*/
def restartDeadBrokers() {
for(i <- servers.indices if !alive(i)) {
servers(i).startup()
alive(i) = true
}
}
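  // A typical failover check (illustrative only) pairs the two helpers above:
  //   val killedId = killRandomBroker()
  //   // ... assert the remaining brokers still serve requests ...
  //   restartDeadBrokers()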
}
| gf53520/kafka | core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala | Scala | apache-2.0 | 6,098 |
package dpla.ingestion3.executors
import java.time.LocalDateTime
import com.databricks.spark.avro._
import dpla.ingestion3.confs.i3Conf
import dpla.ingestion3.dataStorage.OutputHelper
import dpla.ingestion3.harvesters.Harvester
import dpla.ingestion3.harvesters.file.NaraDeltaHarvester
import dpla.ingestion3.harvesters.oai.OaiHarvester
import dpla.ingestion3.harvesters.pss.PssHarvester
import dpla.ingestion3.harvesters.resourceSync.RsHarvester
import dpla.ingestion3.utils.{ProviderRegistry, Utils}
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
import scala.util.{Failure, Success, Try}
trait HarvestExecutor {
  /**
    * Run the appropriate type of harvest.
    *
    * @param sparkConf Spark configuration used to build the SparkSession
    * @param shortName Provider short name (e.g. cdl, mdl, nara, loc).
    * @param dataOut   Root output location for harvested data
    * @param conf      Configurations read from application configuration file
    * @param logger    Logger object
    * @see ProviderRegistry.register() for the authoritative
    *      list of provider short names.
    */
def execute(sparkConf: SparkConf,
shortName: String,
dataOut: String,
conf: i3Conf,
logger: Logger): Unit = {
// Log config file location and provider short name.
logger.info(s"Harvest initiated")
logger.info(s"Provider short name: $shortName")
//todo build spark here
val spark = SparkSession.builder()
.config(sparkConf)
.getOrCreate()
// Get and log harvest type.
val harvestType = conf.harvest.harvestType
.getOrElse(throw new RuntimeException("No harvest type specified."))
logger.info(s"Harvest type: $harvestType")
val harvester = buildHarvester(spark, shortName, conf, logger, harvestType)
// This start time is used for documentation and output file naming.
val startDateTime = LocalDateTime.now
// This start time is used to measure the duration of harvest.
val start = System.currentTimeMillis()
val outputHelper: OutputHelper =
new OutputHelper(dataOut, shortName, "harvest", startDateTime)
val outputPath = outputHelper.activityPath
    // Run the harvest inside a Try so any failure can be logged and reported below
Try {
      // Invoke the harvester implementation selected above
val harvestData: DataFrame = harvester.harvest
// if there are setIds in the returned dataframe then generate a count summary by setId
val setSummary: Option[String] = if (harvestData.columns.contains("setIds")) {
val summary = harvestData.groupBy("setIds")
.count()
.sort("setIds")
.collect()
.map( row => row.getSeq[String](0).mkString(" ") -> row.getLong(1))
.map { case ( set: String, count: Long ) => s"$set, $count" }.mkString("\\n")
        // Note: DataFrames are immutable, so this drop returns a new frame that is not kept;
        // the harvestData written below still contains the setIds column.
        harvestData.drop("setIds")
Some(summary)
} else {
None
}
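      // Illustrative shape of the summary computed above (values are hypothetical): one
      // "<space-joined setIds>, <record count>" line per distinct setIds value, e.g.
      //   oai:set:a, 1200
      //   oai:set:a oai:set:b, 310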
// Write harvested data to output file.
harvestData
.write
.format("com.databricks.spark.avro")
.option("avroSchema", harvestData.schema.toString)
.avro(outputPath)
setSummary match {
case Some(s) => outputHelper.writeSetSummary(s) match {
          case Success(summaryPath) => logger.info(s"OAI set summary written to $summaryPath.")
          case Failure(f) => logger.warn("Failed to write OAI set summary.", f)
}
case None =>
}
// Reads the saved avro file back
spark.read.avro(outputPath)
} match {
case Success(df) =>
Harvester.validateSchema(df)
val recordCount = df.count()
logger.info(Utils.harvestSummary(outputPath, System.currentTimeMillis() - start, recordCount))
val manifestOpts: Map[String, String] = Map(
"Activity" -> "Harvest",
"Provider" -> shortName,
"Record count" -> recordCount.toString
)
outputHelper.writeManifest(manifestOpts) match {
case Success(s) => logger.info(s"Manifest written to $s.")
case Failure(f) => logger.warn(s"Manifest failed to write.", f)
}
case Failure(f) => logger.error(s"Harvest failure.", f)
}
harvester.cleanUp()
spark.stop()
}
private def buildHarvester(spark: SparkSession, shortName: String, conf: i3Conf, logger: Logger, harvestType: String) = {
harvestType match {
case "oai" =>
new OaiHarvester(spark, shortName, conf, logger)
case "pss" =>
new PssHarvester(spark, shortName, conf, logger)
case "rs" =>
new RsHarvester(spark, shortName, conf, logger)
case "nara.file.delta" =>
new NaraDeltaHarvester(spark, shortName, conf, logger)
case "api" | "file" =>
val harvesterClass = ProviderRegistry.lookupHarvesterClass(shortName) match {
case Success(harvClass) => harvClass
case Failure(e) =>
logger.fatal(e.getMessage)
throw e
}
harvesterClass
.getConstructor(classOf[SparkSession], classOf[String], classOf[i3Conf], classOf[Logger])
.newInstance(spark, shortName, conf, logger)
case _ =>
        val msg = s"Harvest type '$harvestType' not recognized."
logger.fatal(msg)
throw new RuntimeException(msg)
}
}
}
| dpla/ingestion3 | src/main/scala/dpla/ingestion3/executors/HarvestExecutor.scala | Scala | mit | 5,218 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.mongodb.examples
import com.stratio.datasource.mongodb.examples.MongoExampleFunctions._
object DataFrameAPIExample extends App with MongoDefaultConstants {
val mongoClient = prepareEnvironment()
withSQLContext { sqlContext =>
sqlContext.sql(
s"""|CREATE TEMPORARY TABLE $Collection
|(id STRING, age INT, description STRING, enrolled BOOLEAN, name STRING, optionalField BOOLEAN)
|USING $MongoProvider
|OPTIONS (
|host '$MongoHost:$MongoPort',
|database '$Database',
|collection '$Collection'
|)
""".stripMargin.replaceAll("\\n", " "))
import org.apache.spark.sql.functions._
val studentsDF = sqlContext.read.format(MongoProvider).table(Collection)
studentsDF.where(studentsDF("age") > 15).groupBy(studentsDF("enrolled")).agg(avg("age"), max("age")).show(5)
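    // The temporary table registered above could equally be queried with SQL; a sketch of the
    // equivalent aggregation (not part of the original example):
    //   sqlContext.sql(
    //     s"SELECT enrolled, avg(age), max(age) FROM $Collection WHERE age > 15 GROUP BY enrolled"
    //   ).show(5)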
}
cleanEnvironment(mongoClient)
} | pfcoperez/spark-mongodb | spark-mongodb-examples/src/main/scala/com/stratio/datasource/mongodb/examples/DataFrameAPIExample.scala | Scala | apache-2.0 | 1,556 |
package com.karasiq.scalajsbundler.dsl
import java.io.File
import java.net.URL
import java.nio.file.Path
import scala.language.implicitConversions
import com.karasiq.scalajsbundler.ScalaJSBundler._
trait BundlerImplicits { self: BundlerDsl ⇒
import PageContentBuilders._
final implicit class BuilderOps[+T <: PageContent](builder: ContentBuilder[T]) {
def from[S](source: S)(implicit ev: S ⇒ Asset): T = {
builder.fromAsset(source)
}
}
implicit def pageContentAsSeq[T <: PageContent](pc: T): Seq[PageContent] = Seq(pc)
implicit def urlToAsset(url: URL): Asset = WebAsset(url.toString)
implicit def stringToAsset(str: String): Asset = StringAsset(str)
implicit def fileToAsset(file: File): Asset = FileAsset(file.toString)
implicit def pathToAsset(path: Path): Asset = FileAsset(path.toString)
implicit def webjarToAsset(moduleId: JarResource): Asset = ResourceAsset(s"META-INF/resources/webjars/${moduleId.module.name}/${moduleId.module.revision}/${moduleId.resourceName}")
implicit def githubToAsset(gh: GithubRepository): Asset = WebAsset(gh.url)
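  // With these conversions in scope, any supported source type can stand in for an Asset.
  // The values below are illustrative, not part of the original DSL:
  //   val remote: Asset = new java.net.URL("https://example.com/app.js") // via urlToAsset
  //   val inline: Asset = "console.log('hello')"                         // via stringToAsset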
}
| Karasiq/sbt-scalajs-bundler | src/main/scala/com/karasiq/scalajsbundler/dsl/BundlerImplicits.scala | Scala | mit | 1,101 |
package s99
object P15 {
def duplicateN[E](n: Int, l: List[E]): List[E] = {
l.flatMap(List.fill(n)(_))
}
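  // For example (illustrative values), every element is repeated n times in place:
  //   duplicateN(3, List('a, 'b)) == List('a, 'a, 'a, 'b, 'b, 'b)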
def duplicateNRec[E](n: Int, l: List[E]): List[E] = {
duplicateNRec(n, l, List.empty[E])
}
private[this] def duplicateNRec[E](n: Int, l: List[E], result: List[E]): List[E] = {
l match {
case h :: t => duplicateNRec(n, t, copyN(n, h, Nil) ++ result)
case Nil => result.reverse
}
}
private[this] def copyN[E](n: Int, e: E, result: List[E]): List[E] = {
if (n <= 0)
result
else
copyN(n - 1, e, e :: result)
}
}
| qilab-/algorithm-problems | s-99/src/main/scala/s99/P15.scala | Scala | unlicense | 583 |
package io.buoyant.test.h2
import com.twitter.finagle.buoyant.h2.{Frame, Stream}
import com.twitter.util.Future
object StreamTestUtils {
/**
* Read a [[Stream]] to the end, [[Frame.release release()]]ing each
* [[Frame]] before reading the next one.
*
* The value of each frame is discarded, but assertions can be made about
* their contents by attaching an [[Stream.onFrame onFrame()]] callback
* before calling `readAll()`.
*
* @param stream the [[Stream]] to read to the end
* @return a [[Future]] that will finish when the whole stream is read
*/
final def readToEnd(stream: Stream): Future[Unit] =
if (stream.isEmpty) Future.Unit
else
stream.read().flatMap { frame =>
val end = frame.isEnd
frame.release().before {
if (end) Future.Unit else readToEnd(stream)
}
}
/**
   * Enhances a [[Stream]] by exposing [[readToEnd()]] as a method on the
   * stream itself.
*
* @param stream the underlying [[Stream]]
*/
implicit class ReadAllStream(val stream: Stream) extends AnyVal {
@inline def readToEnd: Future[Unit] = StreamTestUtils.readToEnd(stream)
}
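  // Usage sketch in a test (Await comes from com.twitter.util; `stream` is hypothetical):
  //   import com.twitter.util.Await
  //   Await.result(stream.readToEnd)   // drains the stream, releasing each frame as it goes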
}
| denverwilliams/linkerd | finagle/h2/src/test/scala/io/buoyant/test/h2/StreamTestUtils.scala | Scala | apache-2.0 | 1,169 |
package com.anchortab.snippet
import scala.xml.NodeSeq
import java.text.SimpleDateFormat
import net.liftweb._
import sitemap._
import common._
import http._
import SHtml._
import js._
import JsCmds._
import LiftRules._
import json._
import Extraction._
import util._
import Helpers._
import mongodb.BsonDSL._
import org.bson.types.ObjectId
import org.joda.time._
import com.anchortab.actor._
import com.anchortab.model._
import com.anchortab.util._
import com.stripe
case class SubmitBillingInfoToStripe(submitBillingInfoFunc: (String)=>JsCmd) extends
AnonCallableFunction(submitBillingInfoFunc)
case class UpdateBillingInformation(updateStripeTokenFn: JsCmd) extends
AnchorTabEvent("update-billing-information", ("updateStripeTokenFn" -> updateStripeTokenFn.toJsCmd))
case object ErrorChargingCard extends SimpleAnchorTabEvent("error-charging-card")
case object NoBillingInformationError extends SimpleAnchorTabEvent("no-billing-information-error")
case class GeneralError(errorText: String) extends SimpleAnchorTabEvent("general-error")
object Subscription extends Loggable {
val subscriptionMenu = Menu.i("Subscription") / "manager" / "subscription" >>
Authentication.ifLoggedIn
val menus =
subscriptionMenu ::
Nil
implicit val formats = DefaultFormats
val dateFormatter = new SimpleDateFormat("dd MMM yyyy")
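  // For example (hypothetical date, assuming an English default locale), this pattern renders:
  //   dateFormatter.format(new DateTime(2015, 3, 14, 0, 0).toDate) // => "14 Mar 2015"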
def snippetHandlers : SnippetPF = {
case "subscription-summary" :: Nil => subscriptionSummary
case "plan-selection" :: Nil => planSelection
case "billing-summary" :: Nil => billingSummary
case "recent-billing-history" :: Nil => recentBillingHistory
}
def recentBillingHistory = {
{
for {
user <- currentUser.is
customerId <- user.stripeCustomerId
invoices <- tryo(stripe.Invoice.all(Map(
"customer" -> customerId,
"limit" -> 24
)))
} yield {
ClearClearable andThen
".no-invoices" #> ClearNodes &
".invoice" #> invoices.data.map { invoice=>
".date *" #> StripeNumber(invoice.date).asDateTime.toString(DATE_FORMAT) &
".amount *" #> StripeNumber(invoice.total).asDollarsAndCentsString &
".details-link [href]" #> Invoice.menu.toLoc.calcHref(invoice.id.getOrElse(""))
}
}
} openOr {
ClearClearable andThen
".invoice" #> ClearNodes
}
}
def subscriptionSummary = {
{
for {
user <- currentUser.is
subscription <- user.subscription
plan <- Plan.find(subscription.planId)
} yield {
val planStatus = {
if (! plan.isSpecial && ! subscription.cancelled_?)
".special-plan-assignment" #> ClearNodes andThen
".cancelled-subscription" #> ClearNodes
else if (subscription.cancelled_?)
".subscribed" #> ClearNodes andThen
".special-plan-assignment" #> ClearNodes
else
".subscribed" #> ClearNodes andThen
".cancelled-subscription" #> ClearNodes
}
planStatus andThen
".trial-plan" #> (subscription.trial_? ? PassThru | ClearNodes) andThen
".plan-name *" #> plan.name &
".ending-date *" #> subscription.ends.map { subscriptionEndingDate =>
dateFormatter format subscriptionEndingDate.toDate
} &
".not-subscribed" #> ClearNodes
}
} openOr {
".trial-plan" #> ClearNodes &
".cancelled-subscription" #> ClearNodes &
".provisional-plan" #> ClearNodes &
".special-plan-assignment" #> ClearNodes &
".subscribed" #> ClearNodes
}
}
def planSelection = {
def changeSubscription(user: User, subscription: Option[UserSubscription], newPlanId: ObjectId)() = {
def changeFnForCustomer(customer: stripe.Customer) = {
        val subscriptionsMatchingExistingPlan = customer.subscriptions.data.filter { stripeSub =>
          stripeSub.plan.id == subscription.flatMap(_.plan).flatMap(_.stripeId).getOrElse("")
        }
subscriptionsMatchingExistingPlan match {
case existingSubscription :: Nil =>
Full((params: Map[String, _]) => customer.updateSubscription(existingSubscription.id, params))
case Nil =>
Full((params: Map[String, _]) => customer.createSubscription(params))
case _ =>
Failure("Error determining correct action to take on subscriptions.")
}
}
val changeResult: Box[JsCmd] =
if (user.activeCard.isDefined) {
for {
plan <- (Plan.find(newPlanId):Box[Plan]) ?~ "Plan not found."
newPlanStripeId <- (plan.stripeId:Box[String]) ?~ "Plan lacks Stripe ID."
customerId <- (user.stripeCustomerId:Box[String]) ?~ "User lacks Stripe ID."
customer <- tryo(stripe.Customer.retrieve(customerId)) ?~ "Stripe doesn't recognize user."
changeFn <- changeFnForCustomer(customer)
updatedStripeSubscription <- tryo(changeFn(Map(
"plan" -> newPlanStripeId
)))
updatedSubscription = subscription.map(_.copy(
status = "cancelled",
ends = Some(new DateTime())
))
newSubscription = UserSubscription(newPlanId, plan.price, plan.term, status = "active")
} yield {
val allButCurrentSubscription = user.subscriptions.filter { sub =>
sub._id != (updatedSubscription.map(_._id) getOrElse "")
}
val newSubscriptions = (updatedSubscription :: Some(newSubscription) :: Nil).flatten
val updatedUser = user.copy(
subscriptions = allButCurrentSubscription ++ newSubscriptions
)
updatedUser.save
Authentication.authenticationStickyNotices(updatedUser)
if (Props.productionMode)
EmailActor ! SendAdminNotificationEmail(PlanChanged(user.plan._id, plan._id), user.email)
Notices.notice("Your subscription has been successfully changed.")
Reload
}
} else {
Full(NoBillingInformationError)
}
changeResult match {
case Full(jsCmd) =>
jsCmd
case Empty =>
ErrorChargingCard
case Failure(msg, _, _) =>
logger.error("Error updating subscription: " + msg)
GeneralError("Something went wrong while attempting to update your subscription: " + msg)
}
}
def cancelSubscription(user: User, subscription: UserSubscription)() = {
{
for {
customerId <- user.stripeCustomerId
customer <- tryo(stripe.Customer.retrieve(customerId))
existingStripeSubscription <- customer.subscriptions.data.headOption
stripeSubscription <- tryo(customer.cancelSubscription(
existingStripeSubscription.id,
Map(
"at_period_end" -> true
))
)
periodEnd <- stripeSubscription.currentPeriodEnd
updatedSubscription = subscription.copy(
status = "cancelled",
ends = Some(new DateTime(periodEnd * 1000))
)
} yield {
val allButCurrentSubscription = user.subscriptions.filter { sub =>
sub._id != updatedSubscription._id
}
val updatedUser = user.copy(
subscriptions = allButCurrentSubscription ++ (updatedSubscription :: Nil)
)
updatedUser.save
Notices.notice("Your subscription has been cancelled. You will not be billed again.")
Reload
}
} getOrElse {
GeneralError("Error while canceling subscription. Please contact us.")
}
}
{
for {
user <- currentUser.is
if ! user.onSpecialPlan_?
} yield {
val subscription = user.subscription
val currentPlan = user.subscription.flatMap { sub =>
Plan.find(sub.planId)
} getOrElse Plan.DefaultPlan
val cancelClick = {
for {
sub <- subscription
} yield {
cancelSubscription(user, sub) _
}
} getOrElse {
() => Noop
}
val plans = Plan.findAll("$or" -> JArray(List(
("visibleOnRegistration" -> true),
("_id" -> currentPlan._id)
)))
val currentSubscriptionCanceling = subscription.filter(_.plan.isDefined).map(_.cancelled_?).getOrElse(false)
ClearClearable andThen
".plan" #> plans.map { plan =>
".plan-name *" #> plan.registrationTitle &
".plan-details *" #> plan.description &
".select-plan" #> ((plan._id == currentPlan._id || currentSubscriptionCanceling) ? ClearNodes | PassThru) &
".select-plan [onclick]" #> ajaxInvoke(changeSubscription(user, subscription, plan._id) _) &
".cancel-plan" #> ((plan._id == currentPlan._id && ! currentSubscriptionCanceling) ? PassThru | ClearNodes) &
".cancel-plan [onclick]" #> ajaxInvoke(cancelClick)
}
}
} openOr {
ClearNodes
}
}
def billingSummary = {
def submitBillingUpdateToStripe(token: String) = {
val billingUpdateResult = {
for {
user <- currentUser.is ?~! "Could not find user."
customerId <- (user.stripeCustomerId: Box[String]) ?~! "We couldn't find your Stripe ID."
customer <- tryo(stripe.Customer.retrieve(customerId)) ?~! "We couldn't retrieve your customer data."
updatedCustomer <- tryo(customer.update(Map("card" -> token)))
cardId <- (updatedCustomer.defaultCard: Box[String]) ?~! "Could not find active card id after update."
card <- (updatedCustomer.cards.data.find(_.id == cardId): Box[stripe.Card]) ?~! "Could not find card in cards list after update."
} yield {
User.update("_id" -> user._id, "$set" -> (
"activeCard" -> decompose(
UserActiveCard(card.last4, card.`type`, card.expMonth, card.expYear)
)
))
}
}
billingUpdateResult match {
case Empty =>
Notices.error("An internal error occured. Please contact us at [email protected] and let us know.")
Reload
case fail @ Failure(msg, _, _) =>
logger.warn(fail)
Notices.error("An error occured while updating billing information: " + msg)
Reload
case _ =>
Notices.notice("Your billing information has been successfully updated.")
Reload
}
}
{
for {
user <- currentUser.is
} yield {
".no-billing-information" #> (user.activeCard.isDefined ? ClearNodes | PassThru) &
".billing-information-on-file" #> user.activeCard.map { card =>
".card-type *" #> card.cardType &
".last4 *" #> card.last4 &
".expMonth *" #> card.expMonth &
".expYear *" #> card.expYear
} &
".update-billing-information [onclick]" #> ajaxInvoke(() =>
UpdateBillingInformation(SubmitBillingInfoToStripe(submitBillingUpdateToStripe)))
}
} openOr {
".billing-information" #> ClearNodes
}
}
}
| farmdawgnation/anchortab | src/main/scala/com/anchortab/snippet/Subscription.scala | Scala | apache-2.0 | 11,194 |