code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (stringclasses: 1) | license (stringclasses: 15) | size (int64, 5-1M)
---|---|---|---|---|---
package de.endrullis.sta
import org.junit.runner.RunWith
import org.specs2._
import org.specs2.runner.JUnitRunner
import org.specs2.specification.BeforeAll
/**
* Tests for a small 'Hello World' example.
*
* @author Stefan Endrullis <[email protected]>
*/
@RunWith(classOf[JUnitRunner])
class HelloWorldSpec extends Specification with BeforeAll with BaseVarIC { def is = s2"""
This 'Hello World' animation should
have 1 frame without last frame $frameCountWithoutLastFrame
have 2 frames with last frame $frameCountWithLastFrame
contain the frame "Hello World" $containFrameHelloWorld
"""
object Ani extends ScalaTikzAni {
add("Hello World" start 0 stay 1)
}
def beforeAll = Ani.main(Array())
def frameCountWithoutLastFrame =
Ani.frames(false).size must_== 1
def frameCountWithLastFrame =
Ani.frames(true).size must_== 2
def containFrameHelloWorld =
Ani.frames(false) must_== List("Hello World")
}
| xylo/scala-tikz-animations | src-test/de/endrullis/sta/HelloWorldSpec.scala | Scala | apache-2.0 | 954 |
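The spec above uses the specs2 acceptance style: the `s2` interpolated string lists the examples and each `$name` fragment refers to a method returning a match result. A minimal, self-contained sketch of the same pattern with no ScalaTikz dependency (class and example names are invented for illustration):
import org.specs2._

class GreetingSpec extends Specification { def is = s2"""
 This greeting should
   start with 'Hello'  $startsWithHello
   end with 'World'    $endsWithWorld
 """

  val greeting = "Hello World"

  // Each method returns a specs2 match result referenced by the s2 string above.
  def startsWithHello = greeting must startWith("Hello")
  def endsWithWorld   = greeting must endWith("World")
}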
package scifn.gen
import scifn.api.imports
import scifn.func.Fn
import scala.language.experimental.macros
import scala.language.implicitConversions
import scala.reflect.api.Universe
import scala.reflect.macros.blackbox
import scala.reflect.runtime.{currentMirror, universe => ru}
import scala.reflect.{macros => srm}
import scala.tools.reflect.ToolBox
import scala.util.Try
/**
* A base trait for reflection environment-specific traits to be mixed in with [[FeatureGen]]. The
* two environments are runtime reflection and compile-time reflection (macros). This trait provides
* a `universe` variable and two important functions:
*
* - `parse`: transforms a String to a corresponding `Tree` associated with the underlying `universe`.
* - `freshTermName`: provides a hygienic term name. See: [[http://docs.scala-lang.org/overviews/quasiquotes/hygiene.html Scala Quasiquote Hygiene page]]
*
* @tparam U type of Universe.
*/
sealed trait FeatureGenReflectEnv[U <: Universe] {
/**
* A `scala.reflect.api.Universe`
*/
protected[this] val universe: U
/**
* Transform a String into a Tree associated with `universe`.
* @param code code to turn into a `Tree`.
* @return a `Tree` representing the code.
*/
protected[this] def parse(code: String): universe.Tree
/**
* Create a new hygienic term name.
* This uses the method outlined in:
* [[http://docs.scala-lang.org/overviews/quasiquotes/hygiene.html Scala Quasiquote Hygiene page]]
* @param prefix prefix of the term name to be produced
* @return a new term name prefixed by the designated prefix and with a unique numbered suffix
*/
protected[this] final def freshTermName(prefix: String): universe.TermName =
universe.internal.reificationSupport.freshTermName(prefix)
}
/**
* Provides a mixin to [[FeatureGen]] for runtime reflection. In Scala 2.11, this is not only
* thread-safe but also optimized for use with parallel collections. For instance, using an
* implementation such as the ''Identity Fn Producer'', we can write code like:
*
* {{{
* import scifn.func.Fn
* import scifn.gen.impl.ident.RuntimeIdentityFeatureGen
* import scifn.gen.FeatureGen.Implicits.runtimeWeakTypeTag
*
* class X {
* def transform(ss: Seq[String]): Seq[Fn[String, Double]] = {
* val fg = RuntimeIdentityFeatureGen[String]
* // Turn into a parallel collection and get automatic speed up.
* ss.par.map(s => fg.compile[Double](s).get)
* }
* }
* }}}
* @tparam A the domain of the functions being produced.
*/
trait RuntimeFeatureGen[A] extends FeatureGenReflectEnv[ru.type] { self: FeatureGen[A, ru.type] =>
override protected[this] final val universe: ru.type = ru
import universe.{Liftable, Tree, WeakTypeTag}
/**
* A toolbox responsible for the `parse` and `compile` methods. This is thread-local
* because it will automatically boost performance when mapping over parallel functors.
*/
private[this] final val toolbox = new ThreadLocal[ToolBox[ru.type]] {
override protected[this] final def initialValue() = currentMirror.mkToolBox()
}
/**
* Transform a String into a Tree associated with `universe`.
* @param code code to turn into a `Tree`.
* @return a `Tree` representing the code.
*/
protected[this] final def parse(code: String): Tree = toolbox.get.parse(code)
/**
* Attempt to compile a `Tree` and transform it into an `Fn[A, B]`.
* @param tree the `Tree` to transform to an `Fn` instance.
* @tparam B the codomain of the function.
* @tparam F the type of function produced. This needs to be supplied because the underlying
* Scala compilation function in the reflection library is untyped, so we need a
* type to which the result is cast.
* @return an Fn instance. This is a function with additional capabilities.
*/
final def compile[B: WeakTypeTag, F <: Fn[A, B]: WeakTypeTag](tree: Tree): Try[F] =
Try { toolbox.get.eval(tree).asInstanceOf[F] }
/**
* Compile a function with a provided default.
* @param desc a description of the function to be synthesized.
* @param default the default value if no value could be returned by the synthesized function.
* @param imports imports to pass to the function being synthesized.
* @param bwtt a weak type tag for the output type of the synthesized function.
* @param lft a `Liftable`, responsible for transforming the default into a `Tree`.
* @tparam B the codomain of the synthesized function
* @return an Fn instance. This is a function with additional capabilities.
*/
final def compileFnWithDefault[B](desc: String, default: B, imports: Vector[String] = Vector.empty)(implicit bwtt: WeakTypeTag[B], lft: Liftable[B]): Try[Fn[A, B]] = {
val d = lft(default)
compile[B, Fn[A, B]](fn(desc, Option(d), imports)(bwtt))
}
/**
* Compile a function without a default.
* @param desc a description of the function to be synthesized.
* @param imports imports to pass to the function being synthesized.
* @param bwtt a weak type tag for the output type of the synthesized function.
* @tparam B the codomain of the synthesized function
* @return an Fn instance. This is a function with additional capabilities.
*/
final def compileFn[B](desc: String, imports: Vector[String] = Vector.empty)(implicit bwtt: WeakTypeTag[B]): Try[Fn[A, B]] =
compile[B, Fn[A, B]](fn(desc, None, imports)(bwtt))
}
/**
* The abstract base class with which implementations of macro-based feature gen are mixed in. For an
* example of how to use this, see code for [[scifn.gen.impl.ident.MacroIdentityFeatureGen]].
*
* @param c a blackbox macro context.
* @tparam U This should be c.universe.type in the instantiation.
*/
abstract class MacroFeatureGen[A, C <: blackbox.Context, U <: srm.Universe](val c: C)(implicit val awtt: U#WeakTypeTag[A])
extends FeatureGenReflectEnv[U] { self: FeatureGen[A, U] with FeatureGenReflectEnv[U] =>
override protected[this] val universe: U = c.universe.asInstanceOf[U]
import universe._
/**
* Transform a String into a Tree associated with `universe`.
* @param code code to turn into a `Tree`.
* @return a `Tree` representing the code.
*/
protected[this] final def parse(code: String): Tree = c.parse(code).asInstanceOf[Tree]
/**
* Get the class containing the ''macro application''. Because the ''macro definitions'' have
* `protected[this]` visibility, the ''macro application'' is expected to appear either in the
* same class that mixes in both [[scifn.api.FnProducer]] and the corresponding trait providing
* the ''macro definitions'', or in a class derived from the class with the ''macro definitions''.
* @return the type symbol of the class containing the ''macro application''.
*/
private[this] def definingClassSymbol = {
import c.universe.Expr
c.prefix match { case Expr(_this) => _this.tpe.typeSymbol }
}
/**
* Get the imports associated with the class containing the ''macro application''. These are
* read from the `imports` annotation on that class.
* @return a list of global imports.
*/
private[this] def importsInClassWithMacroApplication: List[String] = {
val impType = typeOf[imports]
definingClassSymbol.annotations.view.map(_.tree).collectFirst {
case a if a.tpe == impType =>
// Drop the first child because it is the annotation declaration.
// We know they are strings because 'imports' only has strings.
a.children.tail.map { case q"${s: String}" => s }
} getOrElse Nil
}
/**
* Get the imports for the current function. This includes both global imports associated
* with the class containing the ''macro application'' as well as any local imports provided
* in the ''macro application''.
* @param localImports expression of a varargs list of local imports.
* @return a Vector of all imports, created by joining the global and local imports
* and calling distinct on the combined list.
*/
private[this] def retrieveImports(localImports: c.Expr[String]*): Vector[String] = {
val localImp = localImports.map { i =>
val q"${s: String}" = i.tree
s
}.toVector
(importsInClassWithMacroApplication ++: localImp).distinct
}
/**
* Macro implementation for synthesizing a function with no local imports and no default.
* @param desc a function description
* @tparam B the synthesized function codomain
* @return an Expr of a function
*/
def fn[B: WeakTypeTag](desc: c.Expr[String]): c.Expr[Fn[A, B]] = {
val q"${s: String}" = desc.tree
val imp = retrieveImports()
c.Expr[Fn[A, B]](fn[B](s, None, imp).asInstanceOf[c.Tree])
}
/**
* Macro implementation for synthesizing a function with local imports and a default.
* @param desc a function description
* @param default a default the synthesized function returns when it cannot otherwise return a value.
* @param imports imports to add for just this synthesized function
* @tparam B the synthesized function codomain
* @return an Expr of a function
*/
def fnWithDefaultAndImports[B: WeakTypeTag](desc: c.Expr[String], default: c.Expr[B], imports: c.Expr[String]*): c.Expr[Fn[A, B]] = {
val q"${s: String}" = desc.tree
val imp = retrieveImports(imports:_*)
c.Expr[Fn[A, B]](fn[B](s, Option(default.tree.asInstanceOf[universe.Tree]), imp).asInstanceOf[c.Tree])
}
/**
* Macro implementation for synthesizing a function with local imports but no default.
* @param desc a function description
* @param imports imports to add for just this synthesized function
* @tparam B the synthesized function codomain
* @return an Expr of a function
*/
def fnWithImports[B: WeakTypeTag](desc: c.Expr[String], imports: c.Expr[String]*): c.Expr[Fn[A, B]] = {
val q"${s: String}" = desc.tree
val imp = retrieveImports(imports:_*)
c.Expr[Fn[A, B]](fn[B](s, None, imp).asInstanceOf[c.Tree])
}
}
/**
* Companion objects containing scifn macros can extend this trait to avoid writing boilerplate.
* Developers creating a new [[scifn.api.FnProducer]] implementation can extend [[MacroCompanion]]
* so that they just have to implement the ''instance'' method.
* @tparam D the domain lower bound.
*/
trait MacroCompanion[D] {
/**
* Top-level ''macro implementation'' that creates a function with a default value and local imports.
* This delegates to `instance`'s `fnWithDefaultAndImports` method.
* @param c a macro context.
* @param desc a description of the function to produce.
* @param default a default value.
* @param imports local imports.
* @tparam A domain of the function being materialized.
* @tparam B codomain of the function being materialized.
* @return an Expr of a function
*/
final def fnWithDefaultAndImports[A <: D: c.WeakTypeTag, B: c.WeakTypeTag](c: blackbox.Context)(desc: c.Expr[String], default: c.Expr[B], imports: c.Expr[String]*): c.Expr[Fn[A, B]] =
instance[A](c).fnWithDefaultAndImports[B](desc, default, imports:_*)
/**
* Top-level ''macro implementation'' that creates a function with function specific imports.
* This delegates to `instance`'s `fnWithImports` method.
* @param c a macro context.
* @param desc a description of the function to produce.
* @param imports local imports.
* @tparam A domain of the function being materialized.
* @tparam B codomain of the function being materialized.
* @return an Expr of a function
*/
final def fnWithImports[A <: D: c.WeakTypeTag, B: c.WeakTypeTag](c: blackbox.Context)(desc: c.Expr[String], imports: c.Expr[String]*): c.Expr[Fn[A, B]] =
instance[A](c).fnWithImports[B](desc, imports:_*)
/**
* Top-level ''macro implementation'' that creates a function.
* This delegates to `instance`'s `fn` method.
* @param c a macro context.
* @param desc a description of the function to produce.
* @tparam A domain of the function being materialized.
* @tparam B codomain of the function being materialized.
* @return an Expr of a function
*/
final def fn[A <: D: c.WeakTypeTag, B: c.WeakTypeTag](c: blackbox.Context)(desc: c.Expr[String]): c.Expr[Fn[A, B]] =
instance[A](c).fn[B](desc)
/**
* Create a macro-based feature generation instance of the appropriate type. If the FeatureGen for
* the input type is appropriately modularized into a base trait shared across the runtime and
* compile-time reflection environments, like `MyBasisCreator` below, then ''instance'' can be
* written as follows:
*
* {{{
* trait MyBasisCreator[A, U <: Universe] extends BasisCreator[A, U] {
* self: FeatureGen[A, U] with FeatureGenReflectEnv[U] =>
* def basis(basisDesc: String, default: Option[String]): Either[MalformedBasisError, Basis[universe.type]] = {
* // [ implementation here ]
* }
* }
*
* object MyMacroFeatureGen extends MacroCompanion[Any] {
* override protected[this] def instance[A : c.WeakTypeTag](c: blackbox.Context) =
* new MacroFeatureGen[A, c.type, c.universe.type](c)
* with FeatureGen[A, c.universe.type]
* with MyBasisCreator[A, c.universe.type]
* }
* }}}
* @param c macro context
* @param awtt WeakTypeTag to provide type information about the domain of the function to be produced.
* @tparam A The domain of the function to be returned
* @return a macro-based feature generator instance
*/
protected[this] def instance[A <: D](c: blackbox.Context)(implicit awtt: c.WeakTypeTag[A]): MacroFeatureGen[A, c.type, c.universe.type]
}
| scifn/scifn | scifn-gen/src/main/scala-2.11/scifn/gen/reflectionEnvs.scala | Scala | mit | 13,665 |
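A hedged usage sketch for the runtime side, based only on the `compileFn` and `compileFnWithDefault` signatures above; `RuntimeIdentityFeatureGen` comes from the scaladoc example, and the description string is a placeholder whose syntax depends on the concrete Fn producer:
import scala.util.Try

import scifn.func.Fn
import scifn.gen.FeatureGen.Implicits.runtimeWeakTypeTag
import scifn.gen.impl.ident.RuntimeIdentityFeatureGen

object RuntimeFeatureGenSketch {
  def main(args: Array[String]): Unit = {
    val fg = RuntimeIdentityFeatureGen[String]

    // No default: the Try fails if the description cannot be compiled.
    val f1: Try[Fn[String, Double]] = fg.compileFn[Double]("someFeatureDescription")

    // With a default, lifted to a Tree through the implicit Liftable[Double].
    val f2: Try[Fn[String, Double]] = fg.compileFnWithDefault[Double]("someFeatureDescription", default = 0.0)

    println(s"compiled: ${f1.isSuccess}, with default: ${f2.isSuccess}")
  }
}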
/*
* Copyright 2016 Lightcopy
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.internal
import org.apache.spark.internal.config.ConfigEntry
import org.apache.spark.sql.SparkSession
object IndexConf {
import SQLConf.buildConf
val METASTORE_LOCATION = buildConf("spark.sql.index.metastore").
doc("Metastore location or root directory to store index information, will be created " +
"if path does not exist").
stringConf.
createWithDefault("")
val CREATE_IF_NOT_EXISTS = buildConf("spark.sql.index.createIfNotExists").
doc("When set to true, creates index if one does not exist in metastore for the table").
booleanConf.
createWithDefault(false)
val NUM_PARTITIONS = buildConf("spark.sql.index.partitions").
doc("When creating index uses this number of partitions. If value is non-positive or not " +
"provided then uses `sc.defaultParallelism * 3` or `spark.sql.shuffle.partitions` " +
"configuration value, whichever is smaller").
intConf.
createWithDefault(0)
val PARQUET_FILTER_STATISTICS_ENABLED =
buildConf("spark.sql.index.parquet.filter.enabled").
doc("When set to true, writes filter statistics for indexed columns when creating table " +
"index, otherwise only min/max statistics are used. Filter statistics are always used " +
"during filtering stage, if applicable").
booleanConf.
createWithDefault(true)
val PARQUET_FILTER_STATISTICS_TYPE = buildConf("spark.sql.index.parquet.filter.type").
doc("When filter statistics enabled, selects type of statistics to use when creating index. " +
"Available options are `bloom`, `dict`").
stringConf.
createWithDefault("bloom")
val PARQUET_FILTER_STATISTICS_EAGER_LOADING =
buildConf("spark.sql.index.parquet.filter.eagerLoading").
doc("When set to true, read and load all filter statistics in memory the first time catalog " +
"is resolved, otherwise load them lazily as needed when evaluating predicate. " +
"Eager loading removes IO of reading filter data from disk, but requires extra memory").
booleanConf.
createWithDefault(false)
/** Create new configuration from session SQLConf */
def newConf(sparkSession: SparkSession): IndexConf = {
new IndexConf(sparkSession.sessionState.conf)
}
}
class IndexConf private[sql](val sqlConf: SQLConf) {
import IndexConf._
/** ************************ Index Params/Hints ******************* */
def metastoreLocation: String = getConf(METASTORE_LOCATION)
def parquetFilterEnabled: Boolean = getConf(PARQUET_FILTER_STATISTICS_ENABLED)
def parquetFilterType: String = getConf(PARQUET_FILTER_STATISTICS_TYPE)
def parquetFilterEagerLoading: Boolean = getConf(PARQUET_FILTER_STATISTICS_EAGER_LOADING)
def createIfNotExists: Boolean = getConf(CREATE_IF_NOT_EXISTS)
def numPartitions: Int = getConf(NUM_PARTITIONS)
/** ********************** IndexConf functionality methods ************ */
/** Set configuration for underlying SQLConf */
def setConf[T](entry: ConfigEntry[T], value: T): Unit = {
sqlConf.setConf(entry, value)
}
/** Set direct configuration key and value in SQLConf */
def setConfString(key: String, value: String): Unit = {
sqlConf.setConfString(key, value)
}
/** Get configuration from underlying SQLConf */
def getConf[T](entry: ConfigEntry[T]): T = {
sqlConf.getConf(entry)
}
/** Unset configuration from SQLConf */
def unsetConf(entry: ConfigEntry[_]): Unit = {
sqlConf.unsetConf(entry)
}
}
| lightcopy/parquet-index | src/main/scala/org/apache/spark/sql/internal/IndexConf.scala | Scala | apache-2.0 | 4,074 |
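A small hedged sketch of how the configuration object above could be used from a `SparkSession`; the option values chosen here are purely illustrative:
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.IndexConf

object IndexConfSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("index-conf-sketch").master("local[*]").getOrCreate()

    // Wrap the session's SQLConf and read/write the index-related settings.
    val conf = IndexConf.newConf(spark)
    conf.setConf(IndexConf.PARQUET_FILTER_STATISTICS_TYPE, "bloom")
    conf.setConfString("spark.sql.index.createIfNotExists", "true")

    println(s"metastore=${conf.metastoreLocation}, partitions=${conf.numPartitions}")
    spark.stop()
  }
}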
/*
* Copyright 2015 eleflow.com.br.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eleflow.uberdata.core.data.json
/**
* Created by dirceu on 23/03/16.
*/
case class BlockUpdated(blockManagerExecutorId_ : String,
blockManagerHost: String,
blockManagerPort: Int,
blockIdName: String,
storageLevel: StorageLvl,
memSize: Long,
diskSize: Long)
case class BlockManagerAdded(time: Long,
blockManagerExecutorId: String,
blockManagerHost: String,
blockManagerPort: Int,
maxMem: Long)
case class BlockManagerRemoved(time: Long,
blockManagerExecutorId: String,
blockManagerHost: String,
blockManagerPort: Int)
case class BlockMetrics(blockName: String, executorRunTime: Long)
| eleflow/uberdata | iuberdata_core/src/main/scala/eleflow/uberdata/core/data/json/Block.scala | Scala | apache-2.0 | 1,548 |
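These case classes are plain data holders (the package name suggests they carry Spark block events for JSON serialization). A tiny sketch instantiating the ones that need no external types; all values are made up:
import eleflow.uberdata.core.data.json.{BlockManagerAdded, BlockManagerRemoved, BlockMetrics}

object BlockJsonSketch extends App {
  val added = BlockManagerAdded(time = 1458700000000L, blockManagerExecutorId = "driver",
    blockManagerHost = "localhost", blockManagerPort = 7077, maxMem = 512L * 1024 * 1024)
  val removed = BlockManagerRemoved(time = 1458700100000L, blockManagerExecutorId = "driver",
    blockManagerHost = "localhost", blockManagerPort = 7077)
  val metrics = BlockMetrics(blockName = "rdd_1_0", executorRunTime = 42L)

  Seq(added, removed, metrics).foreach(println)
}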
package eventstore.examples
import akka.actor.ActorSystem
import eventstore.{ EventStream, EventNumber, ReadEvent, EsConnection }
import eventstore.tcp.ConnectionActor
class APIsExample {
val system = ActorSystem()
def methodCall() {
val connection = EsConnection(system)
val future = connection future ReadEvent(EventStream.Id("my-stream"), EventNumber.First)
}
def messageSending() {
val connection = system.actorOf(ConnectionActor.props())
connection ! ReadEvent(EventStream.Id("my-stream"), EventNumber.First)
}
}
| pawelkaczor/EventStore.JVM | src/main/scala/eventstore/examples/APIsExample.scala | Scala | bsd-3-clause | 547 |
package edu.scaps
object Hello extends App {
println("Hi!")
}
class Hello {
def iu(number: Int): Int = number
}
class Miz {
def mau(times: Int): String = 1.to(times).foldLeft("")((str, _) => str + "mau, ")
def mauNice(times: Int): String = (1 to times).map(_ => "mau").mkString(", ")
}
| flomerz/scala-ide-scaps-testproject | src/main/scala/edu/scaps/Hello.scala | Scala | mpl-2.0 | 299 |
package com.greencatsoft.angularjs
import scala.scalajs.js
import scala.scalajs.js.Any.fromFunction11
import scala.scalajs.js.UndefOr
import scala.scalajs.js.UndefOr.undefOr2ops
trait Filter[A] extends Factory[js.Function] {
override def apply(): js.Function = (item: A, arg0: UndefOr[Any], arg1: UndefOr[Any], arg2: UndefOr[Any],
arg3: UndefOr[Any], arg4: UndefOr[Any], arg5: UndefOr[Any], arg6: UndefOr[Any], arg7: UndefOr[Any],
arg8: UndefOr[Any], arg9: UndefOr[Any]) =>
filter(item, Seq(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9).map(_.toList).filterNot(_.isEmpty).map(_.head))
def filter(item: A): Any = item
def filter(item: A, args: Seq[Any]): Any = filter(item)
}
@js.native
@injectable("$filter")
trait FilterService extends js.Function1[String, js.Function] {
def apply(name: String): js.Function = js.native
}
| 7thsense/scalajs-angular | src/main/scala/com/greencatsoft/angularjs/Filter.scala | Scala | apache-2.0 | 863 |
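A hedged sketch of concrete filters built on the trait above, assuming `Factory` adds no abstract members beyond `apply()` (which `Filter` already implements); the names are invented and the `@injectable`/module registration wiring is omitted:
import com.greencatsoft.angularjs.Filter

// Single-argument variant: only the item is used.
object UppercaseFilter extends Filter[String] {
  override def filter(item: String): Any = item.toUpperCase
}

// Variant using the optional arguments Angular passes after the item.
object TruncateFilter extends Filter[String] {
  override def filter(item: String, args: Seq[Any]): Any = args.headOption match {
    case Some(n: Int) => item.take(n)
    case _            => item
  }
}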
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.{SparkConf, SparkContext}
// $example on$
import org.apache.spark.mllib.classification.{NaiveBayes, NaiveBayesModel}
import org.apache.spark.mllib.util.MLUtils
// $example off$
object NaiveBayesExample {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("NaiveBayesExample")
val sc = new SparkContext(conf)
// $example on$
// Load and parse the data file.
val data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
// Split data into training (60%) and test (40%).
val Array(training, test) = data.randomSplit(Array(0.6, 0.4))
val model = NaiveBayes.train(training, lambda = 1.0, modelType = "multinomial")
val predictionAndLabel = test.map(p => (model.predict(p.features), p.label))
val accuracy = 1.0 * predictionAndLabel.filter(x => x._1 == x._2).count() / test.count()
// Save and load model
model.save(sc, "target/tmp/myNaiveBayesModel")
val sameModel = NaiveBayesModel.load(sc, "target/tmp/myNaiveBayesModel")
// $example off$
}
}
// scalastyle:on println
| mrchristine/spark-examples-dbc | src/main/scala/org/apache/spark/examples/mllib/NaiveBayesExample.scala | Scala | apache-2.0 | 1,966 |
package app.claim
import utils.WithJsBrowser
import utils.pageobjects.preview.PreviewTestableData
import utils.pageobjects.s_eligibility.GBenefitsPage
import utils.pageobjects._
import app.FunctionalTestCommon
/**
* End-to-End functional tests using input files created by Steve Moody.
* @author Jorge Migueis
* Date: 02/08/2013
*/
class FunctionalTestCase7Spec extends FunctionalTestCommon {
isolated
section("functional", "claim")
"The application" should {
"Successfully run absolute Test Case 7" in new WithJsBrowser with PageObjects {
val page = GBenefitsPage(context)
val claim = TestData.readTestDataFromFile("/functional_scenarios/ClaimScenario_TestCase7.csv")
test(page, claim, buildPreviewUseData)
}
}
section("functional", "claim")
private def buildPreviewUseData = {
PreviewTestableData() +
"AboutYouTitle" + "AboutYouFirstName" + "AboutYouMiddleName" + "AboutYouSurname" +
dateConversion("AboutYouDateOfBirth") +
dateConversion("ClaimDateWhenDoYouWantYourCarersAllowanceClaimtoStart") +
addressConversion("AboutYouAddress") + "AboutYouPostcode" +
"AboutYouNationalityAndResidencyNationality" +
"OtherMoneyOtherAreYouReceivingPensionFromAnotherEEA" +
"AboutYourPartnerTitle" + "AboutYourPartnerFirstName" + "AboutYourPartnerFirstName" + "AboutYourPartnerSurname" +
dateConversion("AboutYourPartnerDateofBirth") +
"OtherMoneyOtherAreYouPayingInsuranceToAnotherEEA" +
"AboutTheCareYouProvideWhatTheirRelationshipToYou" +
"AboutTheCareYouProvideTitlePersonCareFor" + "AboutTheCareYouProvideFirstNamePersonCareFor" + "AboutTheCareYouProvideMiddleNamePersonCareFor" + "AboutTheCareYouProvideSurnamePersonCareFor" +
dateConversion("AboutTheCareYouProvideDateofBirthPersonYouCareFor") +
addressConversion("AboutTheCareYouProvideAddressPersonCareFor") + "AboutTheCareYouProvidePostcodePersonCareFor" +
"AboutTheCareYouProvideDoYouSpend35HoursorMoreEachWeek" +
"AboutTheCareYouProvideOtherCarer" +
"AboutTheCareYouProvideHaveYouHadAnyMoreBreaksInCare_1" +
"EducationHaveYouBeenOnACourseOfEducation" +
"EducationCourseTitle" +
"EducationNameofSchool" +
"EducationNameOfMainTeacherOrTutor" +
"EducationPhoneNumber" +
dateConversion("EducationWhenDidYouStartTheCourse") +
dateConversion("EducationWhenDoYouExpectTheCourseToEnd") +
"EmploymentHaveYouBeenEmployedAtAnyTime_0" +
"EmploymentEmployerName_1" +
"EmploymentHaveYouBeenSelfEmployedAtAnyTime"
}
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/app/claim/FunctionalTestCase7Spec.scala | Scala | mit | 2,591 |
package ml.wolfe.term
import ml.wolfe.WolfeSpec
/**
* @author riedel
*/
class IntTermSpecs extends WolfeSpec {
import ml.wolfe.term.TermImplicits._
"A int term" should {
"have a singleton domain if its a constant" in {
val i:IntTerm = 1
i.domain.values should be (1 until 2)
}
"have an increased range if constant is added" in {
val i = Ints(0 until 4).Var
val t = i + 2
t.domain.values should be (2 until 6)
}
"have a decreased range if constant is subtracted" in {
val i = Ints(0 until 4).Var
val t = i - 2
t.domain.values should be (-2 until 2)
}
}
"The Ints domain" should {
"allow iteration over its elements" in {
val all = Ints.toIterable
all should be (Range(Int.MinValue,Int.MaxValue))
}
}
}
| wolfe-pack/wolfe | wolfe-core/src/test/scala/ml/wolfe/term/IntTermSpecs.scala | Scala | apache-2.0 | 813 |
package arena.market.order
abstract class OrderBookRequest
case class NewOrder(timestamp: Long, tradeID: String, symbol: String, qty: Long, isBuy: Boolean, price: Option[Double]) extends OrderBookRequest
case class Cancel(timestamp: Long, order: NewOrder) extends OrderBookRequest
case class Amend(timestamp: Long, order: NewOrder, newPrice: Option[Double], newQty: Option[Long]) extends OrderBookRequest
| quedexco/arena-scala | src/main/scala/arena/market/order/OrderBookRequest.scala | Scala | apache-2.0 | 408 |
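Since the three case classes share the abstract `OrderBookRequest` base, a short sketch of constructing and pattern matching over them; the values are illustrative:
import arena.market.order.{Amend, Cancel, NewOrder, OrderBookRequest}

object OrderBookSketch extends App {
  val order  = NewOrder(timestamp = 1L, tradeID = "t-1", symbol = "ABC", qty = 100L, isBuy = true, price = Some(10.5))
  val amend  = Amend(timestamp = 2L, order = order, newPrice = Some(10.6), newQty = None)
  val cancel = Cancel(timestamp = 3L, order = order)

  def describe(req: OrderBookRequest): String = req match {
    case NewOrder(_, id, sym, qty, isBuy, price) =>
      s"new ${if (isBuy) "buy" else "sell"} $qty $sym @ ${price.fold("market")(_.toString)} ($id)"
    case Amend(_, o, newPrice, newQty) => s"amend ${o.tradeID}: price=$newPrice qty=$newQty"
    case Cancel(_, o)                  => s"cancel ${o.tradeID}"
  }

  Seq(order, amend, cancel).map(describe).foreach(println)
}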
package im.actor.server.group
import java.time.{ LocalDateTime, ZoneOffset }
import akka.actor.Status
import akka.pattern.pipe
import com.google.protobuf.ByteString
import com.trueaccord.scalapb.GeneratedMessage
import im.actor.api.rpc.Update
import im.actor.api.rpc.groups._
import im.actor.api.rpc.messaging.ServiceMessage
import im.actor.api.rpc.users.Sex
import im.actor.server.api.ApiConversions._
import im.actor.server.event.TSEvent
import im.actor.server.file.Avatar
import im.actor.server.group.GroupErrors._
import im.actor.server.office.PushTexts
import im.actor.server.dialog.group.GroupDialogOperations
import im.actor.server.sequence.SeqUpdatesManager._
import im.actor.server.sequence.{ SeqState, SeqStateDate }
import im.actor.server.user.UserOffice
import im.actor.server.util.ACLUtils._
import im.actor.server.util.IdUtils._
import im.actor.server.util.ImageUtils._
import im.actor.server.util.{ ACLUtils, GroupServiceMessages, HistoryUtils }
import im.actor.server.{ models, persist ⇒ p }
import org.joda.time.DateTime
import slick.driver.PostgresDriver.api._
import scala.concurrent.Future
import scala.concurrent.forkjoin.ThreadLocalRandom
private[group] trait GroupCommandHandlers extends GroupsImplicits with GroupCommandHelpers {
this: GroupProcessor ⇒
import GroupCommands._
import GroupEvents._
protected def create(groupId: Int, creatorUserId: Int, creatorAuthId: Long, title: String, randomId: Long, userIds: Set[Int]): Unit = {
val date = new DateTime
val rng = ThreadLocalRandom.current()
val accessHash = rng.nextLong()
val botUserId = nextIntId(rng)
val botToken = accessToken(rng)
val events = Vector(
TSEvent(now(), GroupEvents.Created(groupId, creatorUserId, accessHash, title)),
TSEvent(now(), GroupEvents.BotAdded(botUserId, botToken))
)
userIds.filterNot(_ == creatorUserId) foreach { userId ⇒
val randomId = rng.nextLong()
context.parent ! Invite(groupId, userId, creatorUserId, creatorAuthId, randomId)
}
var stateMaybe: Option[Group] = None
persist[GeneratedMessage](events) {
case TSEvent(ts, evt: GroupEvents.Created) ⇒
val group = initState(ts, evt)
stateMaybe = Some(group)
val serviceMessage = GroupServiceMessages.groupCreated
val update = UpdateGroupInvite(groupId = groupId, inviteUserId = creatorUserId, date = date.getMillis, randomId = randomId)
db.run(
for {
_ ← p.Group.create(
models.Group(
id = groupId,
creatorUserId = group.creatorUserId,
accessHash = group.accessHash,
title = group.title,
isPublic = group.isPublic,
createdAt = group.createdAt,
about = None,
topic = None
),
randomId
)
_ ← p.GroupUser.create(groupId, creatorUserId, creatorUserId, date, None, isAdmin = true)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(creatorUserId),
models.Peer.group(group.id),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
SeqState(seq, state) ← if (isBot(group, creatorUserId)) DBIO.successful(SeqState(0, ByteString.EMPTY))
else DBIO.from(UserOffice.broadcastClientUpdate(creatorUserId, creatorAuthId, update, pushText = None, isFat = true, deliveryId = Some(s"creategroup_${randomId}")))
} yield CreateAck(group.accessHash, seq, state, date.getMillis)
) pipeTo sender() onFailure {
case e ⇒
log.error(e, "Failed to create a group")
}
case evt @ TSEvent(_, GroupEvents.BotAdded(userId, token)) ⇒
stateMaybe = stateMaybe map { state ⇒
val newState = updatedState(evt, state)
context become working(newState)
newState
}
val rng = ThreadLocalRandom.current()
UserOffice.create(userId, nextAccessSalt(rng), "Bot", "US", Sex.Unknown, isBot = true)
.flatMap(_ ⇒ db.run(p.GroupBot.create(groupId, userId, token))) onFailure {
case e ⇒
log.error(e, "Failed to create group bot")
}
}
}
protected def invite(group: Group, userId: Int, inviterUserId: Int, inviterAuthId: Long, randomId: Long, date: DateTime): Future[SeqStateDate] = {
val dateMillis = date.getMillis
val memberIds = group.members.keySet
val inviteeUpdate = UpdateGroupInvite(groupId = groupId, randomId = randomId, inviteUserId = inviterUserId, date = dateMillis)
val userAddedUpdate = UpdateGroupUserInvited(groupId = groupId, userId = userId, inviterUserId = inviterUserId, date = dateMillis, randomId = randomId)
val serviceMessage = GroupServiceMessages.userInvited(userId)
for {
_ ← db.run(p.GroupUser.create(groupId, userId, inviterUserId, date, None, isAdmin = false))
_ ← UserOffice.broadcastUserUpdate(userId, inviteeUpdate, pushText = Some(PushTexts.Invited), isFat = true, deliveryId = Some(s"invite_${randomId}"))
// TODO: #perf the following broadcasts serialize the update separately for each user
_ ← Future.sequence(memberIds.toSeq.filterNot(_ == inviterUserId).map(UserOffice.broadcastUserUpdate(_, userAddedUpdate, Some(PushTexts.Added), isFat = true, deliveryId = Some(s"useradded_${randomId}")))) // use broadcastUsersUpdate maybe?
seqstate ← UserOffice.broadcastClientUpdate(inviterUserId, inviterAuthId, userAddedUpdate, pushText = None, isFat = true, deliveryId = Some(s"useradded_${randomId}"))
// TODO: Move to a History Writing subsystem
_ ← db.run(HistoryUtils.writeHistoryMessage(
models.Peer.privat(inviterUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
))
} yield {
SeqStateDate(seqstate.seq, seqstate.state, dateMillis)
}
}
protected def setJoined(group: Group, joiningUserId: Int, joiningUserAuthId: Long, invitingUserId: Int): Unit = {
if (!hasMember(group, joiningUserId) || isInvited(group, joiningUserId)) {
val replyTo = sender()
persist(TSEvent(now(), GroupEvents.UserJoined(joiningUserId, invitingUserId))) { evt ⇒
val newState = workWith(evt, group)
val memberIds = group.members.keySet
val action: DBIO[(SeqStateDate, Vector[Int], Long)] = {
for {
updates ← {
val date = new DateTime
val randomId = ThreadLocalRandom.current().nextLong()
for {
exists ← p.GroupUser.exists(groupId, joiningUserId)
_ ← if (exists) DBIO.successful(()) else p.GroupUser.create(groupId, joiningUserId, invitingUserId, date, Some(LocalDateTime.now(ZoneOffset.UTC)), isAdmin = false)
seqstatedate ← DBIO.from(GroupDialogOperations.sendMessage(groupId, joiningUserId, joiningUserAuthId, randomId, GroupServiceMessages.userJoined, isFat = true))
} yield (seqstatedate, memberIds.toVector :+ invitingUserId, randomId)
}
} yield updates
}
db.run(action) pipeTo replyTo onFailure {
case e ⇒
replyTo ! Status.Failure(e)
}
}
} else {
sender() ! Status.Failure(GroupErrors.UserAlreadyInvited)
}
}
protected def kick(group: Group, kickedUserId: Int, kickerUserId: Int, kickerAuthId: Long, randomId: Long): Unit = {
val replyTo = sender()
val date = new DateTime
persist(TSEvent(now(), GroupEvents.UserKicked(kickedUserId, kickerUserId, date.getMillis))) { evt ⇒
workWith(evt, group)
val update = UpdateGroupUserKick(groupId, kickedUserId, kickerUserId, date.getMillis, randomId)
val serviceMessage = GroupServiceMessages.userKicked(kickedUserId)
db.run(removeUser(kickedUserId, group.members.keySet, kickerAuthId, serviceMessage, update, date, randomId)) pipeTo replyTo onFailure {
case e ⇒ replyTo ! Status.Failure(e)
}
}
}
protected def leave(group: Group, userId: Int, authId: Long, randomId: Long): Unit = {
val replyTo = sender()
val date = new DateTime
persist(TSEvent(now(), GroupEvents.UserLeft(userId, date.getMillis))) { evt ⇒
workWith(evt, group)
val update = UpdateGroupUserLeave(groupId, userId, date.getMillis, randomId)
val serviceMessage = GroupServiceMessages.userLeft(userId)
db.run(removeUser(userId, group.members.keySet, authId, serviceMessage, update, date, randomId)) pipeTo replyTo onFailure {
case e ⇒ replyTo ! Status.Failure(e)
}
}
}
protected def updateAvatar(group: Group, clientUserId: Int, clientAuthId: Long, avatarOpt: Option[Avatar], randomId: Long): Unit = {
persistStashingReply(TSEvent(now(), AvatarUpdated(avatarOpt)), group) { evt ⇒
val date = new DateTime
val avatarData = avatarOpt map (getAvatarData(models.AvatarData.OfGroup, groupId, _)) getOrElse models.AvatarData.empty(models.AvatarData.OfGroup, groupId.toLong)
val update = UpdateGroupAvatarChanged(groupId, clientUserId, avatarOpt, date.getMillis, randomId)
val serviceMessage = GroupServiceMessages.changedAvatar(avatarOpt)
val memberIds = group.members.keySet
db.run(for {
_ ← p.AvatarData.createOrUpdate(avatarData)
(seqstate, _) ← broadcastClientAndUsersUpdate(clientUserId, clientAuthId, memberIds, update, None, isFat = false)
} yield {
db.run(HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
))
UpdateAvatarAck(avatarOpt, SeqStateDate(seqstate.seq, seqstate.state, date.getMillis))
})
}
}
protected def makePublic(group: Group, description: String): Unit = {
persistStashingReply(Vector(TSEvent(now(), BecamePublic()), TSEvent(now(), AboutUpdated(Some(description)))), group) { _ ⇒
db.run(DBIO.sequence(Seq(
p.Group.makePublic(groupId),
p.Group.updateAbout(groupId, Some(description))
))) map (_ ⇒ MakePublicAck())
}
}
protected def updateTitle(group: Group, clientUserId: Int, clientAuthId: Long, title: String, randomId: Long): Unit = {
val memberIds = group.members.keySet
persistStashingReply(TSEvent(now(), TitleUpdated(title)), group) { _ ⇒
val date = new DateTime
val update = UpdateGroupTitleChanged(groupId = groupId, userId = clientUserId, title = title, date = date.getMillis, randomId = randomId)
val serviceMessage = GroupServiceMessages.changedTitle(title)
db.run(for {
_ ← p.Group.updateTitle(groupId, title, clientUserId, randomId, date)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
(seqstate, _) ← broadcastClientAndUsersUpdate(clientUserId, clientAuthId, memberIds, update, Some(PushTexts.TitleChanged), isFat = false)
} yield SeqStateDate(seqstate.seq, seqstate.state, date.getMillis))
}
}
protected def updateTopic(group: Group, clientUserId: Int, clientAuthId: Long, topic: Option[String], randomId: Long): Unit = {
withGroupMember(group, clientUserId) { member ⇒
val trimmed = topic.map(_.trim)
if (trimmed.map(s ⇒ s.nonEmpty & s.length < 255).getOrElse(true)) {
persistStashingReply(TSEvent(now(), TopicUpdated(trimmed)), group) { _ ⇒
val date = new DateTime
val dateMillis = date.getMillis
val serviceMessage = GroupServiceMessages.changedTopic(trimmed)
val update = UpdateGroupTopicChanged(groupId = groupId, randomId = randomId, userId = clientUserId, topic = trimmed, date = dateMillis)
db.run(for {
_ ← p.Group.updateTopic(groupId, trimmed)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
(SeqState(seq, state), _) ← broadcastClientAndUsersUpdate(
clientUserId = clientUserId,
clientAuthId = clientAuthId,
userIds = group.members.keySet - clientUserId,
update = update,
pushText = Some(PushTexts.TopicChanged),
isFat = false
)
} yield SeqStateDate(seq, state, dateMillis))
}
} else {
sender() ! Status.Failure(TopicTooLong)
}
}
}
protected def updateAbout(group: Group, clientUserId: Int, clientAuthId: Long, about: Option[String], randomId: Long): Unit = {
withGroupAdmin(group, clientUserId) {
val trimmed = about.map(_.trim)
if (trimmed.map(s ⇒ s.nonEmpty & s.length < 255).getOrElse(true)) {
persistStashingReply(TSEvent(now(), AboutUpdated(trimmed)), group) { _ ⇒
val date = new DateTime
val dateMillis = date.getMillis
val update = UpdateGroupAboutChanged(groupId, trimmed)
val serviceMessage = GroupServiceMessages.changedAbout(trimmed)
db.run(for {
_ ← p.Group.updateAbout(groupId, trimmed)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
(SeqState(seq, state), _) ← broadcastClientAndUsersUpdate(
clientUserId = clientUserId,
clientAuthId = clientAuthId,
userIds = group.members.keySet - clientUserId,
update = update,
pushText = Some(PushTexts.AboutChanged),
isFat = false
)
} yield SeqStateDate(seq, state, dateMillis))
}
} else {
sender() ! Status.Failure(AboutTooLong)
}
}
}
protected def makeUserAdmin(group: Group, clientUserId: Int, clientAuthId: Long, candidateId: Int): Unit = {
withGroupAdmin(group, clientUserId) {
withGroupMember(group, candidateId) { member ⇒
persistStashingReply(TSEvent(now(), UserBecameAdmin(candidateId, clientUserId)), group) { e ⇒
val date = e.ts
if (!member.isAdmin) {
// we have the current state, which is not updated by the UserBecameAdmin event, so we update it manually
val updated = group.members.updated(candidateId, group.members(candidateId).copy(isAdmin = true))
val members = updated.values.map(_.asStruct).toVector
db.run(for {
_ ← p.GroupUser.makeAdmin(groupId, candidateId)
(seqState, _) ← broadcastClientAndUsersUpdate(
clientUserId = clientUserId,
clientAuthId = clientAuthId,
userIds = group.members.keySet - clientUserId,
update = UpdateGroupMembersUpdate(groupId, members),
pushText = None,
isFat = false
)
} yield (members, seqState))
} else {
Future.failed(UserAlreadyAdmin)
}
}
}
}
}
protected def revokeIntegrationToken(group: Group, userId: Int): Unit = {
withGroupAdmin(group, userId) {
val newToken = ACLUtils.accessToken(ThreadLocalRandom.current())
persistStashingReply(TSEvent(now(), IntegrationTokenRevoked(newToken)), group) { _ ⇒
db.run(for {
_ ← p.GroupBot.updateToken(groupId, newToken)
} yield RevokeIntegrationTokenAck(newToken))
}
}
}
private def removeUser(userId: Int, memberIds: Set[Int], clientAuthId: Long, serviceMessage: ServiceMessage, update: Update, date: DateTime, randomId: Long): DBIO[SeqStateDate] = {
val groupPeer = models.Peer.group(groupId)
for {
_ ← p.GroupUser.delete(groupId, userId)
_ ← p.GroupInviteToken.revoke(groupId, userId)
(SeqState(seq, state), _) ← broadcastClientAndUsersUpdate(userId, clientAuthId, memberIds - userId, update, Some(PushTexts.Left), isFat = false)
// TODO: Move to a History Writing subsystem
_ ← p.Dialog.updateLastReadAt(userId, groupPeer, date)
_ ← p.Dialog.updateOwnerLastReadAt(userId, groupPeer, date)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(userId),
groupPeer,
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
} yield SeqStateDate(seq, state, date.getMillis)
}
}
| stonegithubs/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/group/GroupCommandHandlers.scala | Scala | mit | 17,062 |
/*
* Copyright 2010 Michael Fortin <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this
* file except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed
* under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific
* language governing permissions and limitations under the License.
*/
package org.brzy.webapp.persistence
import java.lang.String
import org.brzy.validator.Validator
import scala.language.implicitConversions
class MockPersistable[E<:{def id:PK},PK] extends Dao[E,PK] {
def findBy(id: PK)(implicit pk: Manifest[PK], t: Manifest[E]) = null.asInstanceOf[E]
def getBy(id: PK)(implicit pk: Manifest[PK], t: Manifest[E]) = None
def getOrElse(id: PK, alternate: E)(implicit pk: Manifest[PK], t: Manifest[E]) = alternate
def load(id: String)(implicit pk: Manifest[PK], t: Manifest[E]) = null.asInstanceOf[E]
def list(size: Int, offset: Int, sort: String, order: String)(implicit t: Manifest[E]) = List.empty[E]
def count(implicit t: Manifest[E]) = 1
implicit def applyCrudOps(t: E)(implicit m: Manifest[E]) = new MockCrudOps(t)
class MockCrudOps(t:E) extends PersistentCrudOps(t) {
def validate = Validator(t).violations
def insert(commit:Boolean = false) = t
def commit() {}
def update(commit:Boolean = false) = t
def delete() {}
}
}
| m410/brzy | src/test/scala/org/brzy/webapp/persistence/MockPersistable.scala | Scala | apache-2.0 | 1,612 |
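A hedged sketch of using the mock DAO above in a test, with a hypothetical `Widget` entity that satisfies the structural `{def id: PK}` bound:
import org.brzy.webapp.persistence.MockPersistable

// Hypothetical entity; any type with a matching `id` member would do.
case class Widget(id: Long, name: String)

object MockPersistableSketch extends App {
  val dao = new MockPersistable[Widget, Long]

  // All operations are stubs: getBy always returns None, getOrElse returns the alternate, count is fixed.
  println(dao.getBy(1L))                       // None
  println(dao.getOrElse(1L, Widget(1L, "w")))  // Widget(1,w)
  println(dao.count)                           // 1
}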
package org.jetbrains.plugins.scala.debugger.breakpoints
import com.intellij.debugger.engine.{DebugProcessImpl, JavaBreakpointHandler, JavaBreakpointHandlerFactory}
/**
* @author Nikolay.Tropin
*/
class ScalaBreakpointHandlerFactory extends JavaBreakpointHandlerFactory {
override def createHandler(process: DebugProcessImpl): JavaBreakpointHandler = new ScalaBreakpointHandler(process)
}
class ScalaBreakpointHandler(process: DebugProcessImpl) extends JavaBreakpointHandler(classOf[ScalaLineBreakpointType], process)
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/debugger/breakpoints/ScalaBreakpointHandlerFactory.scala | Scala | apache-2.0 | 524 |
package scalaz.contrib
package geo
sealed trait Elevation {
val value: Double
}
trait Elevations {
def elevation(d: Double) = new Elevation {
val value = d.abs
}
}
object Elevation {
implicit val elevationInstances = new scalaz.Show[Elevation] with scalaz.Order[Elevation] {
override def shows(e: Elevation) = e.value + "m"
def order(e1: Elevation, e2: Elevation) = scalaz.Order[Double].order(e1.value, e2.value)
}
}
| drstevens/scalaz-geo | src/main/scala/scalaz/geo/Elevation.scala | Scala | bsd-3-clause | 445 |
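A brief hedged sketch exercising the `Show` and `Order` instances above; the `Geo` helper object is our own, since the library only ships the `Elevations` trait with the `elevation` constructor:
import scalaz.{Order, Show}
import scalaz.contrib.geo.{Elevation, Elevations}

// Mix in the trait to get the `elevation` smart constructor.
object Geo extends Elevations

object ElevationSketch extends App {
  val e1: Elevation = Geo.elevation(120.5)
  val e2: Elevation = Geo.elevation(-30.0) // negative input is stored as its absolute value

  println(Show[Elevation].shows(e1))      // 120.5m
  println(Order[Elevation].order(e1, e2)) // GT
}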
package com.twitter.inject.app.internal
import com.google.inject.Key
import com.twitter.finatra.annotations.FlagImpl
import com.twitter.inject.{TwitterModule, Logging}
object FlagsModule {
def create(flags: Seq[com.twitter.app.Flag[_]]) = {
val flagsMap = (for (flag <- flags) yield {
flag.name -> flag()
}).toMap
new FlagsModule(flagsMap)
}
}
//TODO: Use type information in Flag instead of hardcoding java.lang.String
class FlagsModule(
flags: Map[String, Any])
extends TwitterModule
with Logging {
override def configure() {
for ((flagName, value) <- flags) {
debug("Binding flag: " + flagName + " = " + value)
val key = Key.get(classOf[java.lang.String], new FlagImpl(flagName))
binder.bind(key).toInstance(value.toString)
}
}
}
| kaushik94/finatra | inject/inject-app/src/main/scala/com/twitter/inject/app/internal/FlagsModule.scala | Scala | apache-2.0 | 796 |
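A hedged sketch of how the module above could be exercised outside the full Finatra app lifecycle, assuming a `TwitterModule` can be installed into a plain Guice injector; the flag names and values are invented:
import com.google.inject.{Guice, Key}
import com.twitter.finatra.annotations.FlagImpl
import com.twitter.inject.app.internal.FlagsModule

object FlagsModuleSketch extends App {
  // In a real app this map is derived from parsed com.twitter.app.Flag instances.
  val module = new FlagsModule(Map("http.port" -> ":8888", "max.requests" -> 100))

  val injector = Guice.createInjector(module)

  // Every flag is bound as a java.lang.String keyed by its name, mirroring configure() above.
  val port = injector.getInstance(Key.get(classOf[String], new FlagImpl("http.port")))
  println(port) // ":8888"
}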
package org.jetbrains.plugins.scala
package codeInspection.typeChecking
import com.intellij.codeInspection.{ProblemHighlightType, ProblemsHolder}
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.codeInspection.collections.MethodRepr
import org.jetbrains.plugins.scala.codeInspection.typeChecking.ComparingUnrelatedTypesInspection._
import org.jetbrains.plugins.scala.codeInspection.{AbstractInspection, InspectionBundle}
import org.jetbrains.plugins.scala.extensions.{PsiClassExt, ResolvesTo}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScReferenceExpression
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.result.Success
/**
* Nikolay.Tropin
* 5/30/13
*/
object ComparingUnrelatedTypesInspection {
val inspectionName = InspectionBundle.message("comparing.unrelated.types.name")
val inspectionId = "ComparingUnrelatedTypes"
private val seqFunctions = Seq("contains", "indexOf", "lastIndexOf")
def cannotBeCompared(type1: ScType, type2: ScType): Boolean = {
val types = Seq(type1, type2).map(tryExtractSingletonType)
val Seq(unboxed1, unboxed2) =
if (types.contains(Null)) types else types.map(StdType.unboxedType)
if (isNumericType(unboxed1) && isNumericType(unboxed2)) return false
ComparingUtil.isNeverSubType(unboxed1, unboxed2) && ComparingUtil.isNeverSubType(unboxed2, unboxed1)
}
def isNumericType(tp: ScType) = {
tp match {
case Byte | Char | Short | Int | Long | Float | Double => true
case ScDesignatorType(c: ScClass) => c.supers.headOption.map(_.qualifiedName).contains("scala.math.ScalaNumber")
case _ => false
}
}
private def tryExtractSingletonType(tp: ScType): ScType = ScType.extractDesignatorSingletonType(tp).getOrElse(tp)
}
class ComparingUnrelatedTypesInspection extends AbstractInspection(inspectionId, inspectionName){
def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
case MethodRepr(expr, Some(left), Some(oper), Seq(right)) if Seq("==", "!=", "ne", "eq", "equals") contains oper.refName =>
// getType() for the reference on the left side returns a singleton type, hence this little hack
val leftOnTheRight = ScalaPsiElementFactory.createExpressionWithContextFromText(left.getText, right.getParent, right)
Seq(leftOnTheRight, right) map (_.getType()) match {
case Seq(Success(leftType, _), Success(rightType, _)) if cannotBeCompared(leftType, rightType) =>
holder.registerProblem(expr, inspectionName, ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
case _ =>
}
case MethodRepr(_, Some(baseExpr), Some(ResolvesTo(fun: ScFunction)), Seq(arg, _*)) if mayNeedHighlighting(fun) =>
for {
ScParameterizedType(_, Seq(elemType)) <- baseExpr.getType().map(tryExtractSingletonType)
argType <- arg.getType()
if cannotBeCompared(elemType, argType)
} {
val (elemTypeText, argTypeText) = ScTypePresentation.different(elemType, argType)
val message = InspectionBundle.message("comparing.unrelated.types.hint", elemTypeText, argTypeText)
holder.registerProblem(arg, message, ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
}
case IsInstanceOfCall(call) =>
val qualType = call.referencedExpr match {
case ScReferenceExpression.withQualifier(q) => q.getType().toOption
case _ => None
}
val argType = call.arguments.headOption.flatMap(_.getType().toOption)
for {
t1 <- qualType
t2 <- argType
if cannotBeCompared(t1, t2)
} {
holder.registerProblem(call, inspectionName, ProblemHighlightType.GENERIC_ERROR_OR_WARNING)
}
}
private def mayNeedHighlighting(fun: ScFunction): Boolean = {
if (!seqFunctions.contains(fun.name)) return false
val className = fun.containingClass.qualifiedName
className.startsWith("scala.collection") && className.contains("Seq") && seqFunctions.contains(fun.name) ||
Seq("scala.Option", "scala.Some").contains(className) && fun.name == "contains"
}
}
| LPTK/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/typeChecking/ComparingUnrelatedTypesInspection.scala | Scala | apache-2.0 | 4,312 |
package edu.gemini.catalog.ui.image
import java.beans.{PropertyChangeEvent, PropertyChangeListener}
import java.nio.file.Path
import java.time.Instant
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ExecutorService, Executors, ThreadFactory}
import java.util.logging.{Level, Logger}
import edu.gemini.catalog.image._
import edu.gemini.shared.util.immutable.ScalaConverters._
import edu.gemini.pot.sp._
import edu.gemini.spModel.core.{Angle, Coordinates, Site, Wavelength}
import edu.gemini.spModel.obs.ObservationStatus
import edu.gemini.spModel.rich.pot.sp._
import jsky.app.ot.tpe.{ImageCatalogPanel, TpeContext, TpeImageWidget, TpeManager}
import jsky.app.ot.userprefs.observer.ObserverPreferences
import jsky.image.gui.ImageLoadingException
import scalaz._
import Scalaz._
import scala.swing.Swing
import scalaz.concurrent.Task
/**
* Describes a requested image for an observation and wavelength
*/
case class TargetImageRequest(key: SPNodeKey, coordinates: Coordinates, obsWavelength: Option[Wavelength], site: Option[Site])
object TargetImageRequest {
/** @group Typeclass Instances */
implicit val equal: Equal[TargetImageRequest] = Equal.equalA[TargetImageRequest]
}
/**
* This interface can be used to listen for image-loading progress and update the UI accordingly
*/
case class ImageLoadingListener[A](downloadStarts: Task[A], downloadCompletes: Task[A], downloadError: Task[A])
/**
* Listens for program changes and downloads images as required: it requests
* downloading of images that are not already present.
* It also updates the UI as needed.
*/
object BackgroundImageLoader {
val Log: Logger = Logger.getLogger(this.getClass.getName)
private val taskUnit = Task.now(())
private def imageDownloadsThreadFactory(priority: Int) = new ThreadFactory {
private val threadNumber: AtomicInteger = new AtomicInteger(1)
private val defaultThreadFactory = Executors.defaultThreadFactory()
override def newThread(r: Runnable): Thread = {
val name = s"Background Image Downloads - ${threadNumber.getAndIncrement()} - priority: $priority"
defaultThreadFactory.newThread(r) <| {_.setDaemon(true)} <| {_.setName(name)} <| {_.setPriority(priority)}
}
}
def newExecutor(priority: Int): ExecutorService =
Executors.newFixedThreadPool(ImageCatalog.all.length, imageDownloadsThreadFactory(priority))
/**
* Execution context for lower priority downloads
*/
private val lowPriorityEC = newExecutor(Thread.MIN_PRIORITY)
/**
* Regular execution context for higher priority tasks, i.e. UI requests
*/
private val highPriorityEC = newExecutor(Thread.NORM_PRIORITY)
private def logError[A](x: Throwable \\/ A): Unit = x match {
case -\\/(e) => Log.log(Level.SEVERE, e.getMessage, e)
case \\/-(_) => // Ignore, successful case
}
/** Called when a program is created to download its images */
def watch(prog: ISPProgram): Unit = {
// At startup only load images for active programs
def needsImage(ctx: TpeContext): Boolean =
ctx.obsShell.exists(ObservationStatus.computeFor(_).isActive)
// Listen for future changes
prog.addCompositeChangeListener(ChangeListener)
val targets = for {
p <- prog.allObservations
tpeCtx = TpeContext(p)
if needsImage(tpeCtx)
i <- requestedImage(tpeCtx)
} yield i
// remove duplicates and request images
val tasks = targets.distinct.map(requestImageDownload(lowPriorityEC))
// Run as low priority
runAsync(tasks)(logError)(lowPriorityEC)
}
/** Called when a program is removed to clear the cache */
def unwatch(prog: ISPProgram): Unit = {
prog.removeCompositeChangeListener(ChangeListener)
}
/**
* Display an image if available on disk or request the download if necessary
*/
def loadImageOnTheTpe(tpe: TpeContext): Unit = {
val task = requestedImage(tpe).fold(taskUnit)(requestImageDownload(highPriorityEC))
// This method is called on an explicit user interaction, so we request
// execution on a higher-priority thread.
// Execute and set the image on the tpe
runAsync(task)(logError)(highPriorityEC)
}
// Watches for changes to existing observations, runs BAGS on them when updated.
object ChangeListener extends PropertyChangeListener {
override def propertyChange(evt: PropertyChangeEvent): Unit =
evt.getSource match {
case node: ISPNode => Option(node.getContextObservation).foreach { o =>
val task = requestedImage(TpeContext(o)).fold(taskUnit)(requestImageDownload(highPriorityEC))
// Run it in the background as it is lower priority than GUI
runAsync(task)(logError)(lowPriorityEC)
}
}
}
/**
* Creates a task to load an image and set it on the tpe
*/
private[image] def requestImageDownload(pool: ExecutorService)(t: TargetImageRequest): Task[Unit] =
for {
catalog <- ObservationCatalogOverrides.catalogFor(t.key, t.obsWavelength)
image <- loadImage(ImageSearchQuery(catalog, t.coordinates, catalog.imageSize, t.site), ImageCatalogPanel.resetListener)(pool)
_ <- image.fold(taskUnit)(e => Task.delay(updateTpeImage(e)))
} yield ()
/**
* Load an image for the given query
* It will check if the image is in the cache or in progress before requesting a download
* It updates the listener as needed to update the UI
*/
private def loadImage(query: ImageSearchQuery, listener: ImageLoadingListener[Unit])(pool: ExecutorService): Task[Option[ImageInFile]] = {
def readImageToFile(dir: Path): NonEmptyList[Task[ImageInFile]] =
query.url.map(ImageCatalogClient.downloadImageToFile(dir, _, query))
def downloadImage(prefs: ImageCatalogPreferences): Task[ImageInFile] = {
val task = for {
_ <- KnownImagesSets.start(query) *> listener.downloadStarts
f <- TaskHelper.selectFirstToComplete(readImageToFile(prefs.cacheDir))(pool)
_ <- StoredImagesCache.add(f) *> ImageCacheOnDisk.pruneCache(prefs.imageCacheSize) // Add to cache and prune. Cache pruning goes in a different thread
} yield f
// Remove query from registry and inform listeners at the end
task.onFinish {
case Some(_) => KnownImagesSets.failed(query) *> listener.downloadError
case _ => KnownImagesSets.completed(query) *> listener.downloadCompletes
}
}
def checkIfNeededAndDownload(prefs: ImageCatalogPreferences): Task[Option[ImageInFile]] =
KnownImagesSets.inProgress(query).ifM(Task.now(none), downloadImage(prefs).map(Some.apply))
// Try to find the image on the cache, else download
for {
prefs <- ImageCatalogPreferences.preferences()
inCache <- StoredImagesCache.find(query)
exists <- Task.delay(inCache.filter(_.file.toFile.exists()))
file <- exists.fold(checkIfNeededAndDownload(prefs))(f => Task.now(f.some))
} yield file
}
/**
* Extracts the data to request an image from the current context
*/
private def requestedImage(tpe: TpeContext): Option[TargetImageRequest] =
for {
ctx <- tpe.obsContext
base <- tpe.targets.base
when = ctx.getSchedulingBlockStart.asScalaOpt | Instant.now.toEpochMilli
coords <- base.getTarget.coords(when)
key <- tpe.obsKey
site = Option(ObserverPreferences.fetch.observingSite())
} yield TargetImageRequest(key, coords, ObsWavelengthExtractor.extractObsWavelength(tpe), site)
/**
* Utility methods to run the tasks on separate threads of the pool
*/
private def runAsync[A](tasks: List[Task[A]])(f: Throwable \\/ List[A] => Unit)(pool: ExecutorService) =
Task.gatherUnordered(tasks.map(t => Task.fork(t)(pool))).unsafePerformAsync(f)
private def runAsync[A](task: Task[A])(f: Throwable \\/ A => Unit)(pool: ExecutorService) =
Task.fork(task).unsafePerformAsync(f)
/**
* Attempts to set the image on the tpe. Note that this is called from a separate
* thread, typically after an image download, so we need to go through Swing to update the UI.
*
* Since an image download may take a while, the tpe may have moved.
* We'll only update the position if the coordinates and catalog match.
*
*/
private def updateTpeImage(entry: ImageInFile): Unit = {
def updateCacheAndDisplay(iw: TpeImageWidget): Task[Unit] = StoredImagesCache.markAsUsed(entry) *> Task.delay(iw.setFilename(entry.file.toAbsolutePath.toString, false))
Swing.onEDT {
// Run inside the EDT, we need to catch possible exceptions when setting the file on the UI
for {
tpe <- Option(TpeManager.get())
iw <- Option(tpe.getImageWidget)
ctx = iw.getContext
request <- requestedImage(ctx)
// TODO This check seems to be failing in a few spots on the sky, it may need to be loosened
if entry.contains(request.coordinates, Angle.zero) // The TPE may have moved so only display if the coordinates match
if ImageCatalogPanel.isCatalogSelected(entry.query.catalog) // Only set the image if the catalog matches
} {
val task = KnownImagesSets.inProgress(entry.query).ifM(taskUnit, updateCacheAndDisplay(iw))
// Function to capture an exception and request a new download
val reDownload: PartialFunction[Throwable, Task[Unit]] = {
case _: ImageLoadingException =>
// This happens typically if the image is corrupted
// Let's try to re-download
Task.delay(entry.file.toFile.delete).ifM(Task.delay(loadImageOnTheTpe(ctx)), taskUnit)
}
// We don't really care about the result but want to intercept
// file errors to redownload the image
task.handleWith(reDownload).unsafePerformSync
}
}
}
}
|
spakzad/ocs
|
bundle/jsky.app.ot/src/main/scala/edu/gemini/catalog/ui/image/BackgroundImageLoader.scala
|
Scala
|
bsd-3-clause
| 9,902 |
package jp.ac.nagoya_u.dsmoq.sdk.response
case class StatisticsDetail(
private val dataset_amount: Long,
private val real_size: Long,
private val local_size: Long,
private val s3_size: Long,
private val total_size: Long
) {
def getDatasetAmount = dataset_amount
def getRealSize = real_size
def getLocalSize = local_size
def getS3Size = s3_size
def getTotalSize = total_size
}
|
nkawa/dsmoq
|
sdk/src/main/java/jp/ac/nagoya_u/dsmoq/sdk/response/Statistics.scala
|
Scala
|
apache-2.0
| 397 |
/*
* NumericOptionSpinnerModel.scala
* (LucreSwing)
*
* Copyright (c) 2014-2021 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.lucre.swing.impl
import java.io.Serializable
import javax.swing.AbstractSpinnerModel
class NumericOptionSpinnerModel[A](value0: Option[A], minimum0: Option[A], maximum0: Option[A], stepSize0: A)
(implicit num: Numeric[A])
extends AbstractSpinnerModel with Serializable {
private var _value = value0
private var _minimum = minimum0
private var _maximum = maximum0
private var _stepSize = stepSize0
def value: Option[A] = _value
def value_=(v: Option[A]): Unit = if (_value != v) {
_value = v
fireStateChanged()
}
def minimum: Option[A] = _minimum
def minimum_=(value: Option[A]): Unit = if (_minimum != value) {
_minimum = value
fireStateChanged()
}
def maximum: Option[A] = _maximum
def maximum_=(value: Option[A]): Unit = if (_maximum != value) {
_maximum = value
fireStateChanged()
}
def stepSize: A = _stepSize
def stepSize_=(value: A): Unit = if (_stepSize != value) {
_stepSize = value
fireStateChanged()
}
// dir == `true` means increase, dir == `false` means decrease
// if value is None or the result would exceed the bounds, returns None, else Some
private def incrValue(dir: Boolean): Option[A] = _value.flatMap { v =>
val newValue = num.plus(v, if (dir) _stepSize else num.negate(_stepSize))
val tooLarge = maximum.exists(m => num.compare(newValue, m) > 0)
val tooSmall = minimum.exists(m => num.compare(newValue, m) < 0)
if (tooLarge || tooSmall) None else Some(newValue)
}
/** Returns the next number in the sequence.
*
* @return <code>value + stepSize</code> or <code>null</code> if the sum
* exceeds <code>maximum</code>.
*/
def getNextValue: AnyRef = {
val res = incrValue(dir = true)
if (res.isDefined) res else null
}
/** Returns the previous number in the sequence.
*
* @return <code>value - stepSize</code>, or
* <code>null</code> if the sum is less
* than <code>minimum</code>.
*/
def getPreviousValue: AnyRef = {
val res = incrValue(dir = false)
if (res.isDefined) res else null
}
def getValue: AnyRef = _value
/** Sets the current value for this sequence. If <code>value</code> is
* <code>null</code>, or not an <code>Option</code>, an
* <code>IllegalArgumentException</code> is thrown. No
* bounds checking is done here.
*
* This method fires a <code>ChangeEvent</code> if the value has changed.
*
* @param v the current (non <code>null</code>) <code>Option</code>
* for this sequence
* @throws IllegalArgumentException if <code>value</code> is
* <code>null</code> or not a <code>Option</code>
*/
def setValue(v: AnyRef): Unit = {
if ((v == null) || !v.isInstanceOf[Option[_]]) {
val s1 = if (v == null) "" else s" (${v.getClass.getSimpleName})"
throw new IllegalArgumentException(s"Illegal value $v$s1")
}
value = v.asInstanceOf[Option[A]]
}
}
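// Illustrative usage sketch (added for clarity; the concrete values are arbitrary):
//
//   val model = new NumericOptionSpinnerModel[Int](
//     value0 = Some(5), minimum0 = Some(0), maximum0 = Some(10), stepSize0 = 1)
//   model.getNextValue   // Some(6)
//   model.value = Some(10)
//   model.getNextValue   // null, since 11 would exceed the maximum
//   model.value = None
//   model.getNextValue   // null, since there is no current value to step from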
|
Sciss/LucreSwing
|
jvm/src/main/scala/de/sciss/lucre/swing/impl/NumericOptionSpinnerModel.scala
|
Scala
|
agpl-3.0
| 3,331 |
import sbt._
import sbt.Keys._
import com.typesafe.sbteclipse.plugin.EclipsePlugin._
object SynergyBuild extends Build {
lazy val synergy = Project(
id = "synergy",
base = file("."),
settings = Project.defaultSettings ++ Seq(
name := "synergy",
organization := "net.caoticode.synergy",
version := "0.1-SNAPSHOT",
scalaVersion := "2.10.3",
EclipseKeys.createSrc := EclipseCreateSrc.Default + EclipseCreateSrc.Resource,
// dependencies
libraryDependencies += "org.scalatest" % "scalatest_2.10" % "1.9.1" % "test",
libraryDependencies += "com.typesafe.akka" %% "akka-actor" % "2.2.1",
libraryDependencies += "com.typesafe.akka" %% "akka-remote" % "2.2.1",
libraryDependencies += "com.typesafe.akka" %% "akka-testkit" % "2.2.1"
)
)
}
|
mdread/synergy
|
project/SynergyBuild.scala
|
Scala
|
mit
| 853 |
package test
import org.scalatest.FunSuite
import scala.offheap._
@data class L1(x: Byte, y: Byte)
@data class L2(x: Byte, y: Short)
@data class L3(x: Byte, y: Int)
@data class L4(x: Byte, y: Long)
@data class L5(x: Int, y: Long)
@data class L6(x: Byte, @embed emb: L5)
@data class L7(x: Int, y: Short)
@data class L8(x: Byte, @embed emb: L7, y: Long)
@data class L9
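// The expected offsets below are consistent with natural (C-style) alignment: each field starts at
// an offset that is a multiple of its own alignment (in L2 the Short y sits at offset 2, leaving one
// byte of padding after the Byte x), and @embed fields are aligned to the embedded class's own
// alignment (L8 places the 4-byte-aligned L7 at offset 4 and the 8-byte-aligned Long y at offset 16).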
class LayoutSuite extends FunSuite {
implicit val alloc = malloc
test("L1.x offset") { assert(offsetOf[L1]("x") == 0) }
test("L2.x offset") { assert(offsetOf[L2]("x") == 0) }
test("L3.x offset") { assert(offsetOf[L3]("x") == 0) }
test("L4.x offset") { assert(offsetOf[L4]("x") == 0) }
test("L5.x offset") { assert(offsetOf[L5]("x") == 0) }
test("L6.x offset") { assert(offsetOf[L6]("x") == 0) }
test("L7.x offset") { assert(offsetOf[L7]("x") == 0) }
test("L8.x offset") { assert(offsetOf[L8]("x") == 0) }
test("L1.y offset") { assert(offsetOf[L1]("y") == 1) }
test("L2.y offset") { assert(offsetOf[L2]("y") == 2) }
test("L3.y offset") { assert(offsetOf[L3]("y") == 4) }
test("L4.y offset") { assert(offsetOf[L4]("y") == 8) }
test("L5.y offset") { assert(offsetOf[L5]("y") == 8) }
test("L7.y offset") { assert(offsetOf[L7]("y") == 4) }
test("L8.y offset") { assert(offsetOf[L8]("y") == 16) }
test("L6.emb offset") { assert(offsetOf[L6]("emb") == 8) }
test("L8.emb offset") { assert(offsetOf[L8]("emb") == 4) }
test("sizeOfEmbed[L1]") { assert(sizeOfEmbed[L1] == 2 ) }
test("sizeOfEmbed[L2]") { assert(sizeOfEmbed[L2] == 4 ) }
test("sizeOfEmbed[L3]") { assert(sizeOfEmbed[L3] == 8 ) }
test("sizeOfEmbed[L4]") { assert(sizeOfEmbed[L4] == 16) }
test("sizeOfEmbed[L5]") { assert(sizeOfEmbed[L5] == 16) }
test("sizeOfEmbed[L6]") { assert(sizeOfEmbed[L6] == 24) }
test("sizeOfEmbed[L7]") { assert(sizeOfEmbed[L7] == 6) }
test("sizeOfEmbed[L8]") { assert(sizeOfEmbed[L8] == 24) }
test("sizeOfEmbed[L9]") { assert(sizeOfEmbed[L9] == 1) }
test("alignmentOfEmbed[L1]") { assert(alignmentOfEmbed[L1] == 1) }
test("alignmentOfEmbed[L2]") { assert(alignmentOfEmbed[L2] == 2) }
test("alignmentOfEmbed[L3]") { assert(alignmentOfEmbed[L3] == 4) }
test("alignmentOfEmbed[L4]") { assert(alignmentOfEmbed[L4] == 8) }
test("alignmentOfEmbed[L5]") { assert(alignmentOfEmbed[L5] == 8) }
test("alignmentOfEmbed[L6]") { assert(alignmentOfEmbed[L6] == 8) }
test("alignmentOfEmbed[L7]") { assert(alignmentOfEmbed[L7] == 4) }
test("alignmentOfEmbed[L8]") { assert(alignmentOfEmbed[L8] == 8) }
test("alignmentOfEmbed[L9]") { assert(alignmentOfEmbed[L9] == 1) }
}
|
adamwy/scala-offheap
|
tests/src/test/scala/LayoutSuite.scala
|
Scala
|
bsd-3-clause
| 2,583 |
package scalacookbook.chapter10
/**
* Created by liguodong on 2016/7/30.
*/
object CreateLazyViewOnCollection extends App{
println(1 to 5)
println((1 to 5).view)
val view = (1 to 5).view
println(view)
val x = view.force
println(x)
println("----------------")
// Several ways to observe the effect of adding a view to a collection
//using a method like foreach doesn’t seem to change when using a view
(1 to 5).foreach(print)
println("\\n----------------")
(1 to 5).view.foreach(print)
println("\\n----------------")
//calling a map method with and without a view
//has dramatically different results
// Different results
println((1 to 5).map { _ * 2 })
println((1 to 5).view.map { _ * 2 })
println("----------------")
//If you run that code as shown, it will return immediately,
// returning a SeqView as before.
//But if you remove the view method call,
// the code block will take about 5 seconds to run.
val x2 = (1 to 5).view.map { e =>
Thread.sleep(1000)
e * 2
}
println(x2)
val x3 = (1 to 5).map { e =>
Thread.sleep(1000)
e * 2
}
println(x3)
println(x2.force) // calling force still waits for the computation
println("------------------")
//Discussion
val l = List(1,2,3)
println(l.reverse)
println(l.view.reverse)
val x4 = (1 to 5).view.map { e =>
Thread.sleep(10)
e * 2
}
x4.foreach(print)
println("\\n-----------")
//Use cases
// create a normal array
val arr = (1 to 10).toArray
// create a view on the array
val view2 = arr.view.slice(2, 5)
println(view2.toList)
// modify the array
arr(2) = 42
println(view2.toList)
// Modifying the original array also affects the view.
// the view is affected:
view2.foreach(println)
// change the elements in the view
view2(0) = 10
view2(1) = 20
view2(2) = 30
// Modifying the view also affects the original array
// the array is affected:
println(arr.toList)
// Don't carelessly use views to keep large data in memory; it may cause an OutOfMemoryError
//eg:
//val aa = Array.range(0,123456789)
//val aa2 = Array.range(0,123456789).view
}
|
liguodongIOT/java-scala-mix-sbt
|
src/main/scala/scalacookbook/chapter10/CreateLazyViewOnCollection.scala
|
Scala
|
apache-2.0
| 2,116 |
class ImplicitParametersDeeperLevel {
class A
class B
object D {
implicit val s: B = new B
}
object K {
implicit def g(implicit s: B): A = new A
}
def foo()(implicit x: A) = 123
import D._
import K._
foo()
}
/*
class ImplicitParametersDeeperLevel {
class A
class B
object D {
implicit val s: B = new B
}
object K {
implicit def g(implicit s: B): A = new A
}
def foo()(implicit x: A) = 123
import D._
import K._
foo()
}
*/
|
ilinum/intellij-scala
|
testdata/optimize/implicits/ImplicitParametersDeeperLevel.scala
|
Scala
|
apache-2.0
| 494 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.{Locale, TimeZone}
import scala.collection.JavaConverters._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.columnar.InMemoryRelation
abstract class QueryTest extends PlanTest {
protected def sqlContext: SQLContext
// Timezone is fixed to America/Los_Angeles for those timezone sensitive tests (timestamp_*)
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"))
// Add Locale setting
Locale.setDefault(Locale.US)
/**
* Runs the plan and makes sure the answer either contains all of the keywords or
* contains none of the keywords
* @param df the [[DataFrame]] to be executed
* @param exists true to make sure the keywords are listed in the output, false
* to make sure none of the keywords are listed in the output
* @param keywords keyword in string array
*/
def checkExistence(df: DataFrame, exists: Boolean, keywords: String*) {
val outputs = df.collect().map(_.mkString).mkString
for (key <- keywords) {
if (exists) {
assert(outputs.contains(key), s"Failed for $df ($key doesn't exist in result)")
} else {
assert(!outputs.contains(key), s"Failed for $df ($key existed in the result)")
}
}
}
/**
* Runs the plan and makes sure the answer matches the expected result.
* @param df the [[DataFrame]] to be executed
* @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
*/
protected def checkAnswer(df: => DataFrame, expectedAnswer: Seq[Row]): Unit = {
val analyzedDF = try df catch {
case ae: AnalysisException =>
val currentValue = sqlContext.conf.dataFrameEagerAnalysis
sqlContext.setConf(SQLConf.DATAFRAME_EAGER_ANALYSIS, false)
val partiallyAnalyzedPlan = df.queryExecution.analyzed
sqlContext.setConf(SQLConf.DATAFRAME_EAGER_ANALYSIS, currentValue)
fail(
s"""
|Failed to analyze query: $ae
|$partiallyAnalyzedPlan
|
|${stackTraceToString(ae)}
|""".stripMargin)
}
QueryTest.checkAnswer(analyzedDF, expectedAnswer) match {
case Some(errorMessage) => fail(errorMessage)
case None =>
}
}
protected def checkAnswer(df: => DataFrame, expectedAnswer: Row): Unit = {
checkAnswer(df, Seq(expectedAnswer))
}
protected def checkAnswer(df: => DataFrame, expectedAnswer: DataFrame): Unit = {
checkAnswer(df, expectedAnswer.collect())
}
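// Illustrative call from a concrete suite (the `testData` DataFrame and its key/value columns are
// hypothetical and not defined in this file):
//
//   checkAnswer(
//     testData.filter("key = 1"),
//     Row(1, "1") :: Nil)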
/**
* Asserts that a given [[DataFrame]] will be executed using the given number of cached results.
*/
def assertCached(query: DataFrame, numCachedTables: Int = 1): Unit = {
val planWithCaching = query.queryExecution.withCachedData
val cachedData = planWithCaching collect {
case cached: InMemoryRelation => cached
}
assert(
cachedData.size == numCachedTables,
s"Expected query to contain $numCachedTables, but it actually had ${cachedData.size}\\n" +
planWithCaching)
}
}
object QueryTest {
/**
* Runs the plan and makes sure the answer matches the expected result.
* If there was exception during the execution or the contents of the DataFrame does not
* match the expected result, an error message will be returned. Otherwise, a [[None]] will
* be returned.
* @param df the [[DataFrame]] to be executed
* @param expectedAnswer the expected result in a [[Seq]] of [[Row]]s.
*/
def checkAnswer(df: DataFrame, expectedAnswer: Seq[Row]): Option[String] = {
val isSorted = df.logicalPlan.collect { case s: logical.Sort => s }.nonEmpty
// We need to call prepareRow recursively to handle schemas with struct types.
def prepareRow(row: Row): Row = {
Row.fromSeq(row.toSeq.map {
case null => null
case d: java.math.BigDecimal => BigDecimal(d)
// Convert array to Seq for easy equality check.
case b: Array[_] => b.toSeq
case r: Row => prepareRow(r)
case o => o
})
}
def prepareAnswer(answer: Seq[Row]): Seq[Row] = {
// Converts data to types that we can do equality comparison using Scala collections.
// For BigDecimal type, the Scala type has a better definition of equality test (similar to
// Java's java.math.BigDecimal.compareTo).
// For binary arrays, we convert it to Seq to avoid of calling java.util.Arrays.equals for
// equality test.
val converted: Seq[Row] = answer.map(prepareRow)
if (!isSorted) converted.sortBy(_.toString()) else converted
}
val sparkAnswer = try df.collect().toSeq catch {
case e: Exception =>
val errorMessage =
s"""
|Exception thrown while executing query:
|${df.queryExecution}
|== Exception ==
|$e
|${org.apache.spark.sql.catalyst.util.stackTraceToString(e)}
""".stripMargin
return Some(errorMessage)
}
if (prepareAnswer(expectedAnswer) != prepareAnswer(sparkAnswer)) {
val errorMessage =
s"""
|Results do not match for query:
|${df.queryExecution}
|== Results ==
|${sideBySide(
s"== Correct Answer - ${expectedAnswer.size} ==" +:
prepareAnswer(expectedAnswer).map(_.toString()),
s"== Spark Answer - ${sparkAnswer.size} ==" +:
prepareAnswer(sparkAnswer).map(_.toString())).mkString("\\n")}
""".stripMargin
return Some(errorMessage)
}
return None
}
def checkAnswer(df: DataFrame, expectedAnswer: java.util.List[Row]): String = {
checkAnswer(df, expectedAnswer.asScala) match {
case Some(errorMessage) => errorMessage
case None => null
}
}
}
|
pronix/spark
|
sql/core/src/test/scala/org/apache/spark/sql/QueryTest.scala
|
Scala
|
apache-2.0
| 6,616 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.spark.hbase.example
import com.cloudera.spark.hbase.HBaseContext
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.{SparkConf, SparkContext}
object HBaseBulkPutHdfsTmpExample {
def main(args: Array[String]) {
if (args.length == 0) {
System.out.println("HBaseBulkPutHdfsTmpExample {tableName} {columnFamily}");
return;
}
val tableName = args(0);
val columnFamily = args(1);
val sparkConf = new SparkConf().setAppName("HBaseBulkPutExample " + tableName + " " + columnFamily)
val sc = new SparkContext(sparkConf)
//[(Array[Byte], Array[(Array[Byte], Array[Byte], Array[Byte])])]
val rdd = sc.parallelize(Array(
(Bytes.toBytes("1"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("1")))),
(Bytes.toBytes("2"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("2")))),
(Bytes.toBytes("3"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("3")))),
(Bytes.toBytes("4"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("4")))),
(Bytes.toBytes("5"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("5"))))
)
)
val conf = HBaseConfiguration.create();
conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
val hbaseContext = new HBaseContext(sc, conf, "/tmp/SparkOnHBase.conf");
hbaseContext.bulkPut[(Array[Byte], Array[(Array[Byte], Array[Byte], Array[Byte])])](rdd,
tableName,
(putRecord) => {
val put = new Put(putRecord._1)
putRecord._2.foreach((putValue) => put.add(putValue._1, putValue._2, putValue._3))
put
},
true);
}
}
|
ronanstokes/SparkOnHBase
|
src/main/scala/com/cloudera/spark/hbase/example/HBaseBulkPutHdfsTmpExample.scala
|
Scala
|
apache-2.0
| 2,720 |
package com.atomist.rug.kind.java
import com.atomist.rug.kind.java.JavaSourceType._
import com.atomist.source.ArtifactSource
import com.github.javaparser.JavaParser
import org.scalatest.Matchers
/**
* Utilities for use in testing.
*/
object JavaVerifier extends Matchers {
/**
* Verify that the contents of this artifact source are still well formed
*/
def verifyJavaIsWellFormed(result: ArtifactSource): Unit = {
for {
f <- result.allFiles
if f.name.endsWith(JavaExtension)
} {
try {
JavaParser.parse(f.inputStream)
}
catch {
case t: Throwable => fail(s"File ${f.path} is ill-formed\\n${f.content}", t)
}
}
}
}
|
atomist/rug
|
src/test/scala/com/atomist/rug/kind/java/JavaVerifier.scala
|
Scala
|
gpl-3.0
| 696 |
package skinny.micro
import javax.servlet.http.{ HttpServlet, HttpServletRequest, HttpServletResponse }
import javax.servlet._
import skinny.micro.context.SkinnyContext
import skinny.micro.implicits.{ RicherStringImplicits, ServletApiImplicits }
import skinny.micro.util.UriDecoder
import scala.util.control.Exception._
/**
* Base trait for SkinnyMicroServlet implementations.
*/
trait SkinnyMicroServletBase extends HttpServlet with SkinnyMicroBase {
override def service(request: HttpServletRequest, response: HttpServletResponse): Unit = {
handle(request, response)
}
/**
* Defines the request path to be matched by routers. The default
* definition is optimized for `path mapped` servlets (i.e., servlet
* mapping ends in `/*`). The route should match everything matched by
* the `/*`. In the event that the request URI equals the servlet path
* with no trailing slash (e.g., mapping = `/admin/*`, request URI =
* '/admin'), a '/' is returned.
*
* All other servlet mappings likely want to return request.getServletPath.
* Custom implementations are allowed for unusual cases.
*/
override def requestPath(implicit ctx: SkinnyContext): String = {
SkinnyMicroServletBase.requestPath(ctx.request)
}
override protected def routeBasePath(implicit ctx: SkinnyContext): String = {
require(config != null, "routeBasePath requires the servlet to be initialized")
require(ctx.request != null, "routeBasePath requires an active request to determine the servlet path")
if (ctx.servletContext != null) {
ctx.servletContext.getContextPath + ctx.request.getServletPath
} else {
// servletContext can be null when test environment
ctx.request.getServletPath
}
}
/**
* Invoked when no route matches. By default, calls `serveStaticResource()`,
* and if that fails, calls `resourceNotFound()`.
*
* This action can be overridden by a notFound block.
*/
protected var doNotFound: Action = () => {
serveStaticResource()(skinnyContext)
.getOrElse(resourceNotFound()(skinnyContext))
}
/**
* Attempts to find a static resource matching the request path.
* Override to return None to stop this.
*/
protected def serveStaticResource()(
implicit ctx: SkinnyContext): Option[Any] = {
servletContext.resource(ctx.request) map { _ =>
servletContext.getNamedDispatcher("default").forward(ctx.request, ctx.response)
}
}
/**
* Called by default notFound if no routes matched and no static resource could be found.
*/
protected def resourceNotFound()(
implicit ctx: SkinnyContext): Any = {
ctx.response.setStatus(404)
if (isDevelopment()) {
val error = "Requesting \"%s %s\" on servlet \"%s\" but only have: %s"
ctx.response.getWriter println error.format(
ctx.request.getMethod,
Option(ctx.request.getPathInfo) getOrElse "/",
ctx.request.getServletPath,
routes.entryPoints.mkString("<ul><li>", "</li><li>", "</li></ul>"))
}
}
type ConfigT = ServletConfig
override def init(config: ServletConfig): Unit = {
super.init(config)
initialize(config) // see Initializable.initialize for why
}
override def initialize(config: ServletConfig): Unit = {
super.initialize(config)
}
override def destroy(): Unit = {
shutdown()
super.destroy()
}
}
object SkinnyMicroServletBase {
import ServletApiImplicits._
import RicherStringImplicits._
val RequestPathKey = "skinny.micro.SkinnyMicroServlet.requestPath"
def requestPath(request: HttpServletRequest): String = {
require(request != null, "The request can't be null for getting the request path")
def startIndex(r: HttpServletRequest) =
r.getContextPath.blankOption.map(_.length).getOrElse(0) + r.getServletPath.blankOption.map(_.length).getOrElse(0)
def getRequestPath(r: HttpServletRequest) = {
val u = (catching(classOf[NullPointerException]) opt { r.getRequestURI } getOrElse "/")
requestPath(u, startIndex(r))
}
request.get(RequestPathKey) map (_.toString) getOrElse {
val rp = getRequestPath(request)
request(RequestPathKey) = rp
rp
}
}
def requestPath(uri: String, idx: Int): String = {
val u1 = UriDecoder.firstStep(uri)
val u2 = (u1.blankOption map { _.substring(idx) } flatMap (_.blankOption) getOrElse "/")
val pos = u2.indexOf(';')
if (pos > -1) u2.substring(0, pos) else u2
}
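// Worked example (illustrative; assumes the URI needs no percent-decoding): with a context path of
// "/myapp" and an empty servlet path, idx is 6, so
//   requestPath("/myapp/admin/users;jsessionid=abc", 6) == "/admin/users"
// i.e. the mount prefix is dropped and the matrix parameter after ';' is stripped.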
}
|
xerial/skinny-micro
|
micro/src/main/scala/skinny/micro/SkinnyMicroServletBase.scala
|
Scala
|
bsd-2-clause
| 4,488 |
package ml.sparkling.graph.operators.measures.vertex.clustering
import it.unimi.dsi.fastutil.longs.LongOpenHashSet
import ml.sparkling.graph.api.operators.measures.{VertexMeasure, VertexMeasureConfiguration}
import ml.sparkling.graph.operators.measures.utils.CollectionsUtils._
import ml.sparkling.graph.operators.measures.utils.{CollectionsUtils, NeighboursUtils}
import ml.sparkling.graph.operators.predicates.AllPathPredicate
import org.apache.spark.graphx.Graph
import scala.reflect.ClassTag
/**
* Created by Roman Bartusiak ([email protected] http://riomus.github.io).
* Computes local clustering
*/
object LocalClustering extends VertexMeasure[Double] {
/**
* Computes local clustering
* @param graph - computation graph
* @param vertexMeasureConfiguration - configuration of computation
* @param num - numeric for @ED
* @tparam VD - vertex data type
* @tparam ED - edge data type
* @return graph where each vertex is associated with its local clustering
*/
override def compute[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED],
vertexMeasureConfiguration: VertexMeasureConfiguration[VD,ED])
(implicit num: Numeric[ED]) = {
val firstLevelNeighboursGraph = NeighboursUtils.getWithNeighbours(graph, vertexMeasureConfiguration.treatAsUndirected, AllPathPredicate)
val localClusteringSums=firstLevelNeighboursGraph.aggregateMessages[Double](
sendMsg=edgeContext=>{
def messageCreator=(neighbours1:LongOpenHashSet,neighbours2:LongOpenHashSet)=>{
intersectSize(neighbours1,neighbours2)
}
val message=messageCreator(edgeContext.srcAttr,edgeContext.dstAttr)
edgeContext.sendToSrc(message)
if(vertexMeasureConfiguration.treatAsUndirected){
edgeContext.sendToDst(message)
}
},
mergeMsg=(a,b)=>a+b)
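// At this point each vertex's aggregated value counts, over its incident edges, the neighbours it
// shares with the other endpoint, i.e. the connections inside its own neighbourhood; dividing by
// the k * (k - 1) possible (ordered) neighbour pairs below yields the local clustering coefficient.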
firstLevelNeighboursGraph.outerJoinVertices(localClusteringSums)((vId,oldValue,newValue)=>(newValue.getOrElse(0d),oldValue)).mapVertices {
case (vId, (sum, neighbours)) => {
val possibleConnections = neighbours.size * (neighbours.size - 1)
if (possibleConnections == 0) 0d else sum / possibleConnections
}
}
}
}
|
sparkling-graph/sparkling-graph
|
operators/src/main/scala/ml/sparkling/graph/operators/measures/vertex/clustering/LocalClustering.scala
|
Scala
|
bsd-2-clause
| 2,264 |
package com.socrata.geoexport.mocks
import java.io.{InputStream, ByteArrayOutputStream, DataOutputStream}
import javax.servlet.http.HttpServletResponse.{SC_OK => ScOk}
import org.velvia.MsgPack
import com.socrata.http.common.util.Acknowledgeable
class BinaryResponse(val payload: Array[Byte],
override val resultCode: Int = ScOk) extends EmptyResponse("application/octet-stream") {
override def inputStream(maxBetween: Long): InputStream with Acknowledgeable =
ByteInputStream(payload)
}
object BinaryResponse {
def apply(payload: Array[Byte], resultCode: Int = ScOk): BinaryResponse =
new BinaryResponse(payload, resultCode)
// Below is for quickly generating binary SoQLPack
def apply(header: Map[String, Any], rows: Seq[Seq[Any]],
junk: Option[Array[Byte]]): BinaryResponse = {
val baos = new ByteArrayOutputStream
val dos = new DataOutputStream(baos)
MsgPack.pack(header, dos)
rows.foreach(MsgPack.pack(_, dos))
// Optionally write a junk row to test parsing errors (disabled below)
// junk foreach { junkBytes => MsgPack.pack(Seq(junkBytes), dos) }
dos.flush()
new BinaryResponse(baos.toByteArray)
}
def apply(header: Map[String, Any], rows: Seq[Seq[Any]]): BinaryResponse =
apply(header, rows, None)
}
|
socrata-platform/geo-export
|
src/test/scala/com.socrata.geoexport/mocks/BinaryResponse.scala
|
Scala
|
apache-2.0
| 1,278 |
package skinny.dbmigration
import scalikejdbc._
import org.slf4j.LoggerFactory
/**
* DBSeeds runner.
*/
class DBSeedsRunner extends DBSeeds
/**
* Seeds database tables or records instantly.
*
* This module is surely inspired by Rails rake db:seed.
*/
trait DBSeeds {
private[this] val logger = LoggerFactory.getLogger(classOf[DBSeeds])
/**
* AutoSession for this.
*/
val dbSeedsAutoSession: DBSession = AutoSession
/**
* Registered operations.
*/
private[this] val registeredSeedOperations = new collection.mutable.ListBuffer[() => Any]
/**
* Adds new SQLs to execute when #run is called.
*
* @param seedSQLs seed SQLs
* @param session db session
* @return self
*/
def addSeedSQL(seedSQLs: SQL[_, _]*)(implicit session: DBSession = dbSeedsAutoSession): DBSeeds = {
registeredSeedOperations.appendAll(seedSQLs.map(s => () => s.execute.apply()))
this
}
/**
* Adds seed operation to execute when #run is called.
*
* @param op operation
* @return self
*/
def addSeed(op: => Any): DBSeeds = {
registeredSeedOperations.append(() => op)
this
}
/**
* Runs if predicate function returns false.
*
* @param predicate predicate function
* @param session db session
* @return nothing
*/
def runUnless(predicate: => Boolean)(implicit session: DBSession = dbSeedsAutoSession): Unit = {
ConnectionPool.synchronized {
if (!predicate) {
logger.info(s"Since #runUnless predication failed, DBSeeds is going to run now.")
run()
}
}
}
/**
* Runs all the seeds if executing the given SQL fails.
*
* @param sql probe SQL whose failure triggers the seeds
* @param session db session
* @return nothing
*/
def runIfFailed(sql: SQL[_, _])(implicit session: DBSession = dbSeedsAutoSession): Unit = {
ConnectionPool.synchronized {
try sql.execute.apply()
catch {
case e: java.sql.SQLException =>
logger.info(s"Since '${sql.statement}' execution failed, DBSeeds is going to run now.")
run()
}
}
}
/**
* Run all the seeds.
*
* @param session db session
* @return nothing
*/
def run()(implicit session: DBSession = dbSeedsAutoSession): Unit = {
ConnectionPool.synchronized {
registeredSeedOperations.foreach(_.apply())
}
}
}
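// Illustrative usage sketch (the table and SQL below are made up, not part of the framework):
//
//   object AccountSeeds extends DBSeeds {
//     addSeedSQL(sql"create table account (id bigint primary key, name varchar(64) not null)")
//   }
//   // Run the registered seeds only if the probe query fails (e.g. the table does not exist yet):
//   AccountSeeds.runIfFailed(sql"select count(1) from account")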
|
BlackPrincess/skinny-framework
|
orm/src/main/scala/skinny/dbmigration/DBSeeds.scala
|
Scala
|
mit
| 2,284 |
package mgoeminne.scalaggplot.geom
import mgoeminne.scalaggplot.position.Position
import mgoeminne.scalaggplot.stat.Statistic
import mgoeminne.scalaggplot.{position, aes, stat}
import org.saddle.Frame
/**
* Adds heatmap of 2d bin counts.
*
* == Aesthetics ==
*
* This method understands the following aesthetics (required aesthetics are in bold):
*
* - '''xmin'''
* - '''xmax'''
* - '''ymin'''
* - '''ymax'''
* - alpha
* - colour
* - fill
* - linetype
* - size
* - weight
*
* == Examples ==
*
* TODO
*
* @param mapping The aesthetic mapping, usually constructed with [[aes.aes]] or [[aes.string]]. Only needs to be set at the layer level if you are overriding the plot defaults.
* @param data A layer specific dataset - only needed if you want to override the plot defaults.
* @param stat The statistical transformation to use on the data for this layer.
* @param position The position adjustment to use for overlapping points on this layer
* @tparam T
*/
case class bin2d[T]( mapping: Option[(Seq[Numeric[T]], Seq[Numeric[T]])] = None,
data: Option[Frame[Any,Any,T]] = None,
stat: Statistic = bin2dUtil.defaultStat,
position: Position = bin2dUtil.defaultPos) extends Geom
private object bin2dUtil
{
val defaultStat = stat.bin2d()
val defaultPos = position.identity
}
|
mgoeminne/scala-ggplot
|
src/main/scala/mgoeminne/scalaggplot/geom/bin2d.scala
|
Scala
|
lgpl-3.0
| 1,391 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import EqualityPolicy._
/**
* Provides <code>===</code> and <code>!==</code> operators that return <code>Boolean</code>, delegate the equality determination
* to an <code>Equality</code> type class, and require no relationship between the types of the two values compared.
*
* <table><tr><td class="usage">
* <strong>Recommended Usage</strong>:
* Trait <code>UncheckedEquality</code> is useful (in both production and test code) when you need to determine equality for a type of object differently than its
* <code>equals</code> method: either you can't change the <code>equals</code> method, or the <code>equals</code> method is sensible generally, but
* you are in a special situation where you need something else. If you also want a compile-time type check, however, you should use one
* of <code>UncheckedEquality</code> sibling traits:
* <a href="ConversionCheckedTripleEquals.html"><code>ConversionCheckedTripleEquals</code></a> or <a href="TypeCheckedTripleEquals.html"><code>TypeCheckedTripleEquals</code></a>.
* </td></tr></table>
*
* <p>
* This trait will override or hide implicit methods defined by its sibling traits,
* <a href="ConversionCheckedTripleEquals.html"><code>ConversionCheckedTripleEquals</code></a> or <a href="TypeCheckedTripleEquals.html"><code>TypeCheckedTripleEquals</code></a>,
* and can therefore be used to temporarily turn of type checking in a limited scope. Here's an example, in which <code>TypeCheckedTripleEquals</code> will
* cause a compiler error:
* </p>
*
* <pre class="stHighlight">
* import org.scalactic._
* import TypeCheckedTripleEquals._
*
* object Example {
*
* def cmp(a: Int, b: Long): Int = {
* if (a === b) 0 // This line won't compile
* else if (a < b) -1
* else 1
* }
*
* def cmp(s: String, t: String): Int = {
* if (s === t) 0
* else if (s < t) -1
* else 1
* }
* }
* </pre>
*
* Because <code>Int</code> and <code>Long</code> are not in a subtype/supertype relationship, comparing <code>1</code> and <code>1L</code> in the context
* of <code>TypeCheckedTripleEquals</code> will generate a compiler error:
* </p>
*
* <pre>
* Example.scala:9: error: types Int and Long do not adhere to the equality constraint selected for
* the === and !== operators; they must either be in a subtype/supertype relationship, or, if
* ConversionCheckedTripleEquals is in force, implicitly convertible in one direction or the other;
* the missing implicit parameter is of type org.scalactic.Constraint[Int,Long]
* if (a === b) 0 // This line won't compile
* ^
* one error found
* </pre>
*
* <p>
* You can “turn off” the type checking locally by importing the members of <code>UncheckedEquality</code> in
* a limited scope:
* </p>
*
* <pre class="stHighlight">
* package org.scalactic.examples.tripleequals
*
* import org.scalactic._
* import TypeCheckedTripleEquals._
*
* object Example {
*
* def cmp(a: Int, b: Long): Int = {
* import UncheckedEquality._
* if (a === b) 0
* else if (a < b) -1
* else 1
* }
*
* def cmp(s: String, t: String): Int = {
* if (s === t) 0
* else if (s < t) -1
* else 1
* }
* }
* </pre>
*
* <p>
* With the above change, the <code>Example.scala</code> file compiles fine. Type checking is turned off only inside the first <code>cmp</code> method that
* takes an <code>Int</code> and a <code>Long</code>. <code>TypeCheckedTripleEquals</code> is still enforcing its type constraint, for example, for the <code>s === t</code>
* expression in the other overloaded <code>cmp</code> method that takes strings.
* </p>
*
* <p>
* Because the methods in <code>UncheckedEquality</code> (and its siblings)<em>override</em> all the methods defined in
* supertype <a href="EqualityPolicy.html"><code>EqualityPolicy</code></a>, you can achieve the same
* kind of nested tuning of equality constraints whether you mix in traits, import from companion objects, or use some combination of both.
* </p>
*
* <p>
* In short, you should be able to select a primary constraint level via either a mixin or import, then change that in nested scopes
* however you want, again either through a mixin or import, without getting any implicit conversion ambiguity. The innermost constraint level in scope
* will always be in force.
* </p>
*
* @author Bill Venners
*/
@deprecated("Please use org.scalactic.UncheckedEquality instead.")
trait TripleEquals extends EqualityPolicy {
import scala.language.implicitConversions
// Inherit the Scaladoc for these methods
implicit override def convertToEqualizer[T](left: T): Equalizer[T] = new Equalizer(left)
override def convertToCheckingEqualizer[T](left: T): CheckingEqualizer[T] = new CheckingEqualizer(left)
override def convertToFreshCheckingEqualizer[T](left: T): FreshCheckingEqualizer[T] = new FreshCheckingEqualizer(left)
override def numericEqualityConstraint[A, B](implicit equalityOfA: Equality[A], numA: CooperatingNumeric[A], numB: CooperatingNumeric[B]): EqualityConstraint[A, B] = new BasicEqualityConstraint[A, B](equalityOfA)
implicit override def unconstrainedEquality[A, B](implicit equalityOfA: Equality[A]): Constraint[A, B] = new BasicConstraint[A, B](equalityOfA)
implicit override def unconstrainedFreshEquality[A, B](implicit equalityOfA: Equality[A]): EqualityConstraint[A, B] = new BasicEqualityConstraint[A, B](equalityOfA)
override def lowPriorityTypeCheckedConstraint[A, B](implicit equivalenceOfB: Equivalence[B], ev: A <:< B): Constraint[A, B] = new AToBEquivalenceConstraint[A, B](equivalenceOfB, ev)
override def convertEquivalenceToAToBConstraint[A, B](equivalenceOfB: Equivalence[B])(implicit ev: A <:< B): Constraint[A, B] = new AToBEquivalenceConstraint[A, B](equivalenceOfB, ev)
override def typeCheckedConstraint[A, B](implicit equivalenceOfA: Equivalence[A], ev: B <:< A): Constraint[A, B] = new BToAEquivalenceConstraint[A, B](equivalenceOfA, ev)
override def convertEquivalenceToBToAConstraint[A, B](equivalenceOfA: Equivalence[A])(implicit ev: B <:< A): Constraint[A, B] = new BToAEquivalenceConstraint[A, B](equivalenceOfA, ev)
override def lowPriorityCheckedEqualityConstraint[A, B](implicit equivalenceOfB: Equivalence[B], ev: A <:< B): EqualityConstraint[A, B] = new AToBEqualityConstraint[A, B](equivalenceOfB, ev)
override def convertEquivalenceToAToBEqualityConstraint[A, B](equivalenceOfB: Equivalence[B])(implicit ev: A <:< B): EqualityConstraint[A, B] = new AToBEqualityConstraint[A, B](equivalenceOfB, ev)
override def checkedEqualityConstraint[A, B](implicit equivalenceOfA: Equivalence[A], ev: B <:< A): EqualityConstraint[A, B] = new BToAEqualityConstraint[A, B](equivalenceOfA, ev)
override def convertEquivalenceToBToAEqualityConstraint[A, B](equivalenceOfA: Equivalence[A])(implicit ev: B <:< A): EqualityConstraint[A, B] = new BToAEqualityConstraint[A, B](equivalenceOfA, ev)
override def lowPriorityConversionCheckedConstraint[A, B](implicit equivalenceOfB: Equivalence[B], cnv: A => B): Constraint[A, B] = new AToBEquivalenceConstraint[A, B](equivalenceOfB, cnv)
override def convertEquivalenceToAToBConversionConstraint[A, B](equivalenceOfB: Equivalence[B])(implicit ev: A => B): Constraint[A, B] = new AToBEquivalenceConstraint[A, B](equivalenceOfB, ev)
override def conversionCheckedConstraint[A, B](implicit equivalenceOfA: Equivalence[A], cnv: B => A): Constraint[A, B] = new BToAEquivalenceConstraint[A, B](equivalenceOfA, cnv)
override def convertEquivalenceToBToAConversionConstraint[A, B](equivalenceOfA: Equivalence[A])(implicit ev: B => A): Constraint[A, B] = new BToAEquivalenceConstraint[A, B](equivalenceOfA, ev)
// For EnabledEquality
override def enabledEqualityConstraintFor[A](implicit equivalenceOfA: Equivalence[A], ev: EnabledEqualityFor[A]): EqualityConstraint[A, A] = new EnabledEqualityConstraint[A](equivalenceOfA)
override def lowPriorityEnabledEqualityConstraintBetween[A, B](implicit equivalenceOfB: Equivalence[B], cnv: EnabledEqualityBetween[A, B]): EqualityConstraint[A, B] = new AToBEnabledEqualityConstraint[A, B](equivalenceOfB, cnv)
override def enabledEqualityConstraintBetween[A, B](implicit equivalenceOfA: Equivalence[A], cnv: EnabledEqualityBetween[B, A]): EqualityConstraint[A, B] = new BToAEnabledEqualityConstraint[A, B](equivalenceOfA, cnv)
}
/**
* Companion object to trait <code>UncheckedEquality</code> that facilitates the importing of <code>UncheckedEquality</code> members as
* an alternative to mixing it in. One use case is to import <code>UncheckedEquality</code> members so you can use
* them in the Scala interpreter:
*
* <pre class="stREPL">
* $ scala -classpath scalatest.jar
* Welcome to Scala version 2.10.0
* Type in expressions to have them evaluated.
* Type :help for more information.
*
* scala> import org.scalactic._
* import org.scalactic._
*
* scala> import UncheckedEquality._
* import UncheckedEquality._
*
* scala> 1 + 1 === 2
* res0: Boolean = true
* </pre>
*/
@deprecated("Please use org.scalactic.UncheckedEquality instead.")
object TripleEquals extends TripleEquals
|
travisbrown/scalatest
|
src/main/scala/org/scalactic/TripleEquals.scala
|
Scala
|
apache-2.0
| 9,809 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashSet
import scala.util.Random
import org.scalatest.FunSuite
import com.google.common.io.Files
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.conf.{Configuration, Configurable}
import org.apache.spark.SparkContext._
import org.apache.spark.{Partitioner, SharedSparkContext}
class PairRDDFunctionsSuite extends FunSuite with SharedSparkContext {
test("groupByKey") {
val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (2, 1)))
val groups = pairs.groupByKey().collect()
assert(groups.size === 2)
val valuesFor1 = groups.find(_._1 == 1).get._2
assert(valuesFor1.toList.sorted === List(1, 2, 3))
val valuesFor2 = groups.find(_._1 == 2).get._2
assert(valuesFor2.toList.sorted === List(1))
}
test("groupByKey with duplicates") {
val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1)))
val groups = pairs.groupByKey().collect()
assert(groups.size === 2)
val valuesFor1 = groups.find(_._1 == 1).get._2
assert(valuesFor1.toList.sorted === List(1, 1, 2, 3))
val valuesFor2 = groups.find(_._1 == 2).get._2
assert(valuesFor2.toList.sorted === List(1))
}
test("groupByKey with negative key hash codes") {
val pairs = sc.parallelize(Array((-1, 1), (-1, 2), (-1, 3), (2, 1)))
val groups = pairs.groupByKey().collect()
assert(groups.size === 2)
val valuesForMinus1 = groups.find(_._1 == -1).get._2
assert(valuesForMinus1.toList.sorted === List(1, 2, 3))
val valuesFor2 = groups.find(_._1 == 2).get._2
assert(valuesFor2.toList.sorted === List(1))
}
test("groupByKey with many output partitions") {
val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (2, 1)))
val groups = pairs.groupByKey(10).collect()
assert(groups.size === 2)
val valuesFor1 = groups.find(_._1 == 1).get._2
assert(valuesFor1.toList.sorted === List(1, 2, 3))
val valuesFor2 = groups.find(_._1 == 2).get._2
assert(valuesFor2.toList.sorted === List(1))
}
test("reduceByKey") {
val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1)))
val sums = pairs.reduceByKey(_+_).collect()
assert(sums.toSet === Set((1, 7), (2, 1)))
}
test("reduceByKey with collectAsMap") {
val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1)))
val sums = pairs.reduceByKey(_+_).collectAsMap()
assert(sums.size === 2)
assert(sums(1) === 7)
assert(sums(2) === 1)
}
test("reduceByKey with many output partitons") {
val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1)))
val sums = pairs.reduceByKey(_+_, 10).collect()
assert(sums.toSet === Set((1, 7), (2, 1)))
}
test("reduceByKey with partitioner") {
val p = new Partitioner() {
def numPartitions = 2
def getPartition(key: Any) = key.asInstanceOf[Int]
}
val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 1), (0, 1))).partitionBy(p)
val sums = pairs.reduceByKey(_+_)
assert(sums.collect().toSet === Set((1, 4), (0, 1)))
assert(sums.partitioner === Some(p))
// count the dependencies to make sure there is only 1 ShuffledRDD
val deps = new HashSet[RDD[_]]()
def visit(r: RDD[_]) {
for (dep <- r.dependencies) {
deps += dep.rdd
visit(dep.rdd)
}
}
visit(sums)
assert(deps.size === 2) // ShuffledRDD, ParallelCollection.
}
test("countApproxDistinctByKey") {
def error(est: Long, size: Long) = math.abs(est - size) / size.toDouble
/* Since HyperLogLog unique counting is approximate, and the relative standard deviation is
* only a statistical bound, the tests can fail for large values of relativeSD. We will be using
* relatively tight error bounds to check correctness of functionality rather than checking
* whether the approximation conforms with the requested bound.
*/
val relativeSD = 0.001
// For each value i, there are i tuples with first element equal to i.
// Therefore, the expected count for key i would be i.
val stacked = (1 to 100).flatMap(i => (1 to i).map(j => (i, j)))
val rdd1 = sc.parallelize(stacked)
val counted1 = rdd1.countApproxDistinctByKey(relativeSD).collect()
counted1.foreach{
case(k, count) => assert(error(count, k) < relativeSD)
}
val rnd = new Random()
// The expected count for key num would be num
val randStacked = (1 to 100).flatMap { i =>
val num = rnd.nextInt % 500
(1 to num).map(j => (num, j))
}
val rdd2 = sc.parallelize(randStacked)
val counted2 = rdd2.countApproxDistinctByKey(relativeSD, 4).collect()
counted2.foreach{
case(k, count) => assert(error(count, k) < relativeSD)
}
}
test("join") {
val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1)))
val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w')))
val joined = rdd1.join(rdd2).collect()
assert(joined.size === 4)
assert(joined.toSet === Set(
(1, (1, 'x')),
(1, (2, 'x')),
(2, (1, 'y')),
(2, (1, 'z'))
))
}
test("join all-to-all") {
val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (1, 3)))
val rdd2 = sc.parallelize(Array((1, 'x'), (1, 'y')))
val joined = rdd1.join(rdd2).collect()
assert(joined.size === 6)
assert(joined.toSet === Set(
(1, (1, 'x')),
(1, (1, 'y')),
(1, (2, 'x')),
(1, (2, 'y')),
(1, (3, 'x')),
(1, (3, 'y'))
))
}
test("leftOuterJoin") {
val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1)))
val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w')))
val joined = rdd1.leftOuterJoin(rdd2).collect()
assert(joined.size === 5)
assert(joined.toSet === Set(
(1, (1, Some('x'))),
(1, (2, Some('x'))),
(2, (1, Some('y'))),
(2, (1, Some('z'))),
(3, (1, None))
))
}
test("rightOuterJoin") {
val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1)))
val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w')))
val joined = rdd1.rightOuterJoin(rdd2).collect()
assert(joined.size === 5)
assert(joined.toSet === Set(
(1, (Some(1), 'x')),
(1, (Some(2), 'x')),
(2, (Some(1), 'y')),
(2, (Some(1), 'z')),
(4, (None, 'w'))
))
}
test("join with no matches") {
val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1)))
val rdd2 = sc.parallelize(Array((4, 'x'), (5, 'y'), (5, 'z'), (6, 'w')))
val joined = rdd1.join(rdd2).collect()
assert(joined.size === 0)
}
test("join with many output partitions") {
val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1)))
val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w')))
val joined = rdd1.join(rdd2, 10).collect()
assert(joined.size === 4)
assert(joined.toSet === Set(
(1, (1, 'x')),
(1, (2, 'x')),
(2, (1, 'y')),
(2, (1, 'z'))
))
}
test("groupWith") {
val rdd1 = sc.parallelize(Array((1, 1), (1, 2), (2, 1), (3, 1)))
val rdd2 = sc.parallelize(Array((1, 'x'), (2, 'y'), (2, 'z'), (4, 'w')))
val joined = rdd1.groupWith(rdd2).collect()
assert(joined.size === 4)
assert(joined.toSet === Set(
(1, (ArrayBuffer(1, 2), ArrayBuffer('x'))),
(2, (ArrayBuffer(1), ArrayBuffer('y', 'z'))),
(3, (ArrayBuffer(1), ArrayBuffer())),
(4, (ArrayBuffer(), ArrayBuffer('w')))
))
}
test("zero-partition RDD") {
val emptyDir = Files.createTempDir()
val file = sc.textFile(emptyDir.getAbsolutePath)
assert(file.partitions.size == 0)
assert(file.collect().toList === Nil)
// Test that a shuffle on the file works, because this used to be a bug
assert(file.map(line => (line, 1)).reduceByKey(_ + _).collect().toList === Nil)
}
test("keys and values") {
val rdd = sc.parallelize(Array((1, "a"), (2, "b")))
assert(rdd.keys.collect().toList === List(1, 2))
assert(rdd.values.collect().toList === List("a", "b"))
}
test("default partitioner uses partition size") {
// specify 2000 partitions
val a = sc.makeRDD(Array(1, 2, 3, 4), 2000)
// do a map, which loses the partitioner
val b = a.map(a => (a, (a * 2).toString))
// then a group by, and see we didn't revert to 2 partitions
val c = b.groupByKey()
assert(c.partitions.size === 2000)
}
test("default partitioner uses largest partitioner") {
val a = sc.makeRDD(Array((1, "a"), (2, "b")), 2)
val b = sc.makeRDD(Array((1, "a"), (2, "b")), 2000)
val c = a.join(b)
assert(c.partitions.size === 2000)
}
test("subtract") {
val a = sc.parallelize(Array(1, 2, 3), 2)
val b = sc.parallelize(Array(2, 3, 4), 4)
val c = a.subtract(b)
assert(c.collect().toSet === Set(1))
assert(c.partitions.size === a.partitions.size)
}
test("subtract with narrow dependency") {
// use a deterministic partitioner
val p = new Partitioner() {
def numPartitions = 5
def getPartition(key: Any) = key.asInstanceOf[Int]
}
// partitionBy so we have a narrow dependency
val a = sc.parallelize(Array((1, "a"), (2, "b"), (3, "c"))).partitionBy(p)
// more partitions/no partitioner so a shuffle dependency
val b = sc.parallelize(Array((2, "b"), (3, "cc"), (4, "d")), 4)
val c = a.subtract(b)
assert(c.collect().toSet === Set((1, "a"), (3, "c")))
// Ideally we could keep the original partitioner...
assert(c.partitioner === None)
}
test("subtractByKey") {
val a = sc.parallelize(Array((1, "a"), (1, "a"), (2, "b"), (3, "c")), 2)
val b = sc.parallelize(Array((2, 20), (3, 30), (4, 40)), 4)
val c = a.subtractByKey(b)
assert(c.collect().toSet === Set((1, "a"), (1, "a")))
assert(c.partitions.size === a.partitions.size)
}
test("subtractByKey with narrow dependency") {
// use a deterministic partitioner
val p = new Partitioner() {
def numPartitions = 5
def getPartition(key: Any) = key.asInstanceOf[Int]
}
// partitionBy so we have a narrow dependency
val a = sc.parallelize(Array((1, "a"), (1, "a"), (2, "b"), (3, "c"))).partitionBy(p)
// more partitions/no partitioner so a shuffle dependency
val b = sc.parallelize(Array((2, "b"), (3, "cc"), (4, "d")), 4)
val c = a.subtractByKey(b)
assert(c.collect().toSet === Set((1, "a"), (1, "a")))
assert(c.partitioner.get === p)
}
test("foldByKey") {
val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1)))
val sums = pairs.foldByKey(0)(_+_).collect()
assert(sums.toSet === Set((1, 7), (2, 1)))
}
test("foldByKey with mutable result type") {
val pairs = sc.parallelize(Array((1, 1), (1, 2), (1, 3), (1, 1), (2, 1)))
val bufs = pairs.mapValues(v => ArrayBuffer(v)).cache()
// Fold the values using in-place mutation
val sums = bufs.foldByKey(new ArrayBuffer[Int])(_ ++= _).collect()
assert(sums.toSet === Set((1, ArrayBuffer(1, 2, 3, 1)), (2, ArrayBuffer(1))))
// Check that the mutable objects in the original RDD were not changed
assert(bufs.collect().toSet === Set(
(1, ArrayBuffer(1)),
(1, ArrayBuffer(2)),
(1, ArrayBuffer(3)),
(1, ArrayBuffer(1)),
(2, ArrayBuffer(1))))
}
test("saveNewAPIHadoopFile should call setConf if format is configurable") {
val pairs = sc.parallelize(Array((new Integer(1), new Integer(1))))
// No error, non-configurable formats still work
pairs.saveAsNewAPIHadoopFile[FakeFormat]("ignored")
/*
Check that configurable formats get configured:
ConfigTestFormat throws an exception if we try to write
to it when setConf hasn't been called first.
Assertion is in ConfigTestFormat.getRecordWriter.
*/
pairs.saveAsNewAPIHadoopFile[ConfigTestFormat]("ignored")
}
}
/*
These classes are fakes for testing
"saveNewAPIHadoopFile should call setConf if format is configurable".
Unfortunately, they have to be top level classes, and not defined in
the test method, because otherwise Scala won't generate no-args constructors
and the test will therefore throw InstantiationException when saveAsNewAPIHadoopFile
tries to instantiate them with Class.newInstance.
*/
class FakeWriter extends RecordWriter[Integer, Integer] {
def close(p1: TaskAttemptContext) = ()
def write(p1: Integer, p2: Integer) = ()
}
class FakeCommitter extends OutputCommitter {
def setupJob(p1: JobContext) = ()
def needsTaskCommit(p1: TaskAttemptContext): Boolean = false
def setupTask(p1: TaskAttemptContext) = ()
def commitTask(p1: TaskAttemptContext) = ()
def abortTask(p1: TaskAttemptContext) = ()
}
class FakeFormat() extends OutputFormat[Integer, Integer]() {
def checkOutputSpecs(p1: JobContext) = ()
def getRecordWriter(p1: TaskAttemptContext): RecordWriter[Integer, Integer] = {
new FakeWriter()
}
def getOutputCommitter(p1: TaskAttemptContext): OutputCommitter = {
new FakeCommitter()
}
}
class ConfigTestFormat() extends FakeFormat() with Configurable {
var setConfCalled = false
def setConf(p1: Configuration) = {
setConfCalled = true
()
}
def getConf: Configuration = null
override def getRecordWriter(p1: TaskAttemptContext): RecordWriter[Integer, Integer] = {
assert(setConfCalled, "setConf was never called")
super.getRecordWriter(p1)
}
}
|
sryza/spark
|
core/src/test/scala/org/apache/spark/rdd/PairRDDFunctionsSuite.scala
|
Scala
|
apache-2.0
| 14,293 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security
import java.util.{Collection, Properties}
import org.apache.kafka.common.security.authenticator.CredentialCache
import org.apache.kafka.common.security.scram.ScramCredential
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.common.config.ConfigDef._
import org.apache.kafka.common.security.scram.internals.{ScramCredentialUtils, ScramMechanism}
import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache
class CredentialProvider(scramMechanisms: Collection[String], val tokenCache: DelegationTokenCache) {
val credentialCache = new CredentialCache
ScramCredentialUtils.createCache(credentialCache, scramMechanisms)
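// Illustrative call (the names below are made up; the property value is a SCRAM credential string,
// e.g. as produced by ScramCredentialUtils.credentialToString):
//
//   val props = new Properties()
//   props.setProperty(ScramMechanism.SCRAM_SHA_256.mechanismName, persistedCredential)
//   credentialProvider.updateCredentials("alice", props)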
def updateCredentials(username: String, config: Properties): Unit = {
for (mechanism <- ScramMechanism.values()) {
val cache = credentialCache.cache(mechanism.mechanismName, classOf[ScramCredential])
if (cache != null) {
config.getProperty(mechanism.mechanismName) match {
case null => cache.remove(username)
case c => cache.put(username, ScramCredentialUtils.credentialFromString(c))
}
}
}
}
}
object CredentialProvider {
def userCredentialConfigs: ConfigDef = {
ScramMechanism.values.foldLeft(new ConfigDef) {
(c, m) => c.define(m.mechanismName, Type.STRING, null, Importance.MEDIUM, s"User credentials for SCRAM mechanism ${m.mechanismName}")
}
}
}
|
noslowerdna/kafka
|
core/src/main/scala/kafka/security/CredentialProvider.scala
|
Scala
|
apache-2.0
| 2,239 |
package com.thoughtworks.datacommons.prepbuddy.imputations
import com.thoughtworks.datacommons.prepbuddy.SparkTestCase
import com.thoughtworks.datacommons.prepbuddy.rdds.TransformableRDD
import com.thoughtworks.datacommons.prepbuddy.types.CSV
import com.thoughtworks.datacommons.prepbuddy.utils.RowRecord
import org.apache.spark.rdd.RDD
import scala.collection.mutable
class ImputationTest extends SparkTestCase {
test("should impute value with returned value of strategy") {
val data = Array("1,", "2,45", "3,65", "4,67", "5,23")
val dataSet: RDD[String] = sparkContext.parallelize(data)
val transformableRDD: TransformableRDD = new TransformableRDD(dataSet, CSV)
val imputed: TransformableRDD = transformableRDD.impute(1, new ImputationStrategy {
override def handleMissingData(record: RowRecord): String = "hello"
override def prepareSubstitute(rdd: TransformableRDD, missingDataColumn: Int): Unit = {}
})
val collected: Array[String] = imputed.collect()
assert(collected.contains("1,hello"))
assert(collected.contains("2,45"))
}
test("should impute value with returned value of strategy by referring to the column by name") {
val data = Array("1,", "2,45", "3,65", "4,67", "5,23")
val dataSet: RDD[String] = sparkContext.parallelize(data)
val schema: Map[String, Int] = Map("First" -> 0, "Second" -> 1)
val transformableRDD: TransformableRDD = new TransformableRDD(dataSet, CSV).useSchema(schema)
val imputed: TransformableRDD = transformableRDD.impute("Second", new ImputationStrategy {
override def handleMissingData(record: RowRecord): String = "hello"
override def prepareSubstitute(rdd: TransformableRDD, missingDataColumn: Int): Unit = {}
})
val collected: Array[String] = imputed.collect()
assert(collected.contains("1,hello"))
assert(collected.contains("2,45"))
}
test("should impute the missing values by considering missing hints") {
val initialDataSet: RDD[String] = sparkContext.parallelize(Array(
"1,NULL,2,3,4", "2,N/A,23,21,23",
"3,N/A,21,32,32", "4,-,2,3,4",
"5,,54,32,54", "6,32,22,33,23"))
val initialRDD: TransformableRDD = new TransformableRDD(initialDataSet)
val imputed: Array[String] = initialRDD.impute(1, new ImputationStrategy() {
def prepareSubstitute(rdd: TransformableRDD, missingDataColumn: Int) {
}
def handleMissingData(record: RowRecord): String = {
"X"
}
}, List("N/A", "-", "NA", "NULL")).collect
assert(imputed.contains("1,X,2,3,4"))
assert(imputed.contains("2,X,23,21,23"))
assert(imputed.contains("3,X,21,32,32"))
assert(imputed.contains("4,X,2,3,4"))
assert(imputed.contains("5,X,54,32,54"))
assert(imputed.contains("6,32,22,33,23"))
}
test("should impute the missing values by considering missing hints when column is specified by name") {
val initialDataSet: RDD[String] = sparkContext.parallelize(Array(
"1,NULL,2,3,4", "2,N/A,23,21,23",
"3,N/A,21,32,32", "4,-,2,3,4",
"5,,54,32,54", "6,32,22,33,23"))
val schema: Map[String, Int] = Map("First" -> 0, "Second" -> 1, "Third" -> 2, "Fourth" -> 3, "Fifth" -> 4)
val initialRDD: TransformableRDD = new TransformableRDD(initialDataSet).useSchema(schema)
val imputed: Array[String] = initialRDD.impute("Second", new ImputationStrategy() {
def prepareSubstitute(rdd: TransformableRDD, missingDataColumn: Int) {
}
def handleMissingData(record: RowRecord): String = {
"X"
}
}, List("N/A", "-", "NA", "NULL")).collect
assert(imputed.contains("1,X,2,3,4"))
assert(imputed.contains("2,X,23,21,23"))
assert(imputed.contains("3,X,21,32,32"))
assert(imputed.contains("4,X,2,3,4"))
assert(imputed.contains("5,X,54,32,54"))
assert(imputed.contains("6,32,22,33,23"))
}
test("should impute missing values by mean of the given column index ") {
val data = Array("1,", "2,45", "3,65", "4,67", "5,23")
val dataSet: RDD[String] = sparkContext.parallelize(data)
val transformableRDD: TransformableRDD = new TransformableRDD(dataSet, CSV)
val imputedByMean: TransformableRDD = transformableRDD.impute(1, new MeanSubstitution())
val collected: Array[String] = imputedByMean.collect()
assert(collected.contains("1,50.0"))
assert(collected.contains("2,45"))
}
test("should impute the missing values by approx mean") {
val data = Array("1,", "2,45", "3,65", "4,67", "5,23")
val dataSet: RDD[String] = sparkContext.parallelize(data)
val transformableRDD: TransformableRDD = new TransformableRDD(dataSet, CSV)
val imputedByMean: TransformableRDD = transformableRDD.impute(1, new ApproxMeanSubstitution)
val collected: Array[String] = imputedByMean.collect()
assert(collected.contains("1,50.0"))
assert(collected.contains("5,23"))
}
test("should impute the missing values by mode") {
val data = Array("1,", "2,45", "3,45", "4,", "5,23")
val dataSet: RDD[String] = sparkContext.parallelize(data)
val transformableRDD: TransformableRDD = new TransformableRDD(dataSet, CSV)
val imputedByMean: TransformableRDD = transformableRDD.impute(1, new ModeSubstitution())
val collected: Array[String] = imputedByMean.collect()
assert(collected.contains("1,45"))
assert(collected.contains("4,45"))
}
test("should impute by naive bayes substitution") {
val dataset: mutable.WrappedArray[String] = {
Array("sunny,hot,high,false,N",
"sunny,hot,high,true,N",
"overcast,hot,high,false,P",
"rain,mild,high,false,P",
"rain,cool,normal,false,P",
"rain,cool,normal,true,N",
"overcast,cool,normal,true,P",
"sunny,mild,high,false,N",
"sunny,cool,normal,false,P",
"rain,mild,normal,false,P",
"sunny,mild,normal,true,P",
"overcast,mild,high,true,P",
"overcast,hot,normal,false,P",
"rain,mild,high,true,N")
}
val initialDataSet: RDD[String] = sparkContext.parallelize(dataset)
val initialRDD: TransformableRDD = new TransformableRDD(initialDataSet)
val naiveBayesSubstitution: NaiveBayesSubstitution = new NaiveBayesSubstitution(Array(0, 1, 2, 3))
naiveBayesSubstitution.prepareSubstitute(initialRDD, 4)
var rowRecord: Array[String] = "sunny,cool,high,false".split(",")
val mostProbable: String = {
naiveBayesSubstitution.handleMissingData(new RowRecord(rowRecord))
}
assert("N" == mostProbable)
rowRecord = "rain,hot,high,false".split(",")
assert("N" == naiveBayesSubstitution.handleMissingData(new RowRecord(rowRecord)))
val record: Array[String] = "overcast, hot, high, true".split(",")
assert("P" == naiveBayesSubstitution.handleMissingData(new RowRecord(record)))
}
test("should impute by linear regression") {
val initialDataSet: RDD[String] = {
sparkContext.parallelize(Array("60,3.1", "61,3.6", "62,3.8", "63,4", "65,4.1"))
}
val initialRDD: TransformableRDD = new TransformableRDD(initialDataSet)
val strategy: UnivariateLinearRegressionSubstitution = new UnivariateLinearRegressionSubstitution(0)
strategy.prepareSubstitute(initialRDD, 1)
val record: Array[String] = Array[String]("64")
val expected: String = "4.06"
assert(expected == strategy.handleMissingData(new RowRecord(record)))
val emptyValue: Array[String] = Array[String]("")
val expected1: String = ""
assert(expected1 == strategy.handleMissingData(new RowRecord(emptyValue)))
}
}
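// Illustrative sketch (not part of the original test suite): a reusable strategy
// that substitutes a fixed value, showing the two-phase ImputationStrategy
// contract exercised by the anonymous strategies above.
class ConstantSubstitution(value: String) extends ImputationStrategy {
  override def prepareSubstitute(rdd: TransformableRDD, missingDataColumn: Int): Unit = {
    // Nothing to precompute for a constant substitute.
  }

  override def handleMissingData(record: RowRecord): String = value
}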
|
data-commons/prep-buddy
|
src/test/scala/com/thoughtworks/datacommons/prepbuddy/imputations/ImputationTest.scala
|
Scala
|
apache-2.0
| 8,159 |
// NOTE: commented out in order to avoid scope pollution for typecheckError
// package scala.meta.tests
// package api
import munit._
import org.scalameta.tests._
class PublicSuite extends FunSuite {
test("quasiquotes without import") {
assert(typecheckError("""
q"hello"
""") == "value q is not a member of StringContext")
}
test("quasiquotes without static dialect") {
val currentDialect = scala.meta.Dialect.current.toString
assert(
typecheckError(
"""
import scala.meta._
implicit val dialect: scala.meta.Dialect = ???
q"hello"
"""
) == s"dialect of type scala.meta.Dialect is not supported by quasiquotes (to fix this, import something from scala.meta.dialects, e.g. scala.meta.dialects.${currentDialect})"
)
}
test("quasiquotes when everything's correct") {
assert(typecheckError("""
import scala.meta._
import scala.meta.dialects.Scala211
q"hello"
""") == "")
}
test("InputLike.parse without import") {
assert(typecheckError("""
"".parse[scala.meta.Term]
""") == "value parse is not a member of String")
}
test("InputLike.parse without input-likeness") {
assert(typecheckError("""
import scala.meta._
1.parse[Term]
""") == "don't know how to convert Int to scala.meta.inputs.Input")
}
test("InputLike.parse without parseability") {
assert(typecheckError("""
import scala.meta._
import scala.meta.dialects.Scala211
"".parse[Int]
""") == "don't know how to parse into Int")
}
test("InputLike.parse when everything's correct (static dialect)") {
assert(typecheckError("""
import scala.meta._
import scala.meta.dialects.Scala211
"".parse[Term]
""") == "")
}
test("InputLike.parse when everything's correct (dynamic dialect)") {
assert(typecheckError("""
import scala.meta._
implicit val dialect: scala.meta.Dialect = ???
"".parse[Term]
""") == "")
}
test("InputLike.parse with various input types") {
assert(typecheckError("""
import scala.meta._
import scala.meta.dialects.Scala211
(??? : Input).parse[Term]
(??? : String).parse[Term]
(??? : java.io.File).parse[Term]
(??? : java.nio.file.Path).parse[Term]
(??? : AbsolutePath).parse[Term]
(??? : Tokens).parse[Term]
(??? : Array[Char]).parse[Term]
""") == "")
}
// NOTE: this works because implicit scope for Scala211 includes meta.`package`
test("Dialect.parse without import") {
assert(typecheckError("""
scala.meta.dialects.Scala211("").parse[scala.meta.Term]
""") == "")
}
test("Dialect.parse without input-likeness") {
assert(typecheckError("""
scala.meta.dialects.Scala211(1).parse[scala.meta.Term]
""") == "don't know how to convert Int to scala.meta.inputs.Input")
}
test("Dialect.parse without parseability") {
assert(typecheckError("""
scala.meta.dialects.Scala211("").parse[Int]
""") == "don't know how to parse into Int")
}
test("Dialect.parse with various input types") {
assert(
typecheckError("""
scala.meta.dialects.Scala211(??? : scala.meta.Input).parse[scala.meta.Term]
scala.meta.dialects.Scala211(??? : String).parse[scala.meta.Term]
scala.meta.dialects.Scala211(??? : java.io.File).parse[scala.meta.Term]
scala.meta.dialects.Scala211(??? : scala.meta.Tokens).parse[scala.meta.Term]
scala.meta.dialects.Scala211(??? : Array[Char]).parse[scala.meta.Term]
""") == ""
)
}
test("tokenize without import") {
assert(typecheckError("""
"".tokenize
""") == "value tokenize is not a member of String")
}
test("tokenize without input-likeness") {
assert(typecheckError("""
import scala.meta._
1.tokenize
""") == "don't know how to convert Int to scala.meta.inputs.Input")
}
test("tokenize when everything's correct (static dialect)") {
assert(typecheckError("""
import scala.meta._
import scala.meta.dialects.Scala211
"".tokenize
""") == "")
}
test("tokenize when everything's correct (dynamic dialect)") {
assert(typecheckError("""
import scala.meta._
implicit val dialect: scala.meta.Dialect = ???
"".tokenize
""") == "")
}
test("tokenize with various input types") {
assert(typecheckError("""
import scala.meta._
import scala.meta.dialects.Scala211
(??? : Input).tokenize
(??? : String).tokenize
(??? : java.io.File).tokenize
(??? : Tokens).tokenize
(??? : Array[Char]).tokenize
""") == "")
}
// NOTE: this works because implicit scope for Scala211 includes meta.`package`
test("Dialect.tokenize without import") {
assert(typecheckError("""
scala.meta.dialects.Scala211("").tokenize
""") == "")
}
test("Dialect.tokenize without input-likeness") {
assert(typecheckError("""
scala.meta.dialects.Scala211(1).tokenize
""") == "don't know how to convert Int to scala.meta.inputs.Input")
}
test("Dialect.tokenize when everything's correct") {
assert(typecheckError("""
scala.meta.dialects.Scala211("").tokenize
""") == "")
}
test("Dialect.tokenize with various input types") {
assert(typecheckError("""
scala.meta.dialects.Scala211(??? : scala.meta.Input).tokenize
scala.meta.dialects.Scala211(??? : String).tokenize
scala.meta.dialects.Scala211(??? : java.io.File).tokenize
scala.meta.dialects.Scala211(??? : scala.meta.Tokens).tokenize
scala.meta.dialects.Scala211(??? : Array[Char]).tokenize
""") == "")
}
test("show[Syntax] without import") {
assert(typecheckError("""
(??? : scala.meta.Tree).show[Syntax]
""") == "not found: type Syntax")
}
test("show[Syntax] when everything's correct (static dialect)") {
assert(typecheckError("""
import scala.meta._
import scala.meta.dialects.Scala211
(??? : Tree).show[Syntax]
(??? : Tree).syntax
""") == "")
}
test("show[Syntax] when everything's correct (dynamic dialect)") {
assert(typecheckError("""
import scala.meta._
implicit val dialect: scala.meta.Dialect = ???
(??? : Tree).show[Syntax]
(??? : Tree).syntax
dialect(??? : Tree).syntax
""") == "")
}
test("show[Structure] without import") {
assert(typecheckError("""
(??? : scala.meta.Tree).show[Structure]
""") == "not found: type Structure")
}
test("show[Structure] when everything's correct") {
assert(typecheckError("""
import scala.meta._
(??? : Tree).show[Structure]
(??? : Tree).structure
""") == "")
}
test("Token.is[T] without import") {
assert(typecheckError("""
(??? : scala.meta.Token).is[scala.meta.Token]
(??? : scala.meta.Token).is[scala.meta.Token.Ident]
""") == "")
}
test("Tree.is[T] without import") {
assert(typecheckError("""
(??? : scala.meta.Tree).is[scala.meta.Tree]
(??? : scala.meta.Tree).is[scala.meta.Type]
""") == "")
}
}
|
scalameta/scalameta
|
tests/jvm/src/test/scala/scala/meta/tests/api/PublicSuite.scala
|
Scala
|
bsd-3-clause
| 7,102 |
package ch.descabato.core
import java.util.Objects
import java.util.concurrent.{ExecutorService, Executors}
import akka.actor.{ActorSystem, TypedActor, TypedProps}
import akka.stream.ActorMaterializer
import ch.descabato.core.actors._
import ch.descabato.core.config.BackupFolderConfiguration
import ch.descabato.core.util.FileManager
import ch.descabato.frontend.ProgressReporters
import ch.descabato.remote.{RemoteHandler, SimpleRemoteHandler}
import ch.descabato.utils.Utils
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
class Universe(val config: BackupFolderConfiguration) extends Utils with LifeCycle {
Objects.requireNonNull(config)
implicit val system = ActorSystem("Sys")
implicit val materializer = ActorMaterializer()
private var _finished = false
private var _shutdown = false
val cpuService: ExecutorService = Executors.newFixedThreadPool(Math.min(config.threads, 8))
implicit val ex = ExecutionContext.fromExecutorService(cpuService)
val eventBus = new MyEventBus()
val fileManagerNew = new FileManager(config)
val context = new BackupContext(config, system, fileManagerNew, ex, eventBus)
private val journalHandlerProps: TypedProps[JournalHandler] = TypedProps.apply(classOf[JournalHandler], new SimpleJournalHandler(context))
val journalHandler: JournalHandler = TypedActor(system).typedActorOf(journalHandlerProps.withTimeout(5.minutes))
private val chunkStorageProps: TypedProps[ChunkStorage] = TypedProps.apply(classOf[ChunkStorage], new ChunkStorageActor(context, journalHandler))
private val name = "blockStorageActor"
val chunkStorageActor: ChunkStorage = TypedActor(system).typedActorOf(chunkStorageProps.withTimeout(5.minutes), name)
private val metadataStorageProps: TypedProps[MetadataStorageActor] = TypedProps.apply[MetadataStorageActor](classOf[MetadataStorage], new MetadataStorageActor(context, journalHandler))
val metadataStorageActor: MetadataStorage = TypedActor(system).typedActorOf(metadataStorageProps.withTimeout(5.minutes))
private val compressorProps: TypedProps[Compressor] = TypedProps.apply[Compressor](classOf[Compressor], Compressors(config))
val compressor: Compressor = TypedActor(system).typedActorOf(compressorProps.withTimeout(5.minutes))
context.eventBus.subscribe(MySubscriber(TypedActor(system).getActorRefFor(metadataStorageActor), metadataStorageActor), MyEvent.globalTopic)
val actors = Seq(metadataStorageActor, chunkStorageActor)
val remoteActorOption: Option[RemoteHandler] = {
if (config.remoteOptions.enabled) {
val remoteActorProps: TypedProps[RemoteHandler] = TypedProps.apply[RemoteHandler](classOf[RemoteHandler], new SimpleRemoteHandler(context, journalHandler))
val remoteActor: RemoteHandler = TypedActor(system).typedActorOf(remoteActorProps)
context.eventBus.subscribe(MySubscriber(TypedActor(system).getActorRefFor(remoteActor), remoteActor), MyEvent.globalTopic)
Some(remoteActor)
} else {
None
}
}
override def startup(): Future[Boolean] = {
val allActors = actors ++ remoteActorOption
Future.sequence(allActors.map(_.startup())).map(_.reduce(_ && _))
}
override def finish(): Future[Boolean] = {
if (!_finished) {
waitForNormalActorsToFinish()
waitForRemoteActorToFinish()
_finished = true
}
Future.successful(true)
}
private def waitForNormalActorsToFinish() = {
var actorsToDo: Seq[LifeCycle] = actors
while (actorsToDo.nonEmpty) {
val futures = actorsToDo.map(x => (x, x.finish()))
actorsToDo = Seq.empty
for ((actor, future) <- futures) {
val hasFinished = Await.result(future, 1.minute)
if (!hasFinished) {
logger.info("One actor can not finish yet " + actor)
actorsToDo :+= actor
}
}
Thread.sleep(500)
}
}
private def waitForRemoteActorToFinish() = {
ProgressReporters.activeCounters = Seq(config.remoteOptions.uploaderCounter1)
remoteActorOption match {
case Some(remote) =>
var isFinished = false
do {
isFinished = Await.result(remote.finish(), 1.minute)
if (!isFinished) {
Thread.sleep(1000)
}
} while (!isFinished)
case None =>
// nothing to do
}
}
def shutdown(): Unit = {
if (!_shutdown) {
Await.result(finish(), 1.minute)
journalHandler.finish()
system.terminate()
cpuService.shutdown()
_shutdown = true
}
}
}
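// Hypothetical usage sketch (not part of the original file): the intended
// lifecycle of a Universe: start all actors, do the work, then shut down.
// Construction of BackupFolderConfiguration is not shown here, so it is taken
// as a parameter.
object UniverseLifecycleExample {
  def run(config: BackupFolderConfiguration): Unit = {
    val universe = new Universe(config)
    val started = Await.result(universe.startup(), 1.minute)
    require(started, "not all actors started up cleanly")
    try {
      // ... perform backup or restore work via universe.metadataStorageActor etc.
    } finally {
      // shutdown() first runs finish(), then closes the journal and terminates
      // the actor system and the thread pool.
      universe.shutdown()
    }
  }
}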
|
Stivo/DeScaBaTo
|
core/src/main/scala/ch/descabato/core/Universe.scala
|
Scala
|
gpl-3.0
| 4,535 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import java.net.URLEncoder
import java.util.Date
import javax.servlet.http.HttpServletRequest
import scala.collection.JavaConverters._
import scala.xml._
import org.apache.commons.lang3.StringEscapeUtils
import org.apache.spark.scheduler.StageInfo
import org.apache.spark.ui._
import org.apache.spark.ui.jobs.UIData.StageUIData
import org.apache.spark.util.Utils
private[ui] class StageTableBase(
request: HttpServletRequest,
stages: Seq[StageInfo],
tableHeaderID: String,
stageTag: String,
basePath: String,
subPath: String,
progressListener: JobProgressListener,
isFairScheduler: Boolean,
killEnabled: Boolean,
isFailedStage: Boolean) {
val allParameters = request.getParameterMap().asScala.toMap
val parameterOtherTable = allParameters.filterNot(_._1.startsWith(stageTag))
.map(para => para._1 + "=" + para._2(0))
val parameterStagePage = request.getParameter(stageTag + ".page")
val parameterStageSortColumn = request.getParameter(stageTag + ".sort")
val parameterStageSortDesc = request.getParameter(stageTag + ".desc")
val parameterStagePageSize = request.getParameter(stageTag + ".pageSize")
val parameterStagePrevPageSize = request.getParameter(stageTag + ".prevPageSize")
val stagePage = Option(parameterStagePage).map(_.toInt).getOrElse(1)
val stageSortColumn = Option(parameterStageSortColumn).map { sortColumn =>
UIUtils.decodeURLParameter(sortColumn)
}.getOrElse("Stage Id")
val stageSortDesc = Option(parameterStageSortDesc).map(_.toBoolean).getOrElse(
// New stages should be shown above old jobs by default.
if (stageSortColumn == "Stage Id") true else false
)
val stagePageSize = Option(parameterStagePageSize).map(_.toInt).getOrElse(100)
val stagePrevPageSize = Option(parameterStagePrevPageSize).map(_.toInt)
.getOrElse(stagePageSize)
val page: Int = {
// If the user has changed to a larger page size, then go to page 1 in order to avoid
// IndexOutOfBoundsException.
if (stagePageSize <= stagePrevPageSize) {
stagePage
} else {
1
}
}
val currentTime = System.currentTimeMillis()
val toNodeSeq = try {
new StagePagedTable(
stages,
tableHeaderID,
stageTag,
basePath,
subPath,
progressListener,
isFairScheduler,
killEnabled,
currentTime,
stagePageSize,
stageSortColumn,
stageSortDesc,
isFailedStage,
parameterOtherTable
).table(page)
} catch {
case e @ (_ : IllegalArgumentException | _ : IndexOutOfBoundsException) =>
<div class="alert alert-error">
<p>Error while rendering stage table:</p>
<pre>
{Utils.exceptionString(e)}
</pre>
</div>
}
}
private[ui] class StageTableRowData(
val stageInfo: StageInfo,
val stageData: Option[StageUIData],
val stageId: Int,
val attemptId: Int,
val schedulingPool: String,
val descriptionOption: Option[String],
val submissionTime: Long,
val formattedSubmissionTime: String,
val duration: Long,
val formattedDuration: String,
val inputRead: Long,
val inputReadWithUnit: String,
val outputWrite: Long,
val outputWriteWithUnit: String,
val shuffleRead: Long,
val shuffleReadWithUnit: String,
val shuffleWrite: Long,
val shuffleWriteWithUnit: String)
private[ui] class MissingStageTableRowData(
stageInfo: StageInfo,
stageId: Int,
attemptId: Int) extends StageTableRowData(
stageInfo, None, stageId, attemptId, "", None, 0, "", -1, "", 0, "", 0, "", 0, "", 0, "")
/** Page showing list of all ongoing and recently finished stages */
private[ui] class StagePagedTable(
stages: Seq[StageInfo],
tableHeaderId: String,
stageTag: String,
basePath: String,
subPath: String,
listener: JobProgressListener,
isFairScheduler: Boolean,
killEnabled: Boolean,
currentTime: Long,
pageSize: Int,
sortColumn: String,
desc: Boolean,
isFailedStage: Boolean,
parameterOtherTable: Iterable[String]) extends PagedTable[StageTableRowData] {
override def tableId: String = stageTag + "-table"
override def tableCssClass: String =
"table table-bordered table-condensed table-striped " +
"table-head-clickable table-cell-width-limited"
override def pageSizeFormField: String = stageTag + ".pageSize"
override def prevPageSizeFormField: String = stageTag + ".prevPageSize"
override def pageNumberFormField: String = stageTag + ".page"
val parameterPath = UIUtils.prependBaseUri(basePath) + s"/$subPath/?" +
parameterOtherTable.mkString("&")
override val dataSource = new StageDataSource(
stages,
listener,
currentTime,
pageSize,
sortColumn,
desc
)
override def pageLink(page: Int): String = {
val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
parameterPath +
s"&$pageNumberFormField=$page" +
s"&$stageTag.sort=$encodedSortColumn" +
s"&$stageTag.desc=$desc" +
s"&$pageSizeFormField=$pageSize" +
s"#$tableHeaderId"
}
override def goButtonFormPath: String = {
val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
s"$parameterPath&$stageTag.sort=$encodedSortColumn&$stageTag.desc=$desc#$tableHeaderId"
}
override def headers: Seq[Node] = {
    // stageHeadersAndCssClasses has three parts per column: the header title, optional tooltip
    // information, and whether the column is sortable.
    // The tooltip information may be None, meaning the column has no tooltip. Otherwise it has
    // two parts: the tooltip text and its position (true for left, false for the default on top).
val stageHeadersAndCssClasses: Seq[(String, Option[(String, Boolean)], Boolean)] =
Seq(("Stage Id", None, true)) ++
{if (isFairScheduler) {Seq(("Pool Name", None, true))} else Seq.empty} ++
Seq(
("Description", None, true), ("Submitted", None, true), ("Duration", None, true),
("Tasks: Succeeded/Total", None, false),
("Input", Some((ToolTips.INPUT, false)), true),
("Output", Some((ToolTips.OUTPUT, false)), true),
("Shuffle Read", Some((ToolTips.SHUFFLE_READ, false)), true),
("Shuffle Write", Some((ToolTips.SHUFFLE_WRITE, true)), true)
) ++
{if (isFailedStage) {Seq(("Failure Reason", None, false))} else Seq.empty}
if (!stageHeadersAndCssClasses.filter(_._3).map(_._1).contains(sortColumn)) {
throw new IllegalArgumentException(s"Unknown column: $sortColumn")
}
val headerRow: Seq[Node] = {
stageHeadersAndCssClasses.map { case (header, tooltip, sortable) =>
val headerSpan = tooltip.map { case (title, left) =>
if (left) {
/* Place the shuffle write tooltip on the left (rather than the default position
of on top) because the shuffle write column is the last column on the right side and
the tooltip is wider than the column, so it doesn't fit on top. */
<span data-toggle="tooltip" data-placement="left" title={title}>
{header}
</span>
} else {
<span data-toggle="tooltip" title={title}>
{header}
</span>
}
}.getOrElse(
{header}
)
if (header == sortColumn) {
val headerLink = Unparsed(
parameterPath +
s"&$stageTag.sort=${URLEncoder.encode(header, "UTF-8")}" +
s"&$stageTag.desc=${!desc}" +
s"&$stageTag.pageSize=$pageSize") +
s"#$tableHeaderId"
val arrow = if (desc) "▾" else "▴" // UP or DOWN
<th>
<a href={headerLink}>
{headerSpan}<span>
{Unparsed(arrow)}
</span>
</a>
</th>
} else {
if (sortable) {
val headerLink = Unparsed(
parameterPath +
s"&$stageTag.sort=${URLEncoder.encode(header, "UTF-8")}" +
s"&$stageTag.pageSize=$pageSize") +
s"#$tableHeaderId"
<th>
<a href={headerLink}>
{headerSpan}
</a>
</th>
} else {
<th>
{headerSpan}
</th>
}
}
}
}
<thead>{headerRow}</thead>
}
override def row(data: StageTableRowData): Seq[Node] = {
<tr id={"stage-" + data.stageId + "-" + data.attemptId}>
{rowContent(data)}
</tr>
}
private def rowContent(data: StageTableRowData): Seq[Node] = {
data.stageData match {
case None => missingStageRow(data.stageId)
case Some(stageData) =>
val info = data.stageInfo
{if (data.attemptId > 0) {
<td>{data.stageId} (retry {data.attemptId})</td>
} else {
<td>{data.stageId}</td>
}} ++
{if (isFairScheduler) {
<td>
<a href={"%s/stages/pool?poolname=%s"
.format(UIUtils.prependBaseUri(basePath), data.schedulingPool)}>
{data.schedulingPool}
</a>
</td>
} else {
Seq.empty
}} ++
<td>{makeDescription(info, data.descriptionOption)}</td>
<td valign="middle">
{data.formattedSubmissionTime}
</td>
<td>{data.formattedDuration}</td>
<td class="progress-cell">
{UIUtils.makeProgressBar(started = stageData.numActiveTasks,
completed = stageData.completedIndices.size, failed = stageData.numFailedTasks,
skipped = 0, reasonToNumKilled = stageData.reasonToNumKilled, total = info.numTasks)}
</td>
<td>{data.inputReadWithUnit}</td>
<td>{data.outputWriteWithUnit}</td>
<td>{data.shuffleReadWithUnit}</td>
<td>{data.shuffleWriteWithUnit}</td> ++
{
if (isFailedStage) {
failureReasonHtml(info)
} else {
Seq.empty
}
}
}
}
private def failureReasonHtml(s: StageInfo): Seq[Node] = {
val failureReason = s.failureReason.getOrElse("")
    val isMultiline = failureReason.indexOf('\n') >= 0
// Display the first line by default
val failureReasonSummary = StringEscapeUtils.escapeHtml4(
if (isMultiline) {
        failureReason.substring(0, failureReason.indexOf('\n'))
} else {
failureReason
})
val details = if (isMultiline) {
// scalastyle:off
<span onclick="this.parentNode.querySelector('.stacktrace-details').classList.toggle('collapsed')"
class="expand-details">
+details
</span> ++
<div class="stacktrace-details collapsed">
<pre>{failureReason}</pre>
</div>
// scalastyle:on
} else {
""
}
<td valign="middle">{failureReasonSummary}{details}</td>
}
private def makeDescription(s: StageInfo, descriptionOption: Option[String]): Seq[Node] = {
val basePathUri = UIUtils.prependBaseUri(basePath)
val killLink = if (killEnabled) {
val confirm =
s"if (window.confirm('Are you sure you want to kill stage ${s.stageId} ?')) " +
"{ this.parentNode.submit(); return true; } else { return false; }"
// SPARK-6846 this should be POST-only but YARN AM won't proxy POST
/*
val killLinkUri = s"$basePathUri/stages/stage/kill/"
<form action={killLinkUri} method="POST" style="display:inline">
<input type="hidden" name="id" value={s.stageId.toString}/>
<a href="#" onclick={confirm} class="kill-link">(kill)</a>
</form>
*/
val killLinkUri = s"$basePathUri/stages/stage/kill/?id=${s.stageId}"
<a href={killLinkUri} onclick={confirm} class="kill-link">(kill)</a>
} else {
Seq.empty
}
val nameLinkUri = s"$basePathUri/stages/stage?id=${s.stageId}&attempt=${s.attemptId}"
val nameLink = <a href={nameLinkUri} class="name-link">{s.name}</a>
val cachedRddInfos = s.rddInfos.filter(_.numCachedPartitions > 0)
val details = if (s.details.nonEmpty) {
<span onclick="this.parentNode.querySelector('.stage-details').classList.toggle('collapsed')"
class="expand-details">
+details
</span> ++
<div class="stage-details collapsed">
{if (cachedRddInfos.nonEmpty) {
Text("RDD: ") ++
cachedRddInfos.map { i =>
<a href={s"$basePathUri/storage/rdd?id=${i.id}"}>{i.name}</a>
}
}}
<pre>{s.details}</pre>
</div>
}
val stageDesc = descriptionOption.map(UIUtils.makeDescription(_, basePathUri))
<div>{stageDesc.getOrElse("")} {killLink} {nameLink} {details}</div>
}
protected def missingStageRow(stageId: Int): Seq[Node] = {
<td>{stageId}</td> ++
{if (isFairScheduler) {<td>-</td>} else Seq.empty} ++
<td>No data available for this stage</td> ++ // Description
<td></td> ++ // Submitted
<td></td> ++ // Duration
<td></td> ++ // Tasks: Succeeded/Total
<td></td> ++ // Input
<td></td> ++ // Output
<td></td> ++ // Shuffle Read
<td></td> // Shuffle Write
}
}
private[ui] class StageDataSource(
stages: Seq[StageInfo],
listener: JobProgressListener,
currentTime: Long,
pageSize: Int,
sortColumn: String,
desc: Boolean) extends PagedDataSource[StageTableRowData](pageSize) {
// Convert StageInfo to StageTableRowData which contains the final contents to show in the table
// so that we can avoid creating duplicate contents during sorting the data
private val data = stages.map(stageRow).sorted(ordering(sortColumn, desc))
private var _slicedStageIds: Set[Int] = _
override def dataSize: Int = data.size
override def sliceData(from: Int, to: Int): Seq[StageTableRowData] = {
val r = data.slice(from, to)
_slicedStageIds = r.map(_.stageId).toSet
r
}
private def stageRow(s: StageInfo): StageTableRowData = {
val stageDataOption = listener.stageIdToData.get((s.stageId, s.attemptId))
if (stageDataOption.isEmpty) {
return new MissingStageTableRowData(s, s.stageId, s.attemptId)
}
val stageData = stageDataOption.get
val description = stageData.description
val formattedSubmissionTime = s.submissionTime match {
case Some(t) => UIUtils.formatDate(new Date(t))
case None => "Unknown"
}
val finishTime = s.completionTime.getOrElse(currentTime)
// The submission time for a stage is misleading because it counts the time
// the stage waits to be launched. (SPARK-10930)
val taskLaunchTimes =
stageData.taskData.values.map(_.taskInfo.launchTime).filter(_ > 0)
val duration: Option[Long] =
if (taskLaunchTimes.nonEmpty) {
val startTime = taskLaunchTimes.min
if (finishTime > startTime) {
Some(finishTime - startTime)
} else {
Some(currentTime - startTime)
}
} else {
None
}
val formattedDuration = duration.map(d => UIUtils.formatDuration(d)).getOrElse("Unknown")
val inputRead = stageData.inputBytes
val inputReadWithUnit = if (inputRead > 0) Utils.bytesToString(inputRead) else ""
val outputWrite = stageData.outputBytes
val outputWriteWithUnit = if (outputWrite > 0) Utils.bytesToString(outputWrite) else ""
val shuffleRead = stageData.shuffleReadTotalBytes
val shuffleReadWithUnit = if (shuffleRead > 0) Utils.bytesToString(shuffleRead) else ""
val shuffleWrite = stageData.shuffleWriteBytes
val shuffleWriteWithUnit = if (shuffleWrite > 0) Utils.bytesToString(shuffleWrite) else ""
new StageTableRowData(
s,
stageDataOption,
s.stageId,
s.attemptId,
stageData.schedulingPool,
description,
s.submissionTime.getOrElse(0),
formattedSubmissionTime,
duration.getOrElse(-1),
formattedDuration,
inputRead,
inputReadWithUnit,
outputWrite,
outputWriteWithUnit,
shuffleRead,
shuffleReadWithUnit,
shuffleWrite,
shuffleWriteWithUnit
)
}
/**
* Return Ordering according to sortColumn and desc
*/
private def ordering(sortColumn: String, desc: Boolean): Ordering[StageTableRowData] = {
val ordering: Ordering[StageTableRowData] = sortColumn match {
case "Stage Id" => Ordering.by(_.stageId)
case "Pool Name" => Ordering.by(_.schedulingPool)
case "Description" => Ordering.by(x => (x.descriptionOption, x.stageInfo.name))
case "Submitted" => Ordering.by(_.submissionTime)
case "Duration" => Ordering.by(_.duration)
case "Input" => Ordering.by(_.inputRead)
case "Output" => Ordering.by(_.outputWrite)
case "Shuffle Read" => Ordering.by(_.shuffleRead)
case "Shuffle Write" => Ordering.by(_.shuffleWrite)
case "Tasks: Succeeded/Total" =>
throw new IllegalArgumentException(s"Unsortable column: $sortColumn")
case unknownColumn => throw new IllegalArgumentException(s"Unknown column: $unknownColumn")
}
if (desc) {
ordering.reverse
} else {
ordering
}
}
}
|
MLnick/spark
|
core/src/main/scala/org/apache/spark/ui/jobs/StageTable.scala
|
Scala
|
apache-2.0
| 17,912 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.util
import java.io.{IOException, Closeable, EOFException}
import java.nio.ByteBuffer
import org.apache.hadoop.conf.Configuration
import org.apache.spark.Logging
/**
* A reader for reading write ahead log files written using
* [[org.apache.spark.streaming.util.FileBasedWriteAheadLogWriter]]. This reads
* the records (bytebuffers) in the log file sequentially and return them as an
* iterator of bytebuffers.
*/
private[streaming] class FileBasedWriteAheadLogReader(path: String, conf: Configuration)
extends Iterator[ByteBuffer] with Closeable with Logging {
private val instream = HdfsUtils.getInputStream(path, conf)
private var closed = (instream == null) // the file may be deleted as we're opening the stream
private var nextItem: Option[ByteBuffer] = None
override def hasNext: Boolean = synchronized {
if (closed) {
return false
}
if (nextItem.isDefined) { // handle the case where hasNext is called without calling next
true
} else {
try {
val length = instream.readInt()
val buffer = new Array[Byte](length)
instream.readFully(buffer)
nextItem = Some(ByteBuffer.wrap(buffer))
logTrace("Read next item " + nextItem.get)
true
} catch {
case e: EOFException =>
logDebug("Error reading next item, EOF reached", e)
close()
false
case e: IOException =>
logWarning("Error while trying to read data. If the file was deleted, " +
"this should be okay.", e)
close()
if (HdfsUtils.checkFileExists(path, conf)) {
// If file exists, this could be a legitimate error
throw e
} else {
// File was deleted. This can occur when the daemon cleanup thread takes time to
// delete the file during recovery.
false
}
case e: Exception =>
logWarning("Error while trying to read data from HDFS.", e)
close()
throw e
}
}
}
override def next(): ByteBuffer = synchronized {
val data = nextItem.getOrElse {
close()
throw new IllegalStateException(
"next called without calling hasNext or after hasNext returned false")
}
nextItem = None // Ensure the next hasNext call loads new data.
data
}
override def close(): Unit = synchronized {
if (!closed) {
instream.close()
}
closed = true
}
}
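// Illustrative sketch (not part of the original file): draining a write ahead log
// file with the reader above. The path and Hadoop Configuration are supplied by
// the caller.
private[streaming] object FileBasedWriteAheadLogReaderExample {
  def readAll(path: String, conf: Configuration): Seq[Array[Byte]] = {
    val reader = new FileBasedWriteAheadLogReader(path, conf)
    try {
      reader.map { buffer =>
        // Copy each record out of its ByteBuffer before materializing the result.
        val bytes = new Array[Byte](buffer.remaining())
        buffer.get(bytes)
        bytes
      }.toVector
    } finally {
      reader.close() // close() is idempotent, so this is safe even after EOF
    }
  }
}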
|
chenc10/Spark-PAF
|
streaming/src/main/scala/org/apache/spark/streaming/util/FileBasedWriteAheadLogReader.scala
|
Scala
|
apache-2.0
| 3,299 |
package org.flowpaint.util
/**
*
*
* @author Hans Haggstrom
*/
case class RectangleImpl( x1 : Int, y1 : Int, width : Int, height : Int ) extends Rectangle
|
zzorn/flowpaint
|
src/main/scala/org/flowpaint/util/RectangleImpl.scala
|
Scala
|
gpl-2.0
| 166 |
package org.bowlerframework.persistence
import org.squeryl.{KeyedEntity, Table}
import org.squeryl.PrimitiveTypeMode._
import com.recursivity.commons.bean.{GenericTypeDefinition}
import org.squeryl.dsl.QueryYield
/**
 * Generic Squeryl-backed [[Dao]] implementation. The concrete entity and key
 * classes are derived from the implicit manifests, mapping primitive key types
 * (Long, Int, Float, Double, Boolean, Short) to their corresponding Java classes.
 *
 * Created by wfaler on 30/01/2011.
 */
abstract class SquerylDao[T <: KeyedEntity[K], K](table: Table[T])(implicit m : scala.Predef.Manifest[T], k: Manifest[K]) extends Dao[T,K]{
private val typeString = m.toString.replace("[", "<").replace("]", ">")
private val keyString = k.toString.replace("[", "<").replace("]", ">")
private val typeDef = GenericTypeDefinition(typeString)
private val keyDef = GenericTypeDefinition(keyString)
def entityType = Class.forName(typeDef.clazz).asInstanceOf[Class[T]]
var fieldCls: Class[_] = null
keyDef.clazz match {
case "Long" => fieldCls = classOf[Long]
case "Int" => fieldCls = classOf[java.lang.Integer]
case "Float" => fieldCls = classOf[java.lang.Float]
case "Double" => fieldCls = classOf[java.lang.Double]
case "Boolean" => fieldCls = classOf[Boolean]
case "Short" => fieldCls = classOf[java.lang.Short]
case _ => fieldCls = Class.forName(keyDef.clazz)
}
def keyType = fieldCls.asInstanceOf[Class[K]]
def create(entity: T) = table.insert(entity)
def update(entity: T) = table.update(entity)
def findAll(offset: Int = 0, results: Int = Integer.MAX_VALUE) = from(table)(a => select(a)).page(offset, results).toList
def delete(entity: T) = table.delete(entity.id)
}
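// Illustrative note (not part of the original file): a concrete DAO is obtained by
// binding the entity and key types to a Squeryl table, for example (names here are
// hypothetical):
//
//   class PersonDao extends SquerylDao[Person, Long](AppSchema.people)
//
// where Person extends KeyedEntity[Long], AppSchema.people is a Table[Person]
// defined on an org.squeryl.Schema, and PersonDao also implements any members of
// Dao not covered by SquerylDao.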
|
rkpandey/Bowler
|
persistence-mapper/src/test/scala/org/bowlerframework/persistence/SquerylDao.scala
|
Scala
|
bsd-3-clause
| 1,620 |
package scommons.client.controller
import io.github.shogowada.scalajs.reactjs.router.RouterProps.RouterProps
class RouteParams(props: RouterProps) {
def pathParams: PathParams = PathParams(props.location.pathname)
def allParams: PathParams = {
val location = props.location
PathParams(s"${location.pathname}${location.search}")
}
def push(url: String): Unit = props.history.push(url)
}
|
viktor-podzigun/scommons
|
ui/src/main/scala/scommons/client/controller/RouteParams.scala
|
Scala
|
apache-2.0
| 411 |
package com.rasterfoundry.datamodel
import io.circe._
import cats.syntax.either._
sealed abstract class ObjectType(val repr: String) {
override def toString = repr
}
object ObjectType {
case object Project extends ObjectType("PROJECT")
case object Scene extends ObjectType("SCENE")
case object Datasource extends ObjectType("DATASOURCE")
case object Shape extends ObjectType("SHAPE")
case object Workspace extends ObjectType("WORKSPACE")
case object Template extends ObjectType("TEMPLATE")
case object Analysis extends ObjectType("ANALYSIS")
case object Platform extends ObjectType("PLATFORM")
case object Organization extends ObjectType("ORGANIZATION")
case object Team extends ObjectType("TEAM")
case object User extends ObjectType("USER")
case object Upload extends ObjectType("UPLOAD")
case object Export extends ObjectType("EXPORT")
case object Feed extends ObjectType("FEED")
case object MapToken extends ObjectType("MAPTOKEN")
case object License extends ObjectType("LICENSE")
case object ToolTag extends ObjectType("TOOLTAG")
case object ToolCategory extends ObjectType("TOOLCATEGORY")
case object AOI extends ObjectType("AOI")
def fromString(s: String): ObjectType = s.toUpperCase match {
case "PROJECT" => Project
case "SCENE" => Scene
case "DATASOURCE" => Datasource
case "SHAPE" => Shape
case "WORKSPACE" => Workspace
case "TEMPLATE" => Template
case "ANALYSIS" => Analysis
case "PLATFORM" => Platform
case "ORGANIZATION" => Organization
case "TEAM" => Team
case "USER" => User
case "UPLOAD" => Upload
case "EXPORT" => Export
case "FEED" => Feed
case "MAPTOKEN" => MapToken
case "LICENSE" => License
case "TOOLTAG" => ToolTag
case "TOOLCATEGORY" => ToolCategory
case "AOI" => AOI
case _ => throw new Exception(s"Invalid ObjectType: ${s}")
}
implicit val ObjectTypeEncoder: Encoder[ObjectType] =
Encoder.encodeString.contramap[ObjectType](_.toString)
implicit val ObjectTypeDecoder: Decoder[ObjectType] =
Decoder.decodeString.emap { s =>
Either.catchNonFatal(fromString(s)).leftMap(_ => "ObjectType")
}
implicit val objectTypeKeyDecoder: KeyDecoder[ObjectType] =
new KeyDecoder[ObjectType] {
override def apply(key: String): Option[ObjectType] =
Some(ObjectType.fromString(key))
}
implicit val objectTypeKeyEncoder: KeyEncoder[ObjectType] =
new KeyEncoder[ObjectType] {
override def apply(objectType: ObjectType): String = objectType.toString
}
}
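// Illustrative sketch (not part of the original file): round-tripping an ObjectType
// through the circe codecs defined above.
object ObjectTypeExample {
  import io.circe.syntax._

  def roundTrip(): Either[DecodingFailure, ObjectType] = {
    val json = (ObjectType.Scene: ObjectType).asJson // encodes as the JSON string "SCENE"
    json.as[ObjectType]                              // decodes back to ObjectType.Scene
  }
}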
|
azavea/raster-foundry
|
app-backend/datamodel/src/main/scala/ObjectType.scala
|
Scala
|
apache-2.0
| 2,660 |
package com.eevolution.context.dictionary.domain.model
import ai.x.play.json.Jsonx
import com.eevolution.context.dictionary.api.{ActiveEnabled, DomainModel, Identifiable, Traceable}
import org.joda.time.DateTime
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com
*/
/**
* Workflow Process entity
* @param workflowProcessId Wf Process ID
* @param tenantId Tenant ID
* @param organizationId Organization ID
* @param isActive Is Active
* @param created Created
* @param createdBy Created By
* @param updated Updated
* @param updatedBy Updated By
* @param workFlowId Work Flow
* @param workflowResponsiveId Wf Responsive ID
* @param userId User ID
* @param workflowState Wf State
* @param messageId Message ID
* @param processing Processing
* @param processed Processed
* @param textMsg Text Msg
* @param entityId Entity ID
* @param recordId Record ID
* @param priority Priority
* @param uuid UUID
*/
case class WorkflowProcess(workflowProcessId: Int,
tenantId: Int,
organizationId : Int ,
isActive: Boolean = true,
created: DateTime = DateTime.now,
createdBy: Int,
updated: DateTime = DateTime.now,
updatedBy: Int,
workFlowId: Int,
workflowResponsiveId: Int,
userId: Option[Int],
workflowState: String,
messageId: Option[Int],
processing: Option[Boolean],
processed: Boolean = false,
textMsg: Option[String],
entityId: Int,
recordId: Int,
priority: Option[Int],
uuid: String
) extends DomainModel
with ActiveEnabled
with Identifiable
with Traceable {
override type ActiveEnabled = this.type
override type Identifiable = this.type
override type Traceable = this.type
override def Id: Int = workflowProcessId
override val entityName: String = "AD_Wf_Process"
override val identifier: String = "AD_Wf_Process_ID"
}
object WorkflowProcess {
implicit lazy val jsonFormat = Jsonx.formatCaseClass[WorkflowProcess]
def create(workflowProcessId: Int,
tenantId: Int,
organizationId : Int ,
isActive: Boolean,
created: DateTime,
createdBy: Int,
updated: DateTime,
updatedBy: Int,
workFlowId: Int,
workflowResponsiveId: Int,
userId: Int,
workflowState: String,
messageId: Int,
processing: Boolean,
processed: Boolean,
textMsg: String,
entityId: Int,
recordId: Int,
priority: Int,
uuid: String) = WorkflowProcess(workflowProcessId, tenantId, organizationId, isActive, created,
    createdBy, updated, updatedBy, workFlowId, workflowResponsiveId, Some(userId), workflowState, Some(messageId),
    Some(processing), processed, Some(textMsg), entityId, recordId, Some(priority), uuid)
}
|
adempiere/ADReactiveSystem
|
dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/model/WorkflowProcess.scala
|
Scala
|
gpl-3.0
| 4,140 |
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.codecs.enumeratum.values
import kantan.codecs.enumeratum.laws.discipline.EnumeratedInt
import kantan.codecs.enumeratum.laws.discipline.arbitrary._
import kantan.codecs.laws.discipline.{StringCodecTests, StringDecoderTests, StringEncoderTests}
import kantan.codecs.laws.discipline.DisciplineSuite
class IntEnumCodecTests extends DisciplineSuite {
checkAll("StringDecoder[EnumeratedInt]", StringDecoderTests[EnumeratedInt].decoder[Int, Int])
checkAll("StringEncoder[EnumeratedInt]", StringEncoderTests[EnumeratedInt].encoder[Int, Int])
checkAll("StringCodec[EnumeratedInt]", StringCodecTests[EnumeratedInt].codec[Int, Int])
}
|
nrinaudo/kantan.codecs
|
enumeratum/core/shared/src/test/scala/kantan/codecs/enumeratum/values/IntEnumCodecTests.scala
|
Scala
|
apache-2.0
| 1,249 |
package com.github.projectflink.spark
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.zookeeper.KeeperException.SystemErrorException
import _root_.scala.collection.mutable
object Pagerank {
def main(args: Array[String]): Unit = {
val master = args(0)
val numVertices = args(1).toInt
val sparsity = args(2).toDouble
val maxIterations = args(3).toInt
val output = if(args.length > 4){
args(4)
}else{
null
}
val input = args(5)
val dop = args(6).toInt
val dampingFactor = 0.85
val threshold: Double = 0.005 / numVertices
val conf = new SparkConf().setAppName("Spark pagerank").setMaster(master)
conf.set("spark.hadoop.skipOutputChecks", "false")
implicit val sc = new SparkContext(conf)
val inData : RDD[String] = sc.textFile(input.toString)
val adjacencyMatrix = inData.repartition(dop).map{ line =>
val sp = line.split(" ").map(_.toInt)
(sp(0), sp.tail)
}
    val adjacencyMatrixCached = adjacencyMatrix.cache()
var inPagerank = adjacencyMatrixCached.map { tup =>
(tup._1, 1.0/numVertices)
}
var i = 0
    var terminated = false
    while (i < maxIterations && !terminated) {
      i = i + 1
      println("++++ Starting next iteration")
val outPagerank = inPagerank.join(adjacencyMatrixCached, dop).flatMap {
case (node, (rank, neighboursIt)) => {
val neighbours = neighboursIt.toSeq
neighbours.map {
(_, dampingFactor * rank / neighbours.length)
} :+ (node, (1 - dampingFactor) / numVertices)
}
}.reduceByKey(_ + _, dop)
//compute termination criterion
val count = outPagerank.join(inPagerank).flatMap {
case (node, (r1, r2)) => {
          val delta = Math.abs(r1 - r2)
if(delta > threshold) {
Some(1)
}else{
None
}
}
}.count()
print("count = "+count+" at iteration "+i)
if(count == 0) {
        terminated = true
}
// set new inPr
inPagerank = outPagerank
}
// inPagerank is the outPageRank at this point.
if(output != null) {
inPagerank.saveAsTextFile(output+"_spark")
}else{
inPagerank.foreach(println _)
}
}
}
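// Illustrative note (not part of the original file): the input format implied by the
// parsing above is one line per vertex, listing the vertex id followed by its
// out-neighbours, all space-separated, e.g.
//   1 2 3
//   2 3
//   3 1
// Every rank starts at 1/numVertices and each iteration computes
//   rank'(v) = (1 - dampingFactor) / numVertices
//              + dampingFactor * sum over in-neighbours u of rank(u) / outDegree(u)
// until no rank changes by more than the threshold or maxIterations is reached.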
|
dataArtisans/performance
|
spark-jobs/src/main/scala/com/github/projectflink/spark/Pagerank.scala
|
Scala
|
apache-2.0
| 2,375 |
class Hello2 { def speak: String = "Hello" }
class Hi2 extends Hello2 {
override def speak: String = "Hi"
def hi: String = speak
}
class Speak2(override val speak: String) extends Hello2
|
grzegorzbalcerek/scala-book-examples
|
examples/Extending2.scala
|
Scala
|
mit
| 191 |
package com.thetestpeople.trt.analysis
import com.thetestpeople.trt.model._
import com.thetestpeople.trt.service.Clock
import com.thetestpeople.trt.utils.HasLogger
import com.thetestpeople.trt.utils.Utils
import com.thetestpeople.trt.utils.LockUtils._
import com.thetestpeople.trt.utils.CoalescingBlockingQueue
import java.util.concurrent._
import java.util.concurrent.locks.ReentrantLock
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.locks.Lock
/**
* @param async -- if true, process analysis asynchronously on background worker threads. If false, perform the analysis
* immediately when scheduled (useful for predictable testing).
*/
class AnalysisService(dao: Dao, clock: Clock, async: Boolean = true) extends HasLogger {
private val analysisResultLock: Lock = new ReentrantLock
private var historicalTestCountsByConfig: Map[Configuration, HistoricalTestCountsTimeline] = Map()
private var executionVolumeAnalysisResultOpt: Option[ExecutionVolumeAnalysisResult] = None
/**
* Queue of tests which need their analysis updating
*/
private val testQueue: CoalescingBlockingQueue[Id[Test]] = new CoalescingBlockingQueue
private def launchAnalyserThread() {
new Thread(new Runnable() {
def run() =
while (true)
handleOneQueueItem()
}).start()
}
if (async)
launchAnalyserThread()
private def handleOneQueueItem() {
val testId = testQueue.take()
// logger.debug("Remaining # of tests to analyse: " + testQueue.size)
try
analyseTest(testId)
catch {
case e: Exception ⇒
logger.error(s"Problem analysing test $testId, skipping", e)
}
}
def scheduleAnalysis(testIds: Seq[Id[Test]]) =
if (async)
testIds.foreach(testQueue.offer)
else
testIds.foreach(analyseTest)
def analyseTest(testId: Id[Test]) = dao.transaction {
val testAnalyser = new TestAnalyser(clock, dao.getSystemConfiguration)
val executions = dao.getExecutionsForTest(testId)
for {
(configuration, executionsForConfig) ← executions.groupBy(_.configuration)
analysis ← testAnalyser.analyse(executionsForConfig)
} updateAnalysis(testId, configuration, analysis)
}
private def updateAnalysis(testId: Id[Test], configuration: Configuration, analysis: TestAnalysis) {
dao.upsertAnalysis(Analysis(
testId = testId,
configuration = configuration,
status = analysis.status,
weather = analysis.weather,
consecutiveFailures = analysis.consecutiveFailures,
failingSinceOpt = analysis.failingSinceOpt,
lastPassedExecutionIdOpt = analysis.lastPassedExecutionOpt.map(_.id),
lastPassedTimeOpt = analysis.lastPassedExecutionOpt.map(_.executionTime),
lastFailedExecutionIdOpt = analysis.lastFailedExecutionOpt.map(_.id),
lastFailedTimeOpt = analysis.lastFailedExecutionOpt.map(_.executionTime),
whenAnalysed = analysis.whenAnalysed,
medianDurationOpt = analysis.medianDurationOpt,
lastSummaryOpt = analysis.lastSummaryOpt))
logger.debug(s"Updated analysis for test $testId")
}
private def getHistoricalTestAnalyser() = {
val systemConfiguration = dao.getSystemConfiguration
val executionIntervalsByConfig = dao.getExecutionIntervalsByConfig
new HistoricalTestAnalyser(executionIntervalsByConfig, systemConfiguration)
}
def analyseAllExecutions() = dao.transaction {
val executionVolumeAnalyser = new ExecutionVolumeAnalyser
val historicalTestAnalyser = getHistoricalTestAnalyser()
dao.iterateAllExecutions { executions ⇒
for (executionGroup ← new ExecutionGroupIterator(executions)) {
historicalTestAnalyser.executionGroup(executionGroup)
executionVolumeAnalyser.executionGroup(executionGroup)
}
}
analysisResultLock.withLock {
historicalTestCountsByConfig = historicalTestAnalyser.finalise
executionVolumeAnalysisResultOpt = Some(executionVolumeAnalyser.finalise)
}
}
def deleteAll() = analysisResultLock.withLock {
logger.debug("Clearing analysis results")
historicalTestCountsByConfig = Map()
executionVolumeAnalysisResultOpt = None
}
def clearHistoricalTestCounts() = analysisResultLock.withLock {
historicalTestCountsByConfig = Map()
}
def getAllHistoricalTestCounts: AllHistoricalTestCounts = analysisResultLock.withLock {
new AllHistoricalTestCounts(historicalTestCountsByConfig)
}
def getExecutionVolume(configurationOpt: Option[Configuration]): Option[ExecutionVolume] = analysisResultLock.withLock {
for {
analysisResult ← executionVolumeAnalysisResultOpt
volume ← analysisResult.getExecutionVolume(configurationOpt)
if volume.countsByDate.nonEmpty
} yield volume
}
}
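// Hypothetical usage sketch (not part of the original file): with async = false the
// service analyses tests inline, which keeps behaviour deterministic in tests; the
// dao and clock instances are assumed to be supplied by the caller.
class SynchronousAnalysisExample(dao: Dao, clock: Clock) {
  private val analysisService = new AnalysisService(dao, clock, async = false)

  def recompute(testIds: Seq[Id[Test]]): AllHistoricalTestCounts = {
    analysisService.scheduleAnalysis(testIds) // runs immediately because async = false
    analysisService.analyseAllExecutions()    // refreshes the historical counts
    analysisService.getAllHistoricalTestCounts
  }
}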
|
thetestpeople/trt
|
app/com/thetestpeople/trt/analysis/AnalysisService.scala
|
Scala
|
mit
| 4,772 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io._
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.RDDCheckpointData
import org.apache.spark.util.{MetadataCleanerType, MetadataCleaner, TimeStampedHashMap}
private[spark] object ResultTask {
// A simple map between the stage id to the serialized byte array of a task.
// Served as a cache for task serialization because serialization can be
// expensive on the master node if it needs to launch thousands of tasks.
val serializedInfoCache = new TimeStampedHashMap[Int, Array[Byte]]
val metadataCleaner = new MetadataCleaner(MetadataCleanerType.RESULT_TASK, serializedInfoCache.clearOldValues)
def serializeInfo(stageId: Int, rdd: RDD[_], func: (TaskContext, Iterator[_]) => _): Array[Byte] = {
synchronized {
val old = serializedInfoCache.get(stageId).orNull
if (old != null) {
old
} else {
val out = new ByteArrayOutputStream
val ser = SparkEnv.get.closureSerializer.newInstance()
val objOut = ser.serializeStream(new GZIPOutputStream(out))
objOut.writeObject(rdd)
objOut.writeObject(func)
objOut.close()
val bytes = out.toByteArray
serializedInfoCache.put(stageId, bytes)
bytes
}
}
}
def deserializeInfo(stageId: Int, bytes: Array[Byte]): (RDD[_], (TaskContext, Iterator[_]) => _) = {
val loader = Thread.currentThread.getContextClassLoader
val in = new GZIPInputStream(new ByteArrayInputStream(bytes))
val ser = SparkEnv.get.closureSerializer.newInstance()
val objIn = ser.deserializeStream(in)
val rdd = objIn.readObject().asInstanceOf[RDD[_]]
val func = objIn.readObject().asInstanceOf[(TaskContext, Iterator[_]) => _]
(rdd, func)
}
def clearCache() {
synchronized {
serializedInfoCache.clear()
}
}
}
/**
* A task that sends back the output to the driver application.
*
* See [[org.apache.spark.scheduler.Task]] for more information.
*
* @param stageId id of the stage this task belongs to
* @param rdd input to func
* @param func a function to apply on a partition of the RDD
* @param _partitionId index of the number in the RDD
* @param locs preferred task execution locations for locality scheduling
* @param outputId index of the task in this job (a job can launch tasks on only a subset of the
* input RDD's partitions).
*/
private[spark] class ResultTask[T, U](
stageId: Int,
var rdd: RDD[T],
var func: (TaskContext, Iterator[T]) => U,
_partitionId: Int,
@transient locs: Seq[TaskLocation],
var outputId: Int)
extends Task[U](stageId, _partitionId) with Externalizable {
def this() = this(0, null, null, 0, null, 0)
var split = if (rdd == null) null else rdd.partitions(partitionId)
@transient private val preferredLocs: Seq[TaskLocation] = {
if (locs == null) Nil else locs.toSet.toSeq
}
override def runTask(context: TaskContext): U = {
metrics = Some(context.taskMetrics)
try {
func(context, rdd.iterator(split, context))
} finally {
context.executeOnCompleteCallbacks()
}
}
override def preferredLocations: Seq[TaskLocation] = preferredLocs
override def toString = "ResultTask(" + stageId + ", " + partitionId + ")"
override def writeExternal(out: ObjectOutput) {
RDDCheckpointData.synchronized {
split = rdd.partitions(partitionId)
out.writeInt(stageId)
val bytes = ResultTask.serializeInfo(
stageId, rdd, func.asInstanceOf[(TaskContext, Iterator[_]) => _])
out.writeInt(bytes.length)
out.write(bytes)
out.writeInt(partitionId)
out.writeInt(outputId)
out.writeLong(epoch)
out.writeObject(split)
}
}
override def readExternal(in: ObjectInput) {
val stageId = in.readInt()
val numBytes = in.readInt()
val bytes = new Array[Byte](numBytes)
in.readFully(bytes)
val (rdd_, func_) = ResultTask.deserializeInfo(stageId, bytes)
rdd = rdd_.asInstanceOf[RDD[T]]
func = func_.asInstanceOf[(TaskContext, Iterator[T]) => U]
partitionId = in.readInt()
outputId = in.readInt()
epoch = in.readLong()
split = in.readObject().asInstanceOf[Partition]
}
}
|
mkolod/incubator-spark
|
core/src/main/scala/org/apache/spark/scheduler/ResultTask.scala
|
Scala
|
apache-2.0
| 5,117 |
package com.twitter.scalding
import com.twitter.algebird.Semigroup
object ExecutionUtil {
/**
* Generate a list of executions from a date range
*
* @param duration
* Duration to split daterange
* @param fn
   *   Function to run an execution given a date range
* @return
* Sequence of Executions per Day
*/
def executionsFromDates[T](duration: Duration)(fn: DateRange => Execution[T])(implicit
dr: DateRange
): Seq[Execution[T]] =
dr.each(duration).map(fn).toSeq
/**
* Split a DateRange and allow for max parallel running of executions
*
* @param duration
* Duration to split daterange
* @param parallelism
* How many jobs to run in parallel
* @param fn
   *   Function to run an execution given a date range
* @return
* Seq of Dates split by Duration with corresponding execution result
*/
def runDatesWithParallelism[T](duration: Duration, parallelism: Int = 1)(
fn: DateRange => Execution[T]
)(implicit dr: DateRange): Execution[Seq[(DateRange, T)]] = {
val dates = dr.each(duration).toSeq
Execution.withParallelism(dates.map(fn), parallelism).map(e => dates.zip(e))
}
/**
* Split a DateRange and allow for max parallel running of executions
*
* @param duration
* Duration to split daterange
* @param parallelism
* How many jobs to run in parallel
* @param fn
   *   Function to run an execution given a date range
* @return
* Execution of Sequences
*/
def runDateRangeWithParallelism[T](duration: Duration, parallelism: Int = 1)(fn: DateRange => Execution[T])(
implicit dr: DateRange
): Execution[Seq[T]] =
runDatesWithParallelism(duration, parallelism)(fn).map(_.map { case (_, t) => t })
/**
* Same as runDateRangeWithParallelism, but sums the sequence of values after running. This is useful when
* you want to do a calculation in parallel over many durations and join the results together.
*
* For example, a common use case is when T is a TypedPipe[U] and you want to independently compute the
* pipes on each day and union them into a single TypedPipe at the end.
*
* Another possible use case would be if the executions were created by summing intermediate monoids (e.g. T
* was a Map[String,HLL] since algebird supports monoids for maps and hll) and you wanted to do a final
* aggregation of the Monoids computed for each duration.
*/
def runDateRangeWithParallelismSum[T](duration: Duration, parallelism: Int = 1)(
fn: DateRange => Execution[T]
)(implicit dr: DateRange, semigroup: Semigroup[T]): Execution[T] = {
require(dr.each(duration).nonEmpty, "Date Range can not be empty")
runDateRangeWithParallelism(duration, parallelism)(fn)(dr)
.map(_.reduceLeft[T] { case (l, r) => Semigroup.plus(l, r) })
}
}
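// A minimal usage sketch for the helpers above: split the implicit DateRange by
// `duration`, run at most `parallelism` slices at once, and combine the per-slice
// results with a Semigroup. `ExecutionUtilUsageSketch`, `sumOverSlices` and `perDay`
// are hypothetical names; any DateRange => Execution[T] works as the per-slice function.
object ExecutionUtilUsageSketch {
  def sumOverSlices[T: Semigroup](duration: Duration, parallelism: Int = 4)(
      perDay: DateRange => Execution[T]
  )(implicit dr: DateRange): Execution[T] =
    // splits `dr` by `duration`, runs the slices with bounded parallelism,
    // and sums the per-slice results with the implicit Semigroup[T]
    ExecutionUtil.runDateRangeWithParallelismSum(duration, parallelism)(perDay)
}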
|
twitter/scalding
|
scalding-core/src/main/scala/com/twitter/scalding/ExecutionUtil.scala
|
Scala
|
apache-2.0
| 2,836 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.hadoop
import config.{MailConfig, WorstRuntimesPerTraceClientConfig}
import email.{Email, EmailContent}
import com.twitter.logging.Logger
/**
 * Runs all the jobs which write their results to file on the input, and sends those results as emails.
 * The arguments are expected to be: inputdirname servicenamefile [outputdirname]
*/
object PostprocessWriteToFile {
val jobList = List(("WorstRuntimesPerTrace", (new WorstRuntimesPerTraceClientConfig())()),
("Timeouts", new TimeoutsClient()),
("Retries", new RetriesClient()),
("MemcacheRequest", new MemcacheRequestClient()),
("ExpensiveEndpoints", new ExpensiveEndpointsClient()))
def main(args: Array[String]) {
val input = args(0)
val serviceNames = args(1)
val output = if (args.length < 3) null else args(2)
EmailContent.populateServiceNames(serviceNames)
for (jobTuple <- jobList) {
val (jobName, jobClient) = jobTuple
jobClient.start(input + "/" + jobName, output)
}
if (output != null) {
EmailContent.setOutputDir(output)
EmailContent.writeAll()
}
val serviceToEmail = EmailContent.writeAllAsStrings()
for (tuple <- serviceToEmail) {
val (service, content) = tuple
EmailContent.getEmailAddress(service) match {
        case Some(addresses) => addresses.foreach { address =>
          (new MailConfig())().send(new Email(address, "Service Report for " + service, content))
        }
        case None => () // no email address configured for this service; skip it rather than throw a MatchError
      }
}
}
}
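// A minimal invocation sketch with hypothetical paths, matching the documented
// argument order (input dir, service-name file, optional output dir).
object PostprocessWriteToFileSketch {
  def main(args: Array[String]) {
    PostprocessWriteToFile.main(Array("/tmp/zipkin-input", "/tmp/service-names.txt", "/tmp/report-output"))
  }
}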
//TODO: Replace (or supplement) this with one main method that runs all jobs
/**
* Runs the PopularKeysClient on the input
*/
object ProcessPopularKeys {
def main(args : Array[String]) {
val portNumber = augmentString(args(2)).toInt
EmailContent.populateServiceNames(args(0))
val c = new PopularKeyValuesClient(portNumber)
c.start(args(0), args(1))
}
}
/**
* Runs the PopularAnnotationsClient on the input
*/
object ProcessPopularAnnotations {
def main(args : Array[String]) {
val portNumber = augmentString(args(2)).toInt
EmailContent.populateServiceNames(args(0))
val c = new PopularAnnotationsClient(portNumber)
c.start(args(0), args(1))
}
}
/**
* Runs the MemcacheRequestClient on the input
*/
object ProcessMemcacheRequest {
def main(args : Array[String]) {
EmailContent.populateServiceNames(args(0))
val c = new MemcacheRequestClient()
c.start(args(0), args(1))
EmailContent.writeAll()
}
}
/**
* Runs the TimeoutsClient on the input
*/
object ProcessTimeouts {
def main(args : Array[String]) {
EmailContent.populateServiceNames(args(0))
val c = new TimeoutsClient()
c.start(args(0), args(1))
EmailContent.writeAll()
}
}
/**
* Runs the ExpensiveEndpointsClient on the input
*/
object ProcessExpensiveEndpoints {
def main(args: Array[String]) {
EmailContent.populateServiceNames(args(0))
val c = new ExpensiveEndpointsClient()
c.start(args(0), args(1))
EmailContent.writeAll()
}
}
object ProcessWorstRuntimesPerTrace {
def main(args: Array[String]) {
EmailContent.populateServiceNames(args(0))
val c = (new WorstRuntimesPerTraceClientConfig()).apply()
c.start(args(0), args(1))
EmailContent.writeAll()
}
}
|
devcamcar/zipkin
|
zipkin-hadoop-job-runner/src/main/scala/com/twitter/zipkin/hadoop/Postprocess.scala
|
Scala
|
apache-2.0
| 3,842 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.File
import java.nio.charset.StandardCharsets.UTF_8
import java.util.Collections
import java.util.concurrent.CountDownLatch
import scala.collection.mutable
import scala.util.{Success, Try}
import org.apache.commons.io.FileUtils
import org.apache.commons.lang3.RandomStringUtils
import org.apache.hadoop.fs.Path
import org.mockito.Mockito.when
import org.scalactic.TolerantNumerics
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatestplus.mockito.MockitoSugar
import org.apache.spark.{SparkException, TestUtils}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{AnalysisException, Column, DataFrame, Dataset, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Literal, Rand, Randn, Shuffle, Uuid}
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes.Complete
import org.apache.spark.sql.connector.read.InputPartition
import org.apache.spark.sql.connector.read.streaming.{Offset => OffsetV2}
import org.apache.spark.sql.execution.exchange.ReusedExchangeExec
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.{MemorySink, TestForeachWriter}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.util.{BlockingSource, MockSourceProvider, StreamManualClock}
import org.apache.spark.sql.types.StructType
class StreamingQuerySuite extends StreamTest with BeforeAndAfter with Logging with MockitoSugar {
import AwaitTerminationTester._
import testImplicits._
// To make === between double tolerate inexact values
implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01)
after {
sqlContext.streams.active.foreach(_.stop())
}
test("name unique in active queries") {
withTempDir { dir =>
def startQuery(name: Option[String]): StreamingQuery = {
val writer = MemoryStream[Int].toDS.writeStream
name.foreach(writer.queryName)
writer
.foreach(new TestForeachWriter)
.start()
}
// No name by default, multiple active queries can have no name
val q1 = startQuery(name = None)
assert(q1.name === null)
val q2 = startQuery(name = None)
assert(q2.name === null)
// Can be set by user
val q3 = startQuery(name = Some("q3"))
assert(q3.name === "q3")
// Multiple active queries cannot have same name
val e = intercept[IllegalArgumentException] {
startQuery(name = Some("q3"))
}
q1.stop()
q2.stop()
q3.stop()
}
}
test(
"id unique in active queries + persists across restarts, runId unique across start/restarts") {
val inputData = MemoryStream[Int]
withTempDir { dir =>
var cpDir: String = null
def startQuery(restart: Boolean): StreamingQuery = {
if (cpDir == null || !restart) cpDir = s"$dir/${RandomStringUtils.randomAlphabetic(10)}"
MemoryStream[Int].toDS().groupBy().count()
.writeStream
.format("memory")
.outputMode("complete")
.queryName(s"name${RandomStringUtils.randomAlphabetic(10)}")
.option("checkpointLocation", cpDir)
.start()
}
// id and runId unique for new queries
val q1 = startQuery(restart = false)
val q2 = startQuery(restart = false)
assert(q1.id !== q2.id)
assert(q1.runId !== q2.runId)
q1.stop()
q2.stop()
// id persists across restarts, runId unique across restarts
val q3 = startQuery(restart = false)
q3.stop()
val q4 = startQuery(restart = true)
q4.stop()
      assert(q3.id === q4.id)
assert(q3.runId !== q4.runId)
// Only one query with same id can be active
withSQLConf(SQLConf.STREAMING_STOP_ACTIVE_RUN_ON_RESTART.key -> "false") {
val q5 = startQuery(restart = false)
val e = intercept[IllegalStateException] {
startQuery(restart = true)
}
}
}
}
testQuietly("isActive, exception, and awaitTermination") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map { 6 / _}
testStream(mapped)(
AssertOnQuery(_.isActive),
AssertOnQuery(_.exception.isEmpty),
AddData(inputData, 1, 2),
CheckAnswer(6, 3),
TestAwaitTermination(ExpectBlocked),
TestAwaitTermination(ExpectBlocked, timeoutMs = 2000),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 10, expectedReturnValue = false),
StopStream,
AssertOnQuery(_.isActive === false),
AssertOnQuery(_.exception.isEmpty),
TestAwaitTermination(ExpectNotBlocked),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 2000, expectedReturnValue = true),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 10, expectedReturnValue = true),
StartStream(),
AssertOnQuery(_.isActive),
AddData(inputData, 0),
ExpectFailure[SparkException](),
AssertOnQuery(_.isActive === false),
TestAwaitTermination(ExpectException[SparkException]),
TestAwaitTermination(ExpectException[SparkException], timeoutMs = 2000),
TestAwaitTermination(ExpectException[SparkException], timeoutMs = 10),
AssertOnQuery(q => {
q.exception.get.startOffset ===
q.committedOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString &&
q.exception.get.endOffset ===
q.availableOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString
}, "incorrect start offset or end offset on exception")
)
}
testQuietly("OneTime trigger, commit log, and exception") {
import Trigger.Once
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map { 6 / _}
testStream(mapped)(
AssertOnQuery(_.isActive),
StopStream,
AddData(inputData, 1, 2),
StartStream(trigger = Once),
CheckAnswer(6, 3),
StopStream, // clears out StreamTest state
AssertOnQuery { q =>
// both commit log and offset log contain the same (latest) batch id
q.commitLog.getLatest().map(_._1).getOrElse(-1L) ==
q.offsetLog.getLatest().map(_._1).getOrElse(-2L)
},
AssertOnQuery { q =>
// blow away commit log and sink result
q.commitLog.purge(1)
q.sink.asInstanceOf[MemorySink].clear()
true
},
StartStream(trigger = Once),
CheckAnswer(6, 3), // ensure we fall back to offset log and reprocess batch
StopStream,
AddData(inputData, 3),
StartStream(trigger = Once),
CheckLastBatch(2), // commit log should be back in place
StopStream,
AddData(inputData, 0),
StartStream(trigger = Once),
ExpectFailure[SparkException](),
AssertOnQuery(_.isActive === false),
AssertOnQuery(q => {
q.exception.get.startOffset ===
q.committedOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString &&
q.exception.get.endOffset ===
q.availableOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString
}, "incorrect start offset or end offset on exception")
)
}
testQuietly("status, lastProgress, and recentProgress") {
import StreamingQuerySuite._
clock = new StreamManualClock
/** Custom MemoryStream that waits for manual clock to reach a time */
val inputData = new MemoryStream[Int](0, sqlContext) {
private def dataAdded: Boolean = currentOffset.offset != -1
// latestOffset should take 50 ms the first time it is called after data is added
override def latestOffset(): OffsetV2 = synchronized {
if (dataAdded) clock.waitTillTime(1050)
super.latestOffset()
}
// getBatch should take 100 ms the first time it is called
override def planInputPartitions(start: OffsetV2, end: OffsetV2): Array[InputPartition] = {
synchronized {
clock.waitTillTime(1150)
super.planInputPartitions(start, end)
}
}
}
// query execution should take 350 ms the first time it is called
val mapped = inputData.toDS.coalesce(1).as[Long].map { x =>
clock.waitTillTime(1500) // this will only wait the first time when clock < 1500
10 / x
}.agg(count("*")).as[Long]
case class AssertStreamExecThreadIsWaitingForTime(targetTime: Long)
extends AssertOnQuery(q => {
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingFor(targetTime))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}, "") {
override def toString: String = s"AssertStreamExecThreadIsWaitingForTime($targetTime)"
}
case class AssertClockTime(time: Long)
extends AssertOnQuery(q => clock.getTimeMillis() === time, "") {
override def toString: String = s"AssertClockTime($time)"
}
var lastProgressBeforeStop: StreamingQueryProgress = null
testStream(mapped, OutputMode.Complete)(
StartStream(Trigger.ProcessingTime(1000), triggerClock = clock),
AssertStreamExecThreadIsWaitingForTime(1000),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress when `latestOffset` is being called
AddData(inputData, 1, 2),
AdvanceManualClock(1000), // time = 1000 to start new trigger, will block on `latestOffset`
AssertStreamExecThreadIsWaitingForTime(1050),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive),
AssertOnQuery(_.status.message.startsWith("Getting offsets from")),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
AdvanceManualClock(50), // time = 1050 to unblock `latestOffset`
AssertClockTime(1050),
      // will block on `planInputPartitions` that needs 1150
AssertStreamExecThreadIsWaitingForTime(1150),
AssertOnQuery(_.status.isDataAvailable),
AssertOnQuery(_.status.isTriggerActive),
AssertOnQuery(_.status.message === "Processing new data"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
AdvanceManualClock(100), // time = 1150 to unblock `planInputPartitions`
AssertClockTime(1150),
AssertStreamExecThreadIsWaitingForTime(1500), // will block on map task that needs 1500
AssertOnQuery(_.status.isDataAvailable),
AssertOnQuery(_.status.isTriggerActive),
AssertOnQuery(_.status.message === "Processing new data"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while batch processing has completed
AdvanceManualClock(350), // time = 1500 to unblock map task
AssertClockTime(1500),
CheckAnswer(2),
AssertStreamExecThreadIsWaitingForTime(2000), // will block until the next trigger
AssertOnQuery(_.status.isDataAvailable),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery { query =>
assert(query.lastProgress != null)
assert(query.recentProgress.exists(_.numInputRows > 0))
assert(query.recentProgress.last.eq(query.lastProgress))
val progress = query.lastProgress
assert(progress.id === query.id)
assert(progress.name === query.name)
assert(progress.batchId === 0)
        assert(progress.timestamp === "1970-01-01T00:00:01.000Z") // trigger started at 1000 ms (UTC)
assert(progress.numInputRows === 2)
assert(progress.processedRowsPerSecond === 4.0)
assert(progress.durationMs.get("latestOffset") === 50)
assert(progress.durationMs.get("queryPlanning") === 100)
assert(progress.durationMs.get("walCommit") === 0)
assert(progress.durationMs.get("addBatch") === 350)
assert(progress.durationMs.get("triggerExecution") === 500)
assert(progress.sources.length === 1)
assert(progress.sources(0).description contains "MemoryStream")
assert(progress.sources(0).startOffset === null) // no prior offset
assert(progress.sources(0).endOffset === "0")
assert(progress.sources(0).processedRowsPerSecond === 4.0) // 2 rows processed in 500 ms
assert(progress.stateOperators.length === 1)
assert(progress.stateOperators(0).numRowsUpdated === 1)
assert(progress.stateOperators(0).numRowsTotal === 1)
assert(progress.sink.description contains "MemorySink")
true
},
// Test whether input rate is updated after two batches
AssertStreamExecThreadIsWaitingForTime(2000), // blocked waiting for next trigger time
AddData(inputData, 1, 2),
AdvanceManualClock(500), // allow another trigger
AssertClockTime(2000),
AssertStreamExecThreadIsWaitingForTime(3000), // will block waiting for next trigger time
CheckAnswer(4),
AssertOnQuery(_.status.isDataAvailable),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery { query =>
assert(query.recentProgress.last.eq(query.lastProgress))
assert(query.lastProgress.batchId === 1)
assert(query.lastProgress.inputRowsPerSecond === 2.0)
assert(query.lastProgress.sources(0).inputRowsPerSecond === 2.0)
assert(query.lastProgress.sources(0).startOffset === "0")
assert(query.lastProgress.sources(0).endOffset === "1")
true
},
// Test status and progress after data is not available for a trigger
AdvanceManualClock(1000), // allow another trigger
AssertStreamExecThreadIsWaitingForTime(4000),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
// Test status and progress after query stopped
AssertOnQuery { query =>
lastProgressBeforeStop = query.lastProgress
true
},
StopStream,
AssertOnQuery(_.lastProgress.json === lastProgressBeforeStop.json),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Stopped"),
// Test status and progress after query terminated with error
StartStream(Trigger.ProcessingTime(1000), triggerClock = clock),
AdvanceManualClock(1000), // ensure initial trigger completes before AddData
AddData(inputData, 0),
AdvanceManualClock(1000), // allow another trigger
ExpectFailure[SparkException](),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message.startsWith("Terminated with exception"))
)
}
test("lastProgress should be null when recentProgress is empty") {
BlockingSource.latch = new CountDownLatch(1)
withTempDir { tempDir =>
val sq = spark.readStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.load()
.writeStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.option("checkpointLocation", tempDir.toString)
.start()
// Creating source is blocked so recentProgress is empty and lastProgress should be null
assert(sq.lastProgress === null)
// Release the latch and stop the query
BlockingSource.latch.countDown()
sq.stop()
}
}
test("codahale metrics") {
val inputData = MemoryStream[Int]
/** Whether metrics of a query is registered for reporting */
def isMetricsRegistered(query: StreamingQuery): Boolean = {
val sourceName = s"spark.streaming.${query.id}"
val sources = spark.sparkContext.env.metricsSystem.getSourcesByName(sourceName)
require(sources.size <= 1)
sources.nonEmpty
}
// Disabled by default
assert(spark.conf.get(SQLConf.STREAMING_METRICS_ENABLED.key).toBoolean === false)
withSQLConf(SQLConf.STREAMING_METRICS_ENABLED.key -> "false") {
testStream(inputData.toDF)(
AssertOnQuery { q => !isMetricsRegistered(q) },
StopStream,
AssertOnQuery { q => !isMetricsRegistered(q) }
)
}
// Registered when enabled
withSQLConf(SQLConf.STREAMING_METRICS_ENABLED.key -> "true") {
testStream(inputData.toDF)(
AssertOnQuery { q => isMetricsRegistered(q) },
StopStream,
AssertOnQuery { q => !isMetricsRegistered(q) }
)
}
}
test("SPARK-22975: MetricsReporter defaults when there was no progress reported") {
withSQLConf(SQLConf.STREAMING_METRICS_ENABLED.key -> "true") {
BlockingSource.latch = new CountDownLatch(1)
withTempDir { tempDir =>
val sq = spark.readStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.load()
.writeStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.option("checkpointLocation", tempDir.toString)
.start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
val gauges = sq.streamMetrics.metricRegistry.getGauges
assert(gauges.get("latency").getValue.asInstanceOf[Long] == 0)
assert(gauges.get("processingRate-total").getValue.asInstanceOf[Double] == 0.0)
assert(gauges.get("inputRate-total").getValue.asInstanceOf[Double] == 0.0)
assert(gauges.get("eventTime-watermark").getValue.asInstanceOf[Long] == 0)
assert(gauges.get("states-rowsTotal").getValue.asInstanceOf[Long] == 0)
assert(gauges.get("states-usedBytes").getValue.asInstanceOf[Long] == 0)
sq.stop()
}
}
}
test("SPARK-37147: MetricsReporter does not fail when durationMs is empty") {
val stateOpProgressMock = mock[StreamingQueryProgress]
when(stateOpProgressMock.durationMs).thenReturn(Collections.emptyMap[String, java.lang.Long]())
val streamExecMock = mock[StreamExecution]
when(streamExecMock.lastProgress).thenReturn(stateOpProgressMock)
val gauges = new MetricsReporter(streamExecMock, "").metricRegistry.getGauges()
assert(Try(gauges.get("latency").getValue) == Success(0L))
}
test("input row calculation with same V1 source used twice in self-join") {
val streamingTriggerDF = spark.createDataset(1 to 10).toDF
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF).toDF("value")
val progress = getStreamingQuery(streamingInputDF.join(streamingInputDF, "value"))
.recentProgress.head
assert(progress.numInputRows === 20) // data is read multiple times in self-joins
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 20)
}
test("input row calculation with mixed batch and streaming V1 sources") {
val streamingTriggerDF = spark.createDataset(1 to 10).toDF
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF).toDF("value")
val staticInputDF = spark.createDataFrame(Seq(1 -> "1", 2 -> "2")).toDF("value", "anotherValue")
// Trigger input has 10 rows, static input has 2 rows,
// therefore after the first trigger, the calculated input rows should be 10
val progress = getStreamingQuery(streamingInputDF.join(staticInputDF, "value"))
.recentProgress.head
assert(progress.numInputRows === 10)
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 10)
}
test("input row calculation with trigger input DF having multiple leaves in V1 source") {
val streamingTriggerDF =
spark.createDataset(1 to 5).toDF.union(spark.createDataset(6 to 10).toDF)
require(streamingTriggerDF.logicalPlan.collectLeaves().size > 1)
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF)
// After the first trigger, the calculated input rows should be 10
val progress = getStreamingQuery(streamingInputDF).recentProgress.head
assert(progress.numInputRows === 10)
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 10)
}
test("input row calculation with same V2 source used twice in self-union") {
val streamInput = MemoryStream[Int]
testStream(streamInput.toDF().union(streamInput.toDF()))(
AddData(streamInput, 1, 2, 3),
CheckAnswer(1, 1, 2, 2, 3, 3),
AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.sources.length == 1)
// The source is scanned twice because of self-union
assert(lastProgress.get.numInputRows == 6)
true
}
)
}
test("input row calculation with same V2 source used twice in self-join") {
def checkQuery(check: AssertOnQuery): Unit = {
val memoryStream = MemoryStream[Int]
      // TODO: currently the streaming framework always adds a dummy Project above the streaming source
// relation, which breaks exchange reuse, as the optimizer will remove Project from one side.
// Here we manually add a useful Project, to trigger exchange reuse.
val streamDF = memoryStream.toDF().select('value + 0 as "v")
testStream(streamDF.join(streamDF, "v"))(
AddData(memoryStream, 1, 2, 3),
CheckAnswer(1, 2, 3),
check
)
}
withSQLConf(SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
checkQuery(AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.sources.length == 1)
// The source is scanned twice because of self-join
assert(lastProgress.get.numInputRows == 6)
true
})
}
withSQLConf(SQLConf.EXCHANGE_REUSE_ENABLED.key -> "true") {
checkQuery(AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.sources.length == 1)
assert(q.lastExecution.executedPlan.collect {
case r: ReusedExchangeExec => r
}.length == 1)
// The source is scanned only once because of exchange reuse
assert(lastProgress.get.numInputRows == 3)
true
})
}
}
test("input row calculation with trigger having data for only one of two V2 sources") {
val streamInput1 = MemoryStream[Int]
val streamInput2 = MemoryStream[Int]
testStream(streamInput1.toDF().union(streamInput2.toDF()))(
AddData(streamInput1, 1, 2, 3),
CheckLastBatch(1, 2, 3),
AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.numInputRows == 3)
assert(lastProgress.get.sources.length == 2)
assert(lastProgress.get.sources(0).numInputRows == 3)
assert(lastProgress.get.sources(1).numInputRows == 0)
true
},
AddData(streamInput2, 4, 5),
CheckLastBatch(4, 5),
AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.numInputRows == 2)
assert(lastProgress.get.sources.length == 2)
assert(lastProgress.get.sources(0).numInputRows == 0)
assert(lastProgress.get.sources(1).numInputRows == 2)
true
}
)
}
test("input row calculation with mixed batch and streaming V2 sources") {
val streamInput = MemoryStream[Int]
val staticInputDF = spark.createDataFrame(Seq(1 -> "1", 2 -> "2")).toDF("value", "anotherValue")
testStream(streamInput.toDF().join(staticInputDF, "value"))(
AddData(streamInput, 1, 2, 3),
AssertOnQuery { q =>
q.processAllAvailable()
// The number of leaves in the trigger's logical plan should be same as the executed plan.
require(
q.lastExecution.logical.collectLeaves().length ==
q.lastExecution.executedPlan.collectLeaves().length)
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.numInputRows == 3)
assert(lastProgress.get.sources.length == 1)
assert(lastProgress.get.sources(0).numInputRows == 3)
true
}
)
val streamInput2 = MemoryStream[Int]
val staticInputDF2 = staticInputDF.union(staticInputDF).cache()
testStream(streamInput2.toDF().join(staticInputDF2, "value"))(
AddData(streamInput2, 1, 2, 3),
AssertOnQuery { q =>
q.processAllAvailable()
// The number of leaves in the trigger's logical plan should be different from
// the executed plan. The static input will have two leaves in the logical plan
// (due to the union), but will be converted to a single leaf in the executed plan
// (due to the caching, the cached subplan is replaced by a single InMemoryTableScanExec).
require(
q.lastExecution.logical.collectLeaves().length !=
q.lastExecution.executedPlan.collectLeaves().length)
// Despite the mismatch in total number of leaves in the logical and executed plans,
// we should be able to attribute streaming input metrics to the streaming sources.
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.numInputRows == 3)
assert(lastProgress.get.sources.length == 1)
assert(lastProgress.get.sources(0).numInputRows == 3)
true
}
)
}
testQuietly("StreamExecution metadata garbage collection") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map(6 / _)
withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "1") {
      // Run 3 batches, and then assert that only 2 metadata files are at the end,
      // since the first one should have been purged.
testStream(mapped)(
AddData(inputData, 1, 2),
CheckAnswer(6, 3),
AddData(inputData, 1, 2),
CheckAnswer(6, 3, 6, 3),
AddData(inputData, 4, 6),
CheckAnswer(6, 3, 6, 3, 1, 1),
AssertOnQuery("metadata log should contain only two files") { q =>
val metadataLogDir = new java.io.File(q.offsetLog.metadataPath.toUri)
val logFileNames = metadataLogDir.listFiles().toSeq.map(_.getName())
val toTest = logFileNames.filter(!_.endsWith(".crc")).sorted // Workaround for SPARK-17475
assert(toTest.size == 2 && toTest.head == "1")
true
}
)
}
val inputData2 = MemoryStream[Int]
withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "2") {
      // Run 5 batches, and then assert that 3 metadata files are at the end,
      // since the first two should have been purged.
testStream(inputData2.toDS())(
AddData(inputData2, 1, 2),
CheckAnswer(1, 2),
AddData(inputData2, 1, 2),
CheckAnswer(1, 2, 1, 2),
AddData(inputData2, 3, 4),
CheckAnswer(1, 2, 1, 2, 3, 4),
AddData(inputData2, 5, 6),
CheckAnswer(1, 2, 1, 2, 3, 4, 5, 6),
AddData(inputData2, 7, 8),
CheckAnswer(1, 2, 1, 2, 3, 4, 5, 6, 7, 8),
AssertOnQuery("metadata log should contain three files") { q =>
val metadataLogDir = new java.io.File(q.offsetLog.metadataPath.toUri)
val logFileNames = metadataLogDir.listFiles().toSeq.map(_.getName())
val toTest = logFileNames.filter(!_.endsWith(".crc")).sorted // Workaround for SPARK-17475
assert(toTest.size == 3 && toTest.head == "2")
true
}
)
}
}
testQuietly("StreamingQuery should be Serializable but cannot be used in executors") {
def startQuery(ds: Dataset[Int], queryName: String): StreamingQuery = {
ds.writeStream
.queryName(queryName)
.format("memory")
.start()
}
val input = MemoryStream[Int] :: MemoryStream[Int] :: MemoryStream[Int] :: Nil
val q1 = startQuery(input(0).toDS, "stream_serializable_test_1")
val q2 = startQuery(input(1).toDS.map { i =>
      // Emulate that `StreamingQuery` gets captured by normal usage unintentionally.
// It should not fail the query.
val q = q1
i
}, "stream_serializable_test_2")
val q3 = startQuery(input(2).toDS.map { i =>
// Emulate that `StreamingQuery` is used in executors. We should fail the query with a clear
// error message.
q1.explain()
i
}, "stream_serializable_test_3")
try {
input.foreach(_.addData(1))
// q2 should not fail since it doesn't use `q1` in the closure
q2.processAllAvailable()
// The user calls `StreamingQuery` in the closure and it should fail
val e = intercept[StreamingQueryException] {
q3.processAllAvailable()
}
assert(e.getCause.isInstanceOf[SparkException])
assert(e.getCause.getCause.getCause.isInstanceOf[IllegalStateException])
TestUtils.assertExceptionMsg(e, "StreamingQuery cannot be used in executors")
} finally {
q1.stop()
q2.stop()
q3.stop()
}
}
test("StreamExecution should call stop() on sources when a stream is stopped") {
var calledStop = false
val source = new Source {
override def stop(): Unit = {
calledStop = true
}
override def getOffset: Option[Offset] = None
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.emptyDataFrame
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source) {
val df = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
testStream(df)(StopStream)
assert(calledStop, "Did not call stop on source for stopped stream")
}
}
testQuietly("SPARK-19774: StreamExecution should call stop() on sources when a stream fails") {
var calledStop = false
val source1 = new Source {
override def stop(): Unit = {
throw new RuntimeException("Oh no!")
}
override def getOffset: Option[Offset] = Some(LongOffset(1))
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.range(2).toDF(MockSourceProvider.fakeSchema.fieldNames: _*)
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
val source2 = new Source {
override def stop(): Unit = {
calledStop = true
}
override def getOffset: Option[Offset] = None
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.emptyDataFrame
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source1, source2) {
val df1 = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
.as[Int]
val df2 = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
.as[Int]
testStream(df1.union(df2).map(i => i / 0))(
AssertOnQuery { sq =>
intercept[StreamingQueryException](sq.processAllAvailable())
sq.exception.isDefined && !sq.isActive
}
)
assert(calledStop, "Did not call stop on source for stopped stream")
}
}
test("get the query id in source") {
@volatile var queryId: String = null
val source = new Source {
override def stop(): Unit = {}
override def getOffset: Option[Offset] = {
queryId = spark.sparkContext.getLocalProperty(StreamExecution.QUERY_ID_KEY)
None
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = spark.emptyDataFrame
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source) {
val df = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
testStream(df)(
AssertOnQuery { sq =>
sq.processAllAvailable()
assert(sq.id.toString === queryId)
assert(sq.runId.toString !== queryId)
true
}
)
}
}
test("processAllAvailable should not block forever when a query is stopped") {
val input = MemoryStream[Int]
input.addData(1)
val query = input.toDF().writeStream
.trigger(Trigger.Once())
.format("console")
.start()
failAfter(streamingTimeout) {
query.processAllAvailable()
}
}
test("SPARK-22238: don't check for RDD partitions during streaming aggregation preparation") {
val stream = MemoryStream[(Int, Int)]
val baseDf = Seq((1, "A"), (2, "b")).toDF("num", "char").where("char = 'A'")
val otherDf = stream.toDF().toDF("num", "numSq")
.join(broadcast(baseDf), "num")
.groupBy('char)
.agg(sum('numSq))
testStream(otherDf, OutputMode.Complete())(
AddData(stream, (1, 1), (2, 4)),
CheckLastBatch(("A", 1)))
}
test("Uuid in streaming query should not produce same uuids in each execution") {
val uuids = mutable.ArrayBuffer[String]()
def collectUuid: Seq[Row] => Unit = { rows: Seq[Row] =>
rows.foreach(r => uuids += r.getString(0))
}
val stream = MemoryStream[Int]
val df = stream.toDF().select(new Column(Uuid()))
testStream(df)(
AddData(stream, 1),
CheckAnswer(collectUuid),
AddData(stream, 2),
CheckAnswer(collectUuid)
)
assert(uuids.distinct.size == 2)
}
test("Rand/Randn in streaming query should not produce same results in each execution") {
val rands = mutable.ArrayBuffer[Double]()
def collectRand: Seq[Row] => Unit = { rows: Seq[Row] =>
rows.foreach { r =>
rands += r.getDouble(0)
rands += r.getDouble(1)
}
}
val stream = MemoryStream[Int]
val df = stream.toDF().select(new Column(new Rand()), new Column(new Randn()))
testStream(df)(
AddData(stream, 1),
CheckAnswer(collectRand),
AddData(stream, 2),
CheckAnswer(collectRand)
)
assert(rands.distinct.size == 4)
}
test("Shuffle in streaming query should not produce same results in each execution") {
val rands = mutable.ArrayBuffer[Seq[Int]]()
def collectShuffle: Seq[Row] => Unit = { rows: Seq[Row] =>
rows.foreach { r =>
rands += r.getSeq[Int](0)
}
}
val stream = MemoryStream[Int]
val df = stream.toDF().select(new Column(new Shuffle(Literal.create[Seq[Int]](0 until 100))))
testStream(df)(
AddData(stream, 1),
CheckAnswer(collectShuffle),
AddData(stream, 2),
CheckAnswer(collectShuffle)
)
assert(rands.distinct.size == 2)
}
test("StreamingRelationV2/StreamingExecutionRelation/ContinuousExecutionRelation.toJSON " +
"should not fail") {
val df = spark.readStream.format("rate").load()
assert(df.logicalPlan.toJSON.contains("StreamingRelationV2"))
testStream(df)(
AssertOnQuery(_.logicalPlan.toJSON.contains("StreamingDataSourceV2Relation"))
)
testStream(df)(
StartStream(trigger = Trigger.Continuous(100)),
AssertOnQuery(_.logicalPlan.toJSON.contains("StreamingDataSourceV2Relation"))
)
}
test("special characters in checkpoint path") {
withTempDir { tempDir =>
val checkpointDir = new File(tempDir, "chk @#chk")
val inputData = MemoryStream[Int]
inputData.addData(1)
val q = inputData.toDF()
.writeStream
.format("noop")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start()
try {
q.processAllAvailable()
assert(checkpointDir.listFiles().toList.nonEmpty)
} finally {
q.stop()
}
}
}
/**
* Copy the checkpoint generated by Spark 2.4.0 from test resource to `dir` to set up a legacy
* streaming checkpoint.
*/
private def setUp2dot4dot0Checkpoint(dir: File): Unit = {
val input = getClass.getResource("/structured-streaming/escaped-path-2.4.0")
assert(input != null, "cannot find test resource '/structured-streaming/escaped-path-2.4.0'")
val inputDir = new File(input.toURI)
// Copy test files to tempDir so that we won't modify the original data.
FileUtils.copyDirectory(inputDir, dir)
// Spark 2.4 and earlier escaped the _spark_metadata path once
val legacySparkMetadataDir = new File(
dir,
new Path("output %@#output/_spark_metadata").toUri.toString)
// Migrate from legacy _spark_metadata directory to the new _spark_metadata directory.
// Ideally we should copy "_spark_metadata" directly like what the user is supposed to do to
// migrate to new version. However, in our test, "tempDir" will be different in each run and
// we need to fix the absolute path in the metadata to match "tempDir".
val sparkMetadata = FileUtils.readFileToString(new File(legacySparkMetadataDir, "0"), UTF_8)
FileUtils.write(
new File(legacySparkMetadataDir, "0"),
sparkMetadata.replaceAll("TEMPDIR", dir.getCanonicalPath), UTF_8)
}
test("detect escaped path and report the migration guide") {
// Assert that the error message contains the migration conf, path and the legacy path.
def assertMigrationError(errorMessage: String, path: File, legacyPath: File): Unit = {
Seq(SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key,
path.getCanonicalPath,
legacyPath.getCanonicalPath).foreach { msg =>
assert(errorMessage.contains(msg))
}
}
withTempDir { tempDir =>
setUp2dot4dot0Checkpoint(tempDir)
// Here are the paths we will use to create the query
val outputDir = new File(tempDir, "output %@#output")
val checkpointDir = new File(tempDir, "chk %@#chk")
val sparkMetadataDir = new File(tempDir, "output %@#output/_spark_metadata")
// The escaped paths used by Spark 2.4 and earlier.
// Spark 2.4 and earlier escaped the checkpoint path three times
val legacyCheckpointDir = new File(
tempDir,
new Path(new Path(new Path("chk %@#chk").toUri.toString).toUri.toString).toUri.toString)
// Spark 2.4 and earlier escaped the _spark_metadata path once
val legacySparkMetadataDir = new File(
tempDir,
new Path("output %@#output/_spark_metadata").toUri.toString)
// Reading a file sink output in a batch query should detect the legacy _spark_metadata
// directory and throw an error
val e = intercept[SparkException] {
spark.read.load(outputDir.getCanonicalPath).as[Int]
}
assertMigrationError(e.getMessage, sparkMetadataDir, legacySparkMetadataDir)
// Restarting the streaming query should detect the legacy _spark_metadata directory and
// throw an error
val inputData = MemoryStream[Int]
val e2 = intercept[SparkException] {
inputData.toDF()
.writeStream
.format("parquet")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start(outputDir.getCanonicalPath)
}
assertMigrationError(e2.getMessage, sparkMetadataDir, legacySparkMetadataDir)
// Move "_spark_metadata" to fix the file sink and test the checkpoint path.
FileUtils.moveDirectory(legacySparkMetadataDir, sparkMetadataDir)
// Restarting the streaming query should detect the legacy
// checkpoint path and throw an error.
val e3 = intercept[SparkException] {
inputData.toDF()
.writeStream
.format("parquet")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start(outputDir.getCanonicalPath)
}
assertMigrationError(e3.getMessage, checkpointDir, legacyCheckpointDir)
// Fix the checkpoint path and verify that the user can migrate the issue by moving files.
FileUtils.moveDirectory(legacyCheckpointDir, checkpointDir)
val q = inputData.toDF()
.writeStream
.format("parquet")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start(outputDir.getCanonicalPath)
try {
q.processAllAvailable()
// Check the query id to make sure it did use checkpoint
assert(q.id.toString == "09be7fb3-49d8-48a6-840d-e9c2ad92a898")
// Verify that the batch query can read "_spark_metadata" correctly after migration.
val df = spark.read.load(outputDir.getCanonicalPath)
assert(df.queryExecution.executedPlan.toString contains "MetadataLogFileIndex")
checkDatasetUnorderly(df.as[Int], 1, 2, 3)
} finally {
q.stop()
}
}
}
test("ignore the escaped path check when the flag is off") {
withTempDir { tempDir =>
setUp2dot4dot0Checkpoint(tempDir)
val outputDir = new File(tempDir, "output %@#output")
val checkpointDir = new File(tempDir, "chk %@#chk")
withSQLConf(SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key -> "false") {
// Verify that the batch query ignores the legacy "_spark_metadata"
val df = spark.read.load(outputDir.getCanonicalPath)
assert(!(df.queryExecution.executedPlan.toString contains "MetadataLogFileIndex"))
checkDatasetUnorderly(df.as[Int], 1, 2, 3)
val inputData = MemoryStream[Int]
val q = inputData.toDF()
.writeStream
.format("parquet")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start(outputDir.getCanonicalPath)
try {
q.processAllAvailable()
// Check the query id to make sure it ignores the legacy checkpoint
assert(q.id.toString != "09be7fb3-49d8-48a6-840d-e9c2ad92a898")
} finally {
q.stop()
}
}
}
}
test("containsSpecialCharsInPath") {
Seq("foo/b ar",
"/foo/b ar",
"file:/foo/b ar",
"file://foo/b ar",
"file:///foo/b ar",
"file://foo:bar@bar/foo/b ar").foreach { p =>
assert(StreamExecution.containsSpecialCharsInPath(new Path(p)), s"failed to check $p")
}
Seq("foo/bar",
"/foo/bar",
"file:/foo/bar",
"file://foo/bar",
"file:///foo/bar",
"file://foo:bar@bar/foo/bar",
      // Special chars not in the path itself should not be flagged, as such URLs won't hit the
      // escaped-path issue.
"file://foo:b ar@bar/foo/bar",
"file://foo:bar@b ar/foo/bar",
"file://f oo:bar@bar/foo/bar").foreach { p =>
assert(!StreamExecution.containsSpecialCharsInPath(new Path(p)), s"failed to check $p")
}
}
test("SPARK-32456: SQL union in streaming query of append mode without watermark") {
val inputData1 = MemoryStream[Int]
val inputData2 = MemoryStream[Int]
withTempView("s1", "s2") {
inputData1.toDF().createOrReplaceTempView("s1")
inputData2.toDF().createOrReplaceTempView("s2")
val unioned = spark.sql(
"select s1.value from s1 union select s2.value from s2")
checkExceptionMessage(unioned)
}
}
test("SPARK-32456: distinct in streaming query of append mode without watermark") {
val inputData = MemoryStream[Int]
withTempView("deduptest") {
inputData.toDF().toDF("value").createOrReplaceTempView("deduptest")
val distinct = spark.sql("select distinct value from deduptest")
checkExceptionMessage(distinct)
}
}
test("SPARK-32456: distinct in streaming query of complete mode") {
val inputData = MemoryStream[Int]
withTempView("deduptest") {
inputData.toDF().toDF("value").createOrReplaceTempView("deduptest")
val distinct = spark.sql("select distinct value from deduptest")
testStream(distinct, Complete)(
AddData(inputData, 1, 2, 3, 3, 4),
CheckAnswer(Row(1), Row(2), Row(3), Row(4))
)
}
}
testQuietly("limit on empty batch should not cause state store error") {
    // The source only produces two batches: the first batch is empty and the second batch has data.
val source = new Source {
var batchId = 0
override def stop(): Unit = {}
override def getOffset: Option[Offset] = {
Some(LongOffset(batchId + 1))
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
if (batchId == 0) {
batchId += 1
Dataset.ofRows(spark, LocalRelation(schema.toAttributes, Nil, isStreaming = true))
} else {
Dataset.ofRows(spark,
LocalRelation(schema.toAttributes, InternalRow(10) :: Nil, isStreaming = true))
}
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source) {
val df = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
.limit(1)
testStream(df)(
StartStream(),
AssertOnQuery { q =>
q.processAllAvailable()
true
},
CheckAnswer(10))
}
}
private def checkExceptionMessage(df: DataFrame): Unit = {
withTempDir { outputDir =>
withTempDir { checkpointDir =>
val exception = intercept[AnalysisException](
df.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start(outputDir.getCanonicalPath))
assert(exception.getMessage.contains(
"Append output mode not supported when there are streaming aggregations on streaming " +
"DataFrames/DataSets without watermark"))
}
}
}
  /** Create a streaming DF that only executes one batch, in which it returns the given static DF */
private def createSingleTriggerStreamingDF(triggerDF: DataFrame): DataFrame = {
require(!triggerDF.isStreaming)
    // A streaming Source that generates only one trigger and returns the given DataFrame as its batch
val source = new Source() {
override def schema: StructType = triggerDF.schema
override def getOffset: Option[Offset] = Some(LongOffset(0))
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
sqlContext.internalCreateDataFrame(
triggerDF.queryExecution.toRdd, triggerDF.schema, isStreaming = true)
}
override def stop(): Unit = {}
}
StreamingExecutionRelation(source, spark)
}
  /** Returns the query at the end of the first trigger of the streaming DF */
private def getStreamingQuery(streamingDF: DataFrame): StreamingQuery = {
try {
val q = streamingDF.writeStream.format("memory").queryName("test").start()
q.processAllAvailable()
q
} finally {
spark.streams.active.foreach(_.stop())
}
}
/** Returns the last query progress from query.recentProgress where numInputRows is positive */
def getLastProgressWithData(q: StreamingQuery): Option[StreamingQueryProgress] = {
q.recentProgress.filter(_.numInputRows > 0).lastOption
}
/**
* A [[StreamAction]] to test the behavior of `StreamingQuery.awaitTermination()`.
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
* When timeoutMs is less than or equal to 0, awaitTermination() is
* tested (i.e. w/o timeout)
* When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
* tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
case class TestAwaitTermination(
expectedBehavior: ExpectedBehavior,
timeoutMs: Int = -1,
expectedReturnValue: Boolean = false
) extends AssertOnQuery(
TestAwaitTermination.assertOnQueryCondition(expectedBehavior, timeoutMs, expectedReturnValue),
"Error testing awaitTermination behavior"
) {
override def toString(): String = {
s"TestAwaitTermination($expectedBehavior, timeoutMs = $timeoutMs, " +
s"expectedReturnValue = $expectedReturnValue)"
}
}
object TestAwaitTermination {
/**
* Tests the behavior of `StreamingQuery.awaitTermination`.
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
* When timeoutMs is less than or equal to 0, awaitTermination() is
* tested (i.e. w/o timeout)
* When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
* tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
def assertOnQueryCondition(
expectedBehavior: ExpectedBehavior,
timeoutMs: Int,
expectedReturnValue: Boolean
)(q: StreamExecution): Boolean = {
def awaitTermFunc(): Unit = {
if (timeoutMs <= 0) {
q.awaitTermination()
} else {
val returnedValue = q.awaitTermination(timeoutMs)
assert(returnedValue === expectedReturnValue, "Returned value does not match expected")
}
}
AwaitTerminationTester.test(expectedBehavior, () => awaitTermFunc())
true // If the control reached here, then everything worked as expected
}
}
}
object StreamingQuerySuite {
// Singleton reference to clock that does not get serialized in task closures
var clock: StreamManualClock = null
}
|
shaneknapp/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala
|
Scala
|
apache-2.0
| 50,896 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.agg.batch
import org.apache.flink.streaming.api.operators.OneInputStreamOperator
import org.apache.flink.table.data.binary.BinaryRowData
import org.apache.flink.table.data.utils.JoinedRowData
import org.apache.flink.table.data.{GenericRowData, RowData}
import org.apache.flink.table.functions.AggregateFunction
import org.apache.flink.table.planner.codegen.{CodeGenUtils, CodeGeneratorContext, ProjectionCodeGenerator}
import org.apache.flink.table.planner.functions.aggfunctions.DeclarativeAggregateFunction
import org.apache.flink.table.planner.plan.utils.{AggregateInfo, AggregateInfoList}
import org.apache.flink.table.runtime.generated.GeneratedOperator
import org.apache.flink.table.runtime.operators.TableStreamOperator
import org.apache.flink.table.runtime.operators.aggregate.BytesHashMapSpillMemorySegmentPool
import org.apache.flink.table.runtime.util.collections.binary.BytesMap
import org.apache.flink.table.types.logical.{LogicalType, RowType}
import org.apache.calcite.tools.RelBuilder
/**
 * Operator code generator for hash aggregation. It only deals with
 * [[DeclarativeAggregateFunction]]s, and aggregate buffers should be updated in place
 * (e.g. via setInt) on a [[BinaryRowData]].
 * (Hash aggregate performs much better than sort aggregate.)
*/
class HashAggCodeGenerator(
ctx: CodeGeneratorContext,
builder: RelBuilder,
aggInfoList: AggregateInfoList,
inputType: RowType,
outputType: RowType,
grouping: Array[Int],
auxGrouping: Array[Int],
isMerge: Boolean,
isFinal: Boolean) {
private lazy val aggInfos: Array[AggregateInfo] = aggInfoList.aggInfos
private lazy val functionIdentifiers: Map[AggregateFunction[_, _], String] =
AggCodeGenHelper.getFunctionIdentifiers(aggInfos)
private lazy val aggBufferNames: Array[Array[String]] =
AggCodeGenHelper.getAggBufferNames(auxGrouping, aggInfos)
private lazy val aggBufferTypes: Array[Array[LogicalType]] = AggCodeGenHelper.getAggBufferTypes(
inputType,
auxGrouping,
aggInfos)
private lazy val groupKeyRowType = AggCodeGenHelper.projectRowType(inputType, grouping)
private lazy val aggBufferRowType = RowType.of(aggBufferTypes.flatten, aggBufferNames.flatten)
def genWithKeys(): GeneratedOperator[OneInputStreamOperator[RowData, RowData]] = {
val inputTerm = CodeGenUtils.DEFAULT_INPUT1_TERM
val className = if (isFinal) "HashAggregateWithKeys" else "LocalHashAggregateWithKeys"
// add logger
val logTerm = CodeGenUtils.newName("LOG")
ctx.addReusableLogger(logTerm, className)
// gen code to do group key projection from input
val currentKeyTerm = CodeGenUtils.newName("currentKey")
val currentKeyWriterTerm = CodeGenUtils.newName("currentKeyWriter")
val keyProjectionCode = ProjectionCodeGenerator.generateProjectionExpression(
ctx,
inputType,
groupKeyRowType,
grouping,
inputTerm = inputTerm,
outRecordTerm = currentKeyTerm,
outRecordWriterTerm = currentKeyWriterTerm).code
    // gen code to create the groupKey and aggBuffer type arrays;
    // they will be used in BytesHashMap, and in BufferedKVExternalSorter if fallback is enabled
val groupKeyTypesTerm = CodeGenUtils.newName("groupKeyTypes")
val aggBufferTypesTerm = CodeGenUtils.newName("aggBufferTypes")
HashAggCodeGenHelper.prepareHashAggKVTypes(
ctx, groupKeyTypesTerm, aggBufferTypesTerm, groupKeyRowType, aggBufferRowType)
val binaryRowTypeTerm = classOf[BinaryRowData].getName
// gen code to aggregate and output using hash map
val aggregateMapTerm = CodeGenUtils.newName("aggregateMap")
val lookupInfoTypeTerm = classOf[BytesMap.LookupInfo[_, _]].getCanonicalName
val lookupInfo = ctx.addReusableLocalVariable(
lookupInfoTypeTerm,
"lookupInfo")
HashAggCodeGenHelper.prepareHashAggMap(
ctx,
groupKeyTypesTerm,
aggBufferTypesTerm,
aggregateMapTerm)
val outputTerm = CodeGenUtils.newName("hashAggOutput")
val (reuseGroupKeyTerm, reuseAggBufferTerm) =
HashAggCodeGenHelper.prepareTermForAggMapIteration(
ctx,
outputTerm,
outputType,
if (grouping.isEmpty) classOf[GenericRowData] else classOf[JoinedRowData])
val currentAggBufferTerm = ctx.addReusableLocalVariable(binaryRowTypeTerm, "currentAggBuffer")
val (initedAggBuffer, aggregate, outputExpr) = HashAggCodeGenHelper.genHashAggCodes(
isMerge,
isFinal,
ctx,
builder,
(grouping, auxGrouping),
inputTerm,
inputType,
aggInfos,
currentAggBufferTerm,
aggBufferRowType,
aggBufferTypes,
outputTerm,
outputType,
reuseGroupKeyTerm,
reuseAggBufferTerm)
val outputResultFromMap = HashAggCodeGenHelper.genAggMapIterationAndOutput(
ctx, isFinal, aggregateMapTerm, reuseGroupKeyTerm, reuseAggBufferTerm, outputExpr)
    // gen code to deal with hash map OOM; if fallback is enabled we will use the sort agg strategy
val sorterTerm = CodeGenUtils.newName("sorter")
val retryAppend = HashAggCodeGenHelper.genRetryAppendToMap(
aggregateMapTerm, currentKeyTerm, initedAggBuffer, lookupInfo, currentAggBufferTerm)
val (dealWithAggHashMapOOM, fallbackToSortAggCode) = HashAggCodeGenHelper.genAggMapOOMHandling(
isFinal,
ctx,
builder,
(grouping, auxGrouping),
aggInfos,
functionIdentifiers,
logTerm,
aggregateMapTerm,
(groupKeyTypesTerm, aggBufferTypesTerm),
(groupKeyRowType, aggBufferRowType),
aggBufferNames,
aggBufferTypes,
outputTerm,
outputType,
outputResultFromMap,
sorterTerm,
retryAppend)
HashAggCodeGenHelper.prepareMetrics(ctx, aggregateMapTerm, if (isFinal) sorterTerm else null)
val lazyInitAggBufferCode = if (auxGrouping.nonEmpty) {
s"""
|// lazy init agg buffer (with auxGrouping)
|${initedAggBuffer.code}
""".stripMargin
} else {
""
}
val processCode =
s"""
| // input field access for group key projection and aggregate buffer update
|${ctx.reuseInputUnboxingCode(inputTerm)}
| // project key from input
|$keyProjectionCode
| // look up output buffer using current group key
|$lookupInfo = ($lookupInfoTypeTerm) $aggregateMapTerm.lookup($currentKeyTerm);
|$currentAggBufferTerm = ($binaryRowTypeTerm) $lookupInfo.getValue();
|
|if (!$lookupInfo.isFound()) {
| $lazyInitAggBufferCode
| // append empty agg buffer into aggregate map for current group key
| try {
| $currentAggBufferTerm =
| $aggregateMapTerm.append($lookupInfo, ${initedAggBuffer.resultTerm});
| } catch (java.io.EOFException exp) {
| $dealWithAggHashMapOOM
| }
|}
| // aggregate buffer fields access
|${ctx.reuseInputUnboxingCode(currentAggBufferTerm)}
| // do aggregate and update agg buffer
|${aggregate.code}
|""".stripMargin.trim
val endInputCode = if (isFinal) {
val memPoolTypeTerm = classOf[BytesHashMapSpillMemorySegmentPool].getName
s"""
|if ($sorterTerm == null) {
| // no spilling, output by iterating aggregate map.
| $outputResultFromMap
|} else {
       | // spill the last part of the input's aggregation output buffer
| $sorterTerm.sortAndSpill(
| $aggregateMapTerm.getRecordAreaMemorySegments(),
| $aggregateMapTerm.getNumElements(),
| new $memPoolTypeTerm($aggregateMapTerm.getBucketAreaMemorySegments()));
| // only release floating memory in advance.
| $aggregateMapTerm.free(true);
| // fall back to sort based aggregation
| $fallbackToSortAggCode
|}
""".stripMargin
} else {
s"$outputResultFromMap"
}
AggCodeGenHelper.generateOperator(
ctx,
className,
classOf[TableStreamOperator[RowData]].getCanonicalName,
processCode,
endInputCode,
inputType)
}
}
|
apache/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/agg/batch/HashAggCodeGenerator.scala
|
Scala
|
apache-2.0
| 8,940 |
/*
* Copyright (C) 2018 Joan Goyeau.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.streams.scala.kstream
import java.time.Duration.ofSeconds
import org.apache.kafka.streams.kstream.JoinWindows
import org.apache.kafka.streams.scala.ImplicitConversions._
import org.apache.kafka.streams.scala.Serdes._
import org.apache.kafka.streams.scala.StreamsBuilder
import org.apache.kafka.streams.scala.utils.TestDriver
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}
@RunWith(classOf[JUnitRunner])
class KStreamTest extends FlatSpec with Matchers with TestDriver {
"filter a KStream" should "filter records satisfying the predicate" in {
val builder = new StreamsBuilder()
val sourceTopic = "source"
val sinkTopic = "sink"
builder.stream[String, String](sourceTopic).filter((_, value) => value != "value2").to(sinkTopic)
val testDriver = createTestDriver(builder)
testDriver.pipeRecord(sourceTopic, ("1", "value1"))
testDriver.readRecord[String, String](sinkTopic).value shouldBe "value1"
testDriver.pipeRecord(sourceTopic, ("2", "value2"))
testDriver.readRecord[String, String](sinkTopic) shouldBe null
testDriver.pipeRecord(sourceTopic, ("3", "value3"))
testDriver.readRecord[String, String](sinkTopic).value shouldBe "value3"
testDriver.readRecord[String, String](sinkTopic) shouldBe null
testDriver.close()
}
"filterNot a KStream" should "filter records not satisfying the predicate" in {
val builder = new StreamsBuilder()
val sourceTopic = "source"
val sinkTopic = "sink"
builder.stream[String, String](sourceTopic).filterNot((_, value) => value == "value2").to(sinkTopic)
val testDriver = createTestDriver(builder)
testDriver.pipeRecord(sourceTopic, ("1", "value1"))
testDriver.readRecord[String, String](sinkTopic).value shouldBe "value1"
testDriver.pipeRecord(sourceTopic, ("2", "value2"))
testDriver.readRecord[String, String](sinkTopic) shouldBe null
testDriver.pipeRecord(sourceTopic, ("3", "value3"))
testDriver.readRecord[String, String](sinkTopic).value shouldBe "value3"
testDriver.readRecord[String, String](sinkTopic) shouldBe null
testDriver.close()
}
"foreach a KStream" should "run foreach actions on records" in {
val builder = new StreamsBuilder()
val sourceTopic = "source"
var acc = ""
builder.stream[String, String](sourceTopic).foreach((_, value) => acc += value)
val testDriver = createTestDriver(builder)
testDriver.pipeRecord(sourceTopic, ("1", "value1"))
acc shouldBe "value1"
testDriver.pipeRecord(sourceTopic, ("2", "value2"))
acc shouldBe "value1value2"
testDriver.close()
}
"peek a KStream" should "run peek actions on records" in {
val builder = new StreamsBuilder()
val sourceTopic = "source"
val sinkTopic = "sink"
var acc = ""
builder.stream[String, String](sourceTopic).peek((_, v) => acc += v).to(sinkTopic)
val testDriver = createTestDriver(builder)
testDriver.pipeRecord(sourceTopic, ("1", "value1"))
acc shouldBe "value1"
testDriver.readRecord[String, String](sinkTopic).value shouldBe "value1"
testDriver.pipeRecord(sourceTopic, ("2", "value2"))
acc shouldBe "value1value2"
testDriver.readRecord[String, String](sinkTopic).value shouldBe "value2"
testDriver.close()
}
"selectKey a KStream" should "select a new key" in {
val builder = new StreamsBuilder()
val sourceTopic = "source"
val sinkTopic = "sink"
builder.stream[String, String](sourceTopic).selectKey((_, value) => value).to(sinkTopic)
val testDriver = createTestDriver(builder)
testDriver.pipeRecord(sourceTopic, ("1", "value1"))
testDriver.readRecord[String, String](sinkTopic).key shouldBe "value1"
testDriver.pipeRecord(sourceTopic, ("1", "value2"))
testDriver.readRecord[String, String](sinkTopic).key shouldBe "value2"
testDriver.readRecord[String, String](sinkTopic) shouldBe null
testDriver.close()
}
"join 2 KStreams" should "join correctly records" in {
val builder = new StreamsBuilder()
val sourceTopic1 = "source1"
val sourceTopic2 = "source2"
val sinkTopic = "sink"
val stream1 = builder.stream[String, String](sourceTopic1)
val stream2 = builder.stream[String, String](sourceTopic2)
stream1.join(stream2)((a, b) => s"$a-$b", JoinWindows.of(ofSeconds(1))).to(sinkTopic)
val now = System.currentTimeMillis()
val testDriver = createTestDriver(builder, now)
testDriver.pipeRecord(sourceTopic1, ("1", "topic1value1"), now)
testDriver.pipeRecord(sourceTopic2, ("1", "topic2value1"), now)
testDriver.readRecord[String, String](sinkTopic).value shouldBe "topic1value1-topic2value1"
testDriver.readRecord[String, String](sinkTopic) shouldBe null
testDriver.close()
}
}
|
KevinLiLu/kafka
|
streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala
|
Scala
|
apache-2.0
| 5,678 |
import scala.reflect.runtime.universe._
class Foo(bar: String) extends annotation.ConstantAnnotation
object Test extends App {
val tree = reify{@Foo(bar = "qwe") class C}.tree
println(tree.toString)
}
|
scala/scala
|
test/files/run/t5224.scala
|
Scala
|
apache-2.0
| 207 |
/*
* Copyright (c) 2015-2017 Toby Weston
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s4j.scala.chapter19
import scala.collection.mutable
class Mappable[A](elements: List[A]) {
def map[B](f: A => B): List[B] = {
val result = mutable.ListBuffer[B]()
elements.foreach {
result += f(_)
}
result.toList
}
// recursive version (with a nested def)
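  // note: this recursive version assumes a non-empty input list; recur(elements.head, elements.tail) throws on Nil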
def recur_map[B](f: A => B): List[B] = {
def recur(head: A, tail: List[A]): List[B] = {
tail match {
case Nil => List(f(head))
case _ => f(head) +: recur(tail.head, tail.tail)
}
}
recur(elements.head, elements.tail)
}
// tail recursive version
def tail_recur_map[B](f: A => B): List[B] = {
def recur(accumulator: List[B], elements: List[A]): List[B] = {
elements match {
case Nil => accumulator
case head :: tail => recur(accumulator :+ f(head), tail)
}
}
recur(List[B](), elements)
}
}
package map {
object Example extends App {
val numbers = List(1, 2, 54, 4, 12, 43, 54, 23, 34)
val mappable = new Mappable(numbers)
println(mappable.map(_ * 2))
println(mappable.recur_map(_ * 2))
println(mappable.tail_recur_map(_ * 2))
}
}
|
tobyweston/learn-scala-java-devs
|
src/main/scala/s4j/scala/chapter19/Mappable.scala
|
Scala
|
apache-2.0
| 1,759 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.core.j
import javax.inject.Inject
import play.api.http.HttpRequestHandler
import play.api.mvc.RequestHeader
import play.http.{ HttpRequestHandler => JHttpRequestHandler, HandlerForRequest }
import play.mvc.Http.{ RequestHeader => JRequestHeader }
/**
* Adapter from a Java HttpRequestHandler to a Scala HttpRequestHandler
*/
class JavaHttpRequestHandlerAdapter @Inject() (underlying: JHttpRequestHandler) extends HttpRequestHandler {
override def handlerForRequest(request: RequestHeader) = {
val handlerForRequest = underlying.handlerForRequest(new RequestHeaderImpl(request))
(handlerForRequest.getRequestHeader.asScala, handlerForRequest.getHandler)
}
}
/**
 * Adapter from a Scala HttpRequestHandler to a Java HttpRequestHandler
*/
class JavaHttpRequestHandlerDelegate @Inject() (underlying: HttpRequestHandler) extends JHttpRequestHandler {
override def handlerForRequest(requestHeader: JRequestHeader) = {
val (newRequest, handler) = underlying.handlerForRequest(requestHeader.asScala())
new HandlerForRequest(new RequestHeaderImpl(newRequest), handler)
}
}
|
wsargent/playframework
|
framework/src/play/src/main/scala/play/core/j/JavaHttpRequestHandlerAdapter.scala
|
Scala
|
apache-2.0
| 1,185 |
/*
* Copyright (C) 2017 HAT Data Exchange Ltd
* SPDX-License-Identifier: AGPL-3.0
*
* This file is part of the Hub of All Things project (HAT).
*
* HAT is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License
* as published by the Free Software Foundation, version 3 of
* the License.
*
* HAT is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General
* Public License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*
* Written by Andrius Aucinas <[email protected]>
* 2 / 2017
*/
import sbt.Keys._
import sbt._
////*******************************
//// Basic settings
////*******************************
object BasicSettings extends AutoPlugin {
override def trigger = allRequirements
object autoImport {
object BuildEnv extends Enumeration {
val Production, Stage, Test, Developement = Value
}
val buildEnv = settingKey[BuildEnv.Value]("the current build environment")
def excludeSpecs2(module: ModuleID): ModuleID = {
module.excludeAll(
ExclusionRule(organization = "org.specs2"),
ExclusionRule(organization = "org.seleniumhq.selenium")
)
}
}
import autoImport._
override def projectSettings =
Seq(
organization := "org.hatdex",
version := "2.6.9-SNAPSHOT",
name := "HAT",
resolvers ++= Dependencies.resolvers,
scalaVersion := Dependencies.Versions.scalaVersion,
crossScalaVersions := Dependencies.Versions.crossScala,
scalacOptions ++= Seq(
"-deprecation", // Emit warning and location for usages of deprecated APIs.
"-encoding",
"utf-8", // Specify character encoding used by source files.
"-explaintypes", // Explain type errors in more detail.
"-feature", // Emit warning and location for usages of features that should be imported explicitly.
"-language:existentials", // Existential types (besides wildcard types) can be written and inferred
"-language:experimental.macros", // Allow macro definition (besides implementation and application)
"-language:higherKinds", // Allow higher-kinded types
"-language:implicitConversions", // Allow definition of implicit functions called views
"-unchecked", // Enable additional warnings where generated code depends on assumptions.
"-Xcheckinit", // Wrap field accessors to throw an exception on uninitialized access.
//"-Xfatal-warnings", // Fail the compilation if there are any warnings.
"-Xfuture", // Turn on future language features.
"-Xlint:adapted-args", // Warn if an argument list is modified to match the receiver.
"-Xlint:by-name-right-associative", // By-name parameter of right associative operator.
"-Xlint:constant", // Evaluation of a constant arithmetic expression results in an error.
"-Xlint:delayedinit-select", // Selecting member of DelayedInit.
"-Xlint:doc-detached", // A Scaladoc comment appears to be detached from its element.
"-Xlint:inaccessible", // Warn about inaccessible types in method signatures.
"-Xlint:infer-any", // Warn when a type argument is inferred to be `Any`.
"-Xlint:missing-interpolator", // A string literal appears to be missing an interpolator id.
"-Xlint:nullary-override", // Warn when non-nullary `def f()' overrides nullary `def f'.
"-Xlint:nullary-unit", // Warn when nullary methods return Unit.
"-Xlint:option-implicit", // Option.apply used implicit view.
"-Xlint:package-object-classes", // Class or object defined in package object.
"-Xlint:poly-implicit-overload", // Parameterized overloaded implicit methods are not visible as view bounds.
"-Xlint:private-shadow", // A private field (or class parameter) shadows a superclass field.
"-Xlint:stars-align", // Pattern sequence wildcard must align with sequence component.
"-Xlint:type-parameter-shadow", // A local type parameter shadows a type already in scope.
"-Xlint:unsound-match", // Pattern match may not be typesafe.
"-Yno-adapted-args", // Do not adapt an argument list (either by inserting () or creating a tuple) to match the receiver.
"-Ypartial-unification", // Enable partial unification in type constructor inference
"-Ywarn-dead-code", // Warn when dead code is identified.
"-Ywarn-extra-implicit", // Warn when more than one implicit parameter section is defined.
"-Ywarn-inaccessible", // Warn about inaccessible types in method signatures.
"-Ywarn-infer-any", // Warn when a type argument is inferred to be `Any`.
"-Ywarn-nullary-override", // Warn when non-nullary `def f()' overrides nullary `def f'.
"-Ywarn-nullary-unit", // Warn when nullary methods return Unit.
"-Ywarn-numeric-widen", // Warn when numerics are widened.
"-Ywarn-unused:implicits", // Warn if an implicit parameter is unused.
"-Ywarn-unused:imports", // Warn if an import selector is not referenced.
"-Ywarn-unused:locals", // Warn if a local definition is unused.
"-Ywarn-unused:params", // Warn if a value parameter is unused.
"-Ywarn-unused:patvars", // Warn if a variable bound in a pattern is unused.
"-Ywarn-unused:privates", // Warn if a private member is unused.
"-Ywarn-value-discard" // Warn when non-Unit expression results are unused.
),
scalacOptions in Test ~= { (options: Seq[String]) =>
options filterNot (_ == "-Ywarn-dead-code") // Allow dead code in tests (to support using mockito).
},
parallelExecution in Test := false,
fork in Test := true,
// Needed to avoid https://github.com/travis-ci/travis-ci/issues/3775 in forked tests
// in Travis with `sudo: false`.
// See https://github.com/sbt/sbt/issues/653
// and https://github.com/travis-ci/travis-ci/issues/3775
javaOptions += "-Xmx1G",
buildEnv := {
sys.props
.get("env")
.orElse(sys.env.get("BUILD_ENV"))
.flatMap {
case "prod" => Some(BuildEnv.Production)
case "stage" => Some(BuildEnv.Stage)
case "test" => Some(BuildEnv.Test)
case "dev" => Some(BuildEnv.Developement)
case unknown => None
}
.getOrElse(BuildEnv.Developement)
},
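      // e.g. select the environment with `sbt -Denv=prod` or by exporting BUILD_ENV=prod;
      // any other (or missing) value falls back to Developement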
      // give feedback
onLoadMessage := {
// depend on the old message as well
val defaultMessage = onLoadMessage.value
val env = buildEnv.value
s"""|$defaultMessage
|Running in build environment: $env""".stripMargin
}
)
}
////*******************************
//// ScalaDoc settings
////*******************************
//object Doc extends AutoPlugin {
//
// import play.core.PlayVersion
//
// override def projectSettings = Seq(
// autoAPIMappings := true,
// apiURL := Some(url(s"http://hub-of-all-things.github.io/doc/${version.value}/")),
// apiMappings ++= {
// implicit val cp = (fullClasspath in Compile).value
// Map(
// jarFor("com.typesafe.play", "play") -> url(s"http://www.playframework.com/documentation/${PlayVersion.current}/api/scala/"),
// scalaInstance.value.libraryJar -> url(s"http://www.scala-lang.org/api/${scalaVersion.value}/")
// )
// }
// )
//
// /**
// * Gets the JAR file for a package.
// *
// * @param organization The organization name.
// * @param name The name of the package.
// * @param cp The class path.
// * @return The file which points to the JAR.
// * @see http://stackoverflow.com/a/20919304/2153190
// */
// private def jarFor(organization: String, name: String)(implicit cp: Seq[Attributed[File]]): File = {
// (for {
// entry <- cp
// module <- entry.get(moduleID.key)
// if module.organization == organization
// if module.name.startsWith(name)
// jarFile = entry.data
// } yield jarFile).head
// }
//}
|
Hub-of-all-Things/HAT2.0
|
project/BuildSettings.scala
|
Scala
|
agpl-3.0
| 8,317 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.stats.buffers
import scala.collection.mutable
import io.gatling.charts.stats.{ GroupRecord, RequestRecord }
import io.gatling.commons.stats.{ Group, KO, Status }
import io.gatling.core.config.GatlingConfiguration
private[stats] trait ResponseTimeRangeBuffers {
val responseTimeRangeBuffers = mutable.Map.empty[BufferKey, ResponseTimeRangeBuffer]
def getResponseTimeRangeBuffers(requestName: Option[String], group: Option[Group])(implicit configuration: GatlingConfiguration): ResponseTimeRangeBuffer =
responseTimeRangeBuffers.getOrElseUpdate(BufferKey(requestName, group, None), new ResponseTimeRangeBuffer)
def updateResponseTimeRangeBuffer(record: RequestRecord)(implicit configuration: GatlingConfiguration): Unit = {
import record._
getResponseTimeRangeBuffers(Some(name), group).update(responseTime, status)
getResponseTimeRangeBuffers(None, None).update(responseTime, status)
}
def updateGroupResponseTimeRangeBuffer(record: GroupRecord)(implicit configuration: GatlingConfiguration): Unit =
getResponseTimeRangeBuffers(None, Some(record.group)).update(record.duration, record.status)
class ResponseTimeRangeBuffer(implicit configuration: GatlingConfiguration) {
var low = 0
var middle = 0
var high = 0
var ko = 0
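    // Classify one observation: KO responses are counted separately, everything else is
    // bucketed against the configured lower/higher indicator bounds.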
def update(time: Int, status: Status): Unit = {
if (status == KO) ko += 1
else if (time < configuration.charting.indicators.lowerBound) low += 1
else if (time > configuration.charting.indicators.higherBound) high += 1
else middle += 1
}
}
}
|
ryez/gatling
|
gatling-charts/src/main/scala/io/gatling/charts/stats/buffers/ResponseTimeRangeBuffers.scala
|
Scala
|
apache-2.0
| 2,199 |
package api
import spray.http.StatusCodes._
import spray.http._
import spray.routing._
import directives.{CompletionMagnet, RouteDirectives}
import spray.util.{SprayActorLogging, LoggingContext}
import util.control.NonFatal
import spray.httpx.marshalling.Marshaller
import spray.http.HttpHeaders.RawHeader
import akka.actor.{ActorLogging, Actor}
/**
* Holds potential error response with the HTTP status and optional body
*
* @param responseStatus the status code
* @param response the optional body
*/
case class ErrorResponseException(responseStatus: StatusCode, response: Option[HttpEntity]) extends Exception
/**
 * Allows you to construct a Spray ``HttpService`` from a concatenation of routes and wires in the error handler.
* It also logs all internal server errors using ``SprayActorLogging``.
*
* @param route the (concatenated) route
*/
class RoutedHttpService(route: Route) extends Actor with HttpService with SprayActorLogging {
implicit def actorRefFactory = context
implicit val handler = ExceptionHandler {
case NonFatal(ErrorResponseException(statusCode, entity)) => ctx =>
ctx.complete(statusCode, entity)
case NonFatal(e) => ctx => {
log.error(e, InternalServerError.defaultMessage)
ctx.complete(InternalServerError)
}
}
def receive: Receive =
runRoute(route)(handler, RejectionHandler.Default, context, RoutingSettings.default, LoggingContext.fromActorRefFactory)
}
|
opyate/taximeter
|
src/main/scala/api/services.scala
|
Scala
|
mit
| 1,445 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.tools
import com.beust.jcommander.JCommander
import org.locationtech.geomesa.tools.export.{ConvertCommand, GenerateAvroSchemaCommand}
import org.locationtech.geomesa.tools.status._
import org.locationtech.geomesa.tools.{Command, Runner}
object KuduRunner extends Runner {
override val name: String = "geomesa-kudu"
override def createCommands(jc: JCommander): Seq[Command] = Seq(
new data.KuduCreateSchemaCommand,
new data.KuduDeleteCatalogCommand,
new data.KuduRemoveSchemaCommand,
new data.KuduUpdateSchemaCommand,
new export.KuduExportCommand,
new ingest.KuduDeleteFeaturesCommand,
new ingest.KuduIngestCommand,
new status.KuduDescribeSchemaCommand,
new status.KuduExplainCommand,
new status.KuduKeywordsCommand,
new status.KuduGetTypeNamesCommand,
new status.KuduGetSftConfigCommand,
new stats.KuduStatsBoundsCommand,
new stats.KuduStatsCountCommand,
new stats.KuduStatsTopKCommand,
new stats.KuduStatsHistogramCommand,
// common commands, placeholders for script functions
new ConvertCommand,
new ConfigureCommand,
new ClasspathCommand,
new EnvironmentCommand,
new GenerateAvroSchemaCommand,
new HelpCommand(this, jc),
new ScalaConsoleCommand,
new VersionCommand
)
}
|
elahrvivaz/geomesa
|
geomesa-kudu/geomesa-kudu-tools/src/main/scala/org/locationtech/geomesa/kudu/tools/KuduRunner.scala
|
Scala
|
apache-2.0
| 1,785 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib
import slamdata.Predef.Int
import quasar.contrib.matryoshka.LazyEqual
import _root_.matryoshka.Delay
import _root_.scalaz.{Show, Functor, Equal, Traverse, Order}
import _root_.iotaz.{CopK, TListK}
package object iota {
implicit def copkFunctor[LL <: TListK](implicit M: FunctorMaterializer[LL]): Functor[CopK[LL, ?]] = M.materialize(offset = 0)
implicit def copkTraverse[LL <: TListK](implicit M: TraverseMaterializer[LL]): Traverse[CopK[LL, ?]] = M.materialize(offset = 0)
implicit def copkEqual[LL <: TListK](implicit M: EqualKMaterializer[LL]): Delay[Equal, CopK[LL, ?]] = M.materialize(offset = 0)
implicit def copkShow[LL <: TListK](implicit M: ShowKMaterializer[LL]): Delay[Show, CopK[LL, ?]] = M.materialize(offset = 0)
implicit def copkOrder[LL <: TListK](implicit M: OrderKMaterializer[LL]): Delay[Order, CopK[LL, ?]] = M.materialize(offset = 0)
implicit def copkLazyEqual[LL <: TListK](implicit M: LazyEqualKMaterializer[LL]): Delay[LazyEqual, CopK[LL, ?]] = M.materialize(offset = 0)
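  // Builds a CopK injection for F into CopK[LL, ?] at position i of the type-level list LL.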
def mkInject[F[_], LL <: TListK](i: Int): CopK.Inject[F, CopK[LL, ?]] = {
CopK.Inject.injectFromInjectL[F, LL](
CopK.InjectL.makeInjectL[F, LL](
new TListK.Pos[LL, F] { val index: Int = i }
)
)
}
type ACopK[a] = CopK[_, a]
type :<<:[F[_], G[a] <: ACopK[a]] = CopK.Inject[F, G]
}
|
quasar-analytics/quasar
|
foundation/src/main/scala/quasar/contrib/iota/package.scala
|
Scala
|
apache-2.0
| 1,948 |
package twitter
case class Tweet(
contributors: Option[Seq[Contributor]],
coordinates: Option[Coordinates],
created_at: String,
current_user_retweet: Option[Map[String, String]],
entities: Option[Entities],
favorite_count: Option[Int],
favorited: Option[Boolean],
filter_level: String,
id: Long,
id_str: String,
in_reply_to_screen_name: Option[String],
in_reply_to_status_id: Option[Long],
in_reply_to_status_id_str: Option[String],
in_reply_to_user_id: Option[Long],
in_reply_to_user_id_str: Option[String],
lang: Option[String],
place: Option[Place],
possibly_sensitive: Option[Boolean],
quoted_status_id: Option[Long],
quoted_status_id_str: Option[String],
quoted_status: Option[Tweet],
scopes: Option[Map[String, Either[String, Boolean]]],
retweet_count: Int,
retweeted: Option[Boolean],
retweeted_status: Option[Tweet],
source: String,
text: String,
truncated: Boolean,
user: Users,
withheldCopyright: Option[Boolean],
withheldInCountries: Option[Seq[String]],
withheldScope: Option[String]
)
case class Users(
contributors_enabled: Boolean,
created_at: String,
default_profile: Boolean,
default_profile_image: Boolean,
description: Option[String],
entities: Option[Entities],
favourites_count: Int,
follow_request_sent: Option[Boolean],
following: Option[Boolean],
followers_count: Int,
friends_count: Int,
geo_enabled: Boolean,
id: Long,
id_str: String,
is_translator: Boolean,
lang: String,
listed_count: Int,
location: Option[String],
name: String,
notifications: Option[Boolean],
profile_background_color: String,
profile_background_image_url: String,
profile_background_image_url_https: String,
profile_background_tile: Boolean,
profile_banner_url: Option[String],
profile_image_url: String,
profile_image_url_https: String,
profile_link_color: String,
profile_sidebar_border_color: String,
profile_sidebar_fill_color: String,
profile_text_color: String,
profile_use_background_image: Boolean,
`protected`: Boolean,
screen_name: String,
show_all_inline_media: Option[Boolean],
status: Option[Tweets],
statuses_count: Int,
time_zone: Option[String],
url: Option[String],
utc_offset: Option[Int],
verified: Boolean,
withheld_in_countries: Option[String],
withheld_scope: Option[String]
)
case class Tweets(
contributors: Seq[Contributor],
coordinates: Coordinates,
created_at: String,
current_user_retweet: Option[Map[String, String]],
entities: Option[Entities],
favorite_count: Option[Int],
favorited: Option[Boolean],
filter_level: String,
id: Long,
id_str: String,
in_reply_to_screen_name: Option[String],
in_reply_to_status_id: Option[Long],
in_reply_to_status_id_str: Option[String],
in_reply_to_user_id: Option[Long],
in_reply_to_user_id_str: Option[String],
lang: Option[String],
place: Option[Place],
possibly_sensitive: Option[Boolean],
quoted_status_id: Option[Long],
quoted_status_id_str: Option[String],
quoted_status: Option[Tweet],
scopes: Option[Map[String, String]],
retweet_count: Int,
retweeted: Option[Boolean],
retweeted_status: Option[Tweet],
source: String,
text: String,
truncated: Boolean,
user: Users,
withheld_copyright: Option[Boolean],
withheld_in_countries: Option[Seq[String]],
withheld_scope: Option[String]
)
case class Place(
attributes: Map[String, String],
bounding_box: BoundingBox,
country: String,
country_code: String,
full_name: String,
id: String,
name: String,
place_type: String,
url: String
)
case class Media(
display_url: Option[String],
expanded_url: Option[String],
id: Long,
id_str: String,
indices: Seq[Int],
media_url: String,
media_url_https: String,
sizes: Sizes,
source_status_id: Option[Long],
source_status_id_str: Option[String],
`type`: String,
url: String
)
case class BoundingBox(coordinates: Seq[Seq[Seq[Float]]], `type`: String)
case class Contributor(id: Long, id_str: String, screen_name: String)
case class Coordinates(coordinates: Seq[Float], `type`: String)
case class Entities(hashtags: Seq[Hashtag], media: Option[Seq[Media]], urls: Seq[Url], user_mentions: Option[Seq[UserMention]])
case class Sizes(thumb: Size, large: Size, medium: Size, small: Size)
case class Size(h: Int, w: Int, resize: String)
case class Hashtag(indices: Seq[Int], text: String)
case class Url(display_url: Option[String], expanded_url: Option[String], indices: Seq[Int], url: String)
case class UserMention(id: Long, id_str: String, indices: Seq[Int], name: String, screen_name: String)
|
callicles/akka-streams-intro
|
src/main/scala/twitter/Tweet.scala
|
Scala
|
mit
| 6,759 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.laws._
import cats.laws.discipline._
import monix.eval.{Coeval, Task}
import monix.execution.exceptions.DummyException
import monix.tail.batches.BatchCursor
object IterantTakeLastSuite extends BaseTestSuite {
test("Iterant.takeLast is equivalent with List.takeRight") { implicit s =>
check3 { (list: List[Int], idx: Int, nr: Int) =>
val iter = arbitraryListToIterant[Task, Int](list, math.abs(idx) + 1).onErrorIgnore
val stream = iter ++ Iterant[Task].of(1, 2, 3)
val n = if (nr == 0) 0 else math.abs(math.abs(nr) % 20)
stream.takeLast(n).toListL <-> stream.toListL.map(_.takeRight(n))
}
}
test("Iterant.takeLast protects against broken batches") { implicit s =>
check1 { (iter: Iterant[Task, Int]) =>
val dummy = DummyException("dummy")
val suffix = Iterant[Task].nextBatchS[Int](new ThrowExceptionBatch(dummy), Task.now(Iterant[Task].empty))
val stream = iter.onErrorIgnore ++ suffix
val received = stream.takeLast(10)
received <-> Iterant[Task].haltS[Int](Some(dummy))
}
}
test("Iterant.takeLast protects against broken cursors") { implicit s =>
check1 { (iter: Iterant[Task, Int]) =>
val dummy = DummyException("dummy")
val suffix = Iterant[Task].nextCursorS[Int](new ThrowExceptionCursor(dummy), Task.now(Iterant[Task].empty))
val stream = iter.onErrorIgnore ++ suffix
val received = stream.takeLast(10)
received <-> Iterant[Task].haltS[Int](Some(dummy))
}
}
test("Iterant.takeLast preserves the source guarantee") { implicit s =>
var effect = 0
val stop = Coeval.eval(effect += 1)
val source = Iterant[Coeval].nextCursorS(BatchCursor(1,2,3), Coeval.now(Iterant[Coeval].empty[Int])).guarantee(stop)
val stream = source.takeLast(3)
stream.completedL.value()
assertEquals(effect, 1)
}
}
|
Wogan/monix
|
monix-tail/shared/src/test/scala/monix/tail/IterantTakeLastSuite.scala
|
Scala
|
apache-2.0
| 2,549 |
/*
* Copyright 2018 Dell Inc. or its subsidiaries. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://www.apache.org/licenses/LICENSE-2.0.txt
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.dellemc.ecs.s3.sample
import com.amazonaws.services.s3.AmazonS3
import com.amazonaws.services.s3.model.ObjectMetadata
import com.amazonaws.util.StringInputStream
object _03_UpdateObjects extends BucketAndObjectValidator {
/**
* Run the class.
*
* @param args
*/
def main(args: Array[String]): Unit = {
val newContent: String = "new object content"
updateObject(AWSS3Factory.getS3ClientWithV4Signatures(), AWSS3Factory.S3_BUCKET, AWSS3Factory.S3_OBJECT, newContent)
}
/**
* Check the element content, update it, then verify that it has changed.
*
* @param s3Client the client to use
* @param bucketName the bucket to use
* @param key the object to change
* @param newContent the new content
*/
def updateObject(s3Client: AmazonS3, bucketName: String, key: String, newContent: String) = {
try {
checkObjectContent(s3Client, bucketName, key)
val metadata: ObjectMetadata = new ObjectMetadata()
metadata.setContentLength(newContent.length())
s3Client.putObject(bucketName, key, new StringInputStream( newContent ), metadata)
checkObjectContent(s3Client, bucketName, key)
} catch { case e: Exception => outputException(e) }
println()
}
}
|
EMCECS/ecs-samples
|
aws-scala-workshop/src/main/scala/com/dellemc/ecs/s3/sample/_03_UpdateObjects.scala
|
Scala
|
apache-2.0
| 1,922 |
package graffiti
import scala.concurrent.Future
/**
* @author Alexander De Leon <[email protected]>
*/
package object oauth {
type TokenAuthenticator[U] = Option[String] => Future[Option[U]]
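  // A minimal sketch (with hypothetical names) of what an implementation could look like:
  //
  //   val bearerAuth: TokenAuthenticator[String] = {
  //     case Some(token) if token == "secret" => Future.successful(Some("alice"))
  //     case _                                 => Future.successful(None)
  //   }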
}
|
alexdeleon/graffiti
|
core/src/main/scala/graffiti/oauth/package.scala
|
Scala
|
mit
| 200 |
package demo
package components
package materialui
import chandu0101.scalajs.react.components.materialui.{MuiList, MuiListItem}
import demo.components.materialui.svgicons._
import japgolly.scalajs.react.ReactComponentB
import japgolly.scalajs.react.vdom.prefix_<^._
object MuiListDemo {
val code =
"""
| MuiList()(
| MuiListItem(leftIcon = ContentInbox())("Inbox")
| MuiListItem(leftIcon = ActionGrade())("Starred"),
| MuiListItem(leftIcon = ContentSend())("Sent Mail"),
| MuiListItem(leftIcon = ContentDrafts())("Drafts")
| )
|
""".stripMargin
val component = ReactComponentB[Unit]("MuiListDemo")
.render(P => {
<.div(
CodeExample(code, "MuiList")(
MobileTearSheet(
MuiList()(
MuiListItem(leftIcon = ContentInbox())("Inbox"),
MuiListItem(leftIcon = ActionGrade())("Starred"),
MuiListItem(leftIcon = ContentSend())("Sent Mail"),
MuiListItem(leftIcon = ContentDrafts())("Drafts")
)
)
)
)
}).buildU
def apply() = component()
}
|
tpdi/scalajs-react-components
|
demo/src/main/scala/demo/components/materialui/MuiListDemo.scala
|
Scala
|
apache-2.0
| 1,148 |
/*
* Copyright 2015 Dmitriy Yefremov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.yefremov.sleipnir.generator.types
import com.linkedin.data.schema.EnumDataSchema
import net.yefremov.sleipnir.generator.txt.EnumTemplate
import net.yefremov.sleipnir.generator.GeneratedClass
import scala.collection.JavaConverters._
/**
* A generator for [[EnumDataSchema]] types.
* @author Dmitriy Yefremov
*/
class EnumTypeGenerator(override val schema: EnumDataSchema,
override val parentGenerator: Option[TypeGenerator],
override val namespacePrefix: Option[String],
override val filename: String) extends AbstractTypeGenerator {
override val name: TypeName = escapeScalaReserved(alias.getOrElse {
TypeName(
schema.getName,
namespace(schema.getNamespace),
namespace(schema.getFullName),
namespace(s"${schema.getFullName}.Type")
)
})
val symbols: String = {
val symbols = schema.getSymbols.asScala
val escaped = symbols.map(escapeScalaReserved)
escaped.mkString(", ")
}
override val referencedGenerators: Seq[TypeGenerator] = Seq.empty
override def generateClass: Option[GeneratedClass] = {
logger.info(s"Generating ${name.fullClassName}")
val source = EnumTemplate(this).toString()
generatedClass(source)
}
}
|
dmitriy-yefremov/sleipnir
|
generator/src/main/scala/net/yefremov/sleipnir/generator/types/EnumTypeGenerator.scala
|
Scala
|
apache-2.0
| 1,908 |
package flyweight
case class Code(code: String)
|
BBK-PiJ-2015-67/sdp-portfolio
|
exercises/week11/src/main/scala/flyweight/Code.scala
|
Scala
|
unlicense
| 49 |
package controllers
import models.User
trait WebController extends AuthController[User]
|
chang850/play_slick
|
app/controllers/WebController.scala
|
Scala
|
mit
| 90 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import models._
import views._
object Application extends Controller {
// -- Authentication
val loginForm = Form(
tuple(
"email" -> text,
"password" -> text
) verifying ("Invalid email or password", result => result match {
case (email, password) => User.authenticate(email, password).isDefined
})
)
/**
* Login page.
*/
def login = Action { implicit request =>
Ok(html.login(loginForm))
}
/**
* Handle login form submission.
*/
def authenticate = Action { implicit request =>
loginForm.bindFromRequest.fold(
formWithErrors => BadRequest(html.login(formWithErrors)),
user => Redirect(routes.Projects.index).withSession("email" -> user._1)
)
}
/**
* Logout and clean the session.
*/
def logout = Action {
Redirect(routes.Application.login).withNewSession.flashing(
"success" -> "You've been logged out"
)
}
// -- Javascript routing
def javascriptRoutes = Action { implicit request =>
import routes.javascript._
Ok(
Routes.javascriptRouter("jsRoutes")(
Projects.add, Projects.delete, Projects.rename,
Projects.addGroup, Projects.deleteGroup, Projects.renameGroup,
Projects.addUser, Projects.removeUser, Tasks.addFolder,
Tasks.renameFolder, Tasks.deleteFolder, Tasks.index,
Tasks.add, Tasks.update, Tasks.delete
)
)
}
}
/**
* Provide security features
*/
trait Secured {
/**
* Retrieve the connected user email.
*/
private def username(request: RequestHeader) = request.session.get("email")
/**
* Redirect to login if the user in not authorized.
*/
private def onUnauthorized(request: RequestHeader) = Results.Redirect(routes.Application.login)
// --
/**
* Action for authenticated users.
*/
def IsAuthenticated(f: => String => Request[AnyContent] => Result) = Security.Authenticated(username, onUnauthorized) { user =>
Action(request => f(user)(request))
}
/**
* Check if the connected user is a member of this project.
*/
def IsMemberOf(project: Long)(f: => String => Request[AnyContent] => Result) = IsAuthenticated { user => request =>
if(Project.isMember(project, user)) {
f(user)(request)
} else {
Results.Forbidden
}
}
/**
* Check if the connected user is a owner of this task.
*/
def IsOwnerOf(task: Long)(f: => String => Request[AnyContent] => Result) = IsAuthenticated { user => request =>
if(Task.isOwner(task, user)) {
f(user)(request)
} else {
Results.Forbidden
}
}
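  // A minimal usage sketch (hypothetical action) for a controller mixing in Secured:
  //
  //   def dashboard = IsAuthenticated { user => implicit request =>
  //     Ok("Hello " + user)
  //   }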
}
|
gslowikowski/scoverage-maven-samples
|
playframework/singlemodule/zentasks/zentasks-scala-2.11/app/controllers/Application.scala
|
Scala
|
apache-2.0
| 2,710 |
/*
* Copyright 2015 eleflow.com.br.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.param.shared
import org.apache.spark.ml.TimeSeriesEstimator
import org.apache.spark.ml.evaluation.KeyValueEvaluator
import org.apache.spark.ml.param.Param
import org.apache.spark.ml.tuning.ValidatorParams
/**
* Created by dirceu on 25/04/16.
*/
trait TimeSeriesBestModelFinderParam[T] extends ValidatorParams {
/**
   * Param for the evaluator used to select hyper-parameters that maximize the validated metric.
*
* @group param
*/
val keyValueEvaluator: Param[KeyValueEvaluator[T]] = new Param(
this,
"keyValueEvaluator",
"evaluator used to select hyper-parameters that maximize the validated metric"
)
/**
* param for the estimator to be validated
*
* @group param
*/
val timeSeriesEstimator: Param[TimeSeriesEstimator[T, _]] = new Param(
this,
"timeSeriesEstimator",
"timeseries estimator for selection"
)
/** @group getParam */
def getTimeSeriesEstimator: TimeSeriesEstimator[T, _] =
$(timeSeriesEstimator)
def getKeyValueEvaluator: KeyValueEvaluator[T] = $(keyValueEvaluator)
}
|
eleflow/uberdata
|
iuberdata_core/src/main/scala/org/apache/spark/ml/param/shared/TimeSeriesBestModelFinderParam.scala
|
Scala
|
apache-2.0
| 1,697 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.util.Properties
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.memory.TaskMemoryManager
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.metrics.source.Source
import org.apache.spark.util._
private[spark] class TaskContextImpl(
val stageId: Int,
val partitionId: Int,
override val taskAttemptId: Long,
override val attemptNumber: Int,
var _taskMemoryManager: TaskMemoryManager,
localProperties: Properties,
@transient private val metricsSystem: MetricsSystem,
// The default value is only used in tests.
override val taskMetrics: TaskMetrics = TaskMetrics.empty,
var batchId: Int = 0)
extends TaskContext
with Logging {
/** List of callback functions to execute when the task completes. */
@transient private val onCompleteCallbacks = new ArrayBuffer[TaskCompletionListener]
/** List of callback functions to execute when the task fails. */
@transient private val onFailureCallbacks = new ArrayBuffer[TaskFailureListener]
// Whether the corresponding task has been killed.
@volatile private var interrupted: Boolean = false
// Whether the task has completed.
@volatile private var completed: Boolean = false
// Whether the task has failed.
@volatile private var failed: Boolean = false
override def addTaskCompletionListener(listener: TaskCompletionListener): this.type = {
onCompleteCallbacks += listener
this
}
override def addTaskFailureListener(listener: TaskFailureListener): this.type = {
onFailureCallbacks += listener
this
}
override def taskMemoryManager(): TaskMemoryManager = _taskMemoryManager
/** Marks the task as failed and triggers the failure listeners. */
private[spark] def markTaskFailed(error: Throwable): Unit = {
// failure callbacks should only be called once
if (failed) return
failed = true
val errorMsgs = new ArrayBuffer[String](2)
// Process failure callbacks in the reverse order of registration
onFailureCallbacks.reverse.foreach { listener =>
try {
listener.onTaskFailure(this, error)
} catch {
case e: Throwable =>
errorMsgs += e.getMessage
logError("Error in TaskFailureListener", e)
}
}
if (errorMsgs.nonEmpty) {
throw new TaskCompletionListenerException(errorMsgs, Option(error))
}
}
/** Marks the task as completed and triggers the completion listeners. */
private[spark] def markTaskCompleted(): Unit = {
completed = true
val errorMsgs = new ArrayBuffer[String](2)
// Process complete callbacks in the reverse order of registration
onCompleteCallbacks.reverse.foreach { listener =>
try {
listener.onTaskCompletion(this)
} catch {
case e: Throwable =>
errorMsgs += e.getMessage
logError("Error in TaskCompletionListener", e)
}
}
if (errorMsgs.nonEmpty) {
throw new TaskCompletionListenerException(errorMsgs)
}
}
/** Marks the task for interruption, i.e. cancellation. */
private[spark] def markInterrupted(): Unit = {
interrupted = true
}
override def isCompleted(): Boolean = completed
override def isRunningLocally(): Boolean = false
override def isInterrupted(): Boolean = interrupted
override def getLocalProperty(key: String): String = localProperties.getProperty(key)
override def getMetricsSources(sourceName: String): Seq[Source] =
metricsSystem.getSourcesByName(sourceName)
private[spark] override def registerAccumulator(a: AccumulatorV2[_, _]): Unit = {
taskMetrics.registerAccumulator(a)
}
}
|
likithkailas/StreamingSystems
|
core/src/main/scala/org/apache/spark/TaskContextImpl.scala
|
Scala
|
apache-2.0
| 4,548 |
package genetic.func
import java.lang.Math._
import java.util.Random
import genetic.func.Func._
import genetic.util.Util.formatScientific
abstract class Func(val minX: Double, val maxX: Double, val minY: Double, val maxY: Double) {
val name: String
// The function's minimum should be 0.
protected def evaluate(x: Double, y: Double): Double
protected def maxValue: Double
def calc(solution: FuncSolution): Double =
evaluate(toRange(solution.x, minX, maxX), toRange(solution.y, minY, maxY)) / maxValue
def showScientific: Boolean
}
object Func {
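  // Linearly maps a value in [0, 1] onto [min, max], e.g. toRange(0.5, -10, 10) == 0.0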
def toRange(v: Double, min: Double, max: Double): Double =
v * (max - min) + min
}
object HoldersTableFunction extends Func(-10, 10, -10, 10) {
val name: String = "Holder's Table Function"
// The function's minimum should be 0.
override protected def evaluate(x: Double, y: Double): Double =
-abs(sin(x) * cos(y) * exp(abs(1 - sqrt(x * x + y * y) / PI))) + 19.2085
override def maxValue: Double = 19.2085
override def showScientific: Boolean = false
}
object LabTestFunction extends Func(-10, 10, -10, 10) {
val name: String = "Lab Test Function"
// The function's minimum should be 0.
override protected def evaluate(x: Double, y: Double): Double =
20 + x * x + y * y - 10 * (cos(2 * PI * x) + cos(2 * PI * y))
override def maxValue: Double = 20 + maxX * maxX + maxY * maxY + 20
override def showScientific: Boolean = true
}
case class FuncSolution(x: Double, y: Double, func: Func) {
def xInRange = Func.toRange(x, func.minX, func.maxX)
def yInRange = Func.toRange(y, func.minY, func.maxY)
override def toString: String = {
s"(${formatScientific(xInRange, 3)}, ${formatScientific(yInRange, 3)})"
}
}
object FuncSolution {
def genFuncSolution(func: Func, rand: Random): FuncSolution = {
FuncSolution(
rand.nextDouble(),
rand.nextDouble(),
func)
}
}
|
NightRa/AILab
|
Genetic/src/main/scala/genetic/func/FuncSolution.scala
|
Scala
|
apache-2.0
| 1,903 |
package mesos.dns.client.model
case class Service(service: String, host: String, ip: String, port: Int)
|
jmarin/mesos-proxy
|
src/main/scala/mesos/dns/client/model/Service.scala
|
Scala
|
apache-2.0
| 105 |
package com.twitter.finagle.stats
/**
* Exposes the value of a function. For example, one could add a gauge for a
* computed health metric.
*/
trait Gauge {
def remove()
}
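// A minimal sketch of how a gauge is typically obtained (assuming a StatsReceiver in scope
// and a hypothetical `queue` whose size we want to expose):
//
//   val gauge: Gauge = statsReceiver.addGauge("pending_requests") { queue.size.toFloat }
//   // ...and removed once the metric is no longer relevant:
//   gauge.remove()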
|
BuoyantIO/twitter-util
|
util-stats/src/main/scala/com/twitter/finagle/stats/Gauge.scala
|
Scala
|
apache-2.0
| 178 |
package com.sksamuel.elastic4s.requests.searches.aggs
import com.sksamuel.elastic4s.requests.searches.queries.Query
import com.sksamuel.elastic4s.ext.OptionImplicits._
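// Bucket aggregation in which each (key, query) pair produces a named bucket; documents
// matching none of the filters can optionally be collected into an "other" bucket.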
case class KeyedFiltersAggregation(name: String,
filters: Iterable[(String, Query)],
otherBucket: Option[Boolean] = None,
otherBucketKey: Option[String] = None,
subaggs: Seq[AbstractAggregation] = Nil,
metadata: Map[String, AnyRef] = Map.empty)
extends Aggregation {
type T = KeyedFiltersAggregation
def otherBucket(otherBucket: Boolean): T = copy(otherBucket = otherBucket.some)
def otherBucketKey(otherBucketKey: String): T = copy(otherBucketKey = otherBucketKey.some)
override def subAggregations(aggs: Iterable[AbstractAggregation]): T = copy(subaggs = aggs.toSeq)
override def metadata(map: Map[String, AnyRef]): T = copy(metadata = map)
}
|
sksamuel/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/aggs/KeyedFiltersAggregation.scala
|
Scala
|
apache-2.0
| 1,029 |
package ru.arkoit.finchrich.controller.internal
import io.finch.Endpoint
import ru.arkoit.finchrich.controller.Controller
import scala.reflect.macros.whitebox
import macrocompat.bundle
@bundle
private[finchrich] class FinchRichMacro(val c: whitebox.Context) {
import c.universe._
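  // Materializes an EndpointExtractor[T]: collects the controller's public Endpoint members
  // (recursing into nested Controllers) and combines them into a single coproduct with :+:.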
def materialize[T <: Controller : c.WeakTypeTag, R : c.WeakTypeTag]: Tree = {
def symbolResultType(s: Symbol): Type = s match {
case x if x.isMethod => x.asMethod.returnType
case x => x.typeSignature
}
def filterApplicableTerms(t: Type) =
t.members
.filter(x => x.isTerm && x.isPublic && !x.isSynthetic)
.map(_.asTerm)
.filter {
case x if x.isMethod =>
val ms = x.asMethod
!ms.isConstructor && (ms.returnType <:< c.weakTypeOf[Controller] | ms.returnType <:< c.weakTypeOf[Endpoint[_]])
case x if !x.isMethod => x.typeSignature <:< c.weakTypeOf[Controller] | x.typeSignature <:< c.weakTypeOf[Endpoint[_]]
case _ => false
}
def extract(t: Type, context: Tree): List[(Tree, Type)] = {
filterApplicableTerms(t).toList.flatMap{
case x if symbolResultType(x) <:< c.weakTypeOf[Controller] => extract(symbolResultType(x), q"$context.${x.name.toTermName}")
case x => List((q"$context.${x.name.toTermName}", symbolResultType(x).typeArgs.head))
}
}
val (exSyms, exTypes) = extract(c.weakTypeOf[T], q"a").unzip
if (exSyms.isEmpty)
      c.abort(c.enclosingPosition, "Controller passed to the controllerToEndpoint function contains neither endpoints nor other non-empty controllers.")
val result = exSyms.foldLeft(q"": Tree)((a, b) => a match {
case q"" => q"$b"
case _ => q"$a.:+:($b)"
})
val resultType = if (exTypes.length == 1) q"${exTypes.head}" else
exTypes.reverse.foldRight(tq"": Tree)((a, b) => b match {
case tq"" => tq":+:[$a, CNil]"
case _ => tq":+:[$a, $b]"
})
q"""
import io.finch.Endpoint
import shapeless._
import ru.arkoit.finchrich.controller.EndpointExtractor
new EndpointExtractor[${c.weakTypeOf[T]}] {
type R = $resultType
def apply(a: ${c.weakTypeOf[T]}): Endpoint[R] = $result
}
"""
}
}
|
akozhemiakin/finchrich
|
controller/src/main/scala/ru/arkoit/finchrich/controller/internal/FinchRichMacro.scala
|
Scala
|
apache-2.0
| 2,270 |
package io.findify.s3mock.transfermanager
import java.io.{ByteArrayInputStream, File, FileInputStream}
import com.amazonaws.services.s3.model.ObjectMetadata
import io.findify.s3mock.S3MockTest
import scala.io.Source
/**
* Created by shutty on 11/23/16.
*/
class PutGetTest extends S3MockTest {
override def behaviour(fixture: => Fixture) = {
val s3 = fixture.client
val tm = fixture.tm
it should "put files with TransferManager" in {
s3.createBucket("tm1")
val upload = tm.upload("tm1", "hello1", new ByteArrayInputStream("hello".getBytes), new ObjectMetadata())
val result = upload.waitForUploadResult()
result.getKey shouldBe "hello1"
}
it should "download files with TransferManager" in {
val file = File.createTempFile("hello1", ".s3mock")
val download = tm.download("tm1", "hello1", file)
download.waitForCompletion()
val result = Source.fromInputStream(new FileInputStream(file), "UTF-8").mkString
result shouldBe "hello"
}
it should "copy file with TransferManager" in {
val copy = tm.copy("tm1", "hello1", "tm1", "hello2")
val result = copy.waitForCopyResult()
result.getDestinationKey shouldBe "hello2"
val hello2 = s3.getObject("tm1", "hello2")
getContent(hello2) shouldBe "hello"
}
}
}
|
findify/s3mock
|
src/test/scala/io/findify/s3mock/transfermanager/PutGetTest.scala
|
Scala
|
mit
| 1,328 |
/*
* Copyright 2014 Eric Zoerner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalable.client
import javafx.{ stage ⇒ jfxs }
import akka.actor.ActorSystem
import akka.event.Logging
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.util.Try
import scalable.client.login.LoginListener
import scalable.infrastructure.api.AskLogin
import scalable.infrastructure.api.ResultStatus._
import scalafx.application.Platform
import scalafx.event.ActionEvent
import scalafx.scene.control.{ ToggleButton, Button, TextField }
import scalafx.scene.layout.{ VBox, GridPane }
import scalafx.scene.text.Text
import scalafx.stage.Stage
import scalafxml.core.macros.sfxml
/**
* Handles Login operations.
*
* @author Eric Zoerner <a href="mailto:[email protected]">[email protected]</a>
*/
@sfxml
class LoginController(private val usernameField: TextField,
private val passwordField: TextField,
private val failedText: Text,
private val timedOutText: Text,
private val root: GridPane,
private val actorSystem: ActorSystem,
private val advancedView: VBox,
private val advancedToggle: ToggleButton,
private val hostText: TextField,
private val portText: TextField) extends LoginListener {
private val log = Logging(actorSystem, this.getClass)
@volatile private var waiting = true
hostText.text = Configuration.host
portText.text = Configuration.portTcp.toString
var stage: Stage = null
Platform.runLater(usernameField.requestFocus())
def onKeyTyped(): Unit = {
failedText.visible.value = false
timedOutText.visible.value = false
}
def login(event: ActionEvent) = {
timedOutText.visible.value = false
val appSupervisor = appSupervisorSelection(actorSystem)
appSupervisor !
(hostText.text.value,
Try(portText.text.value.toInt).getOrElse(Configuration.portTcp),
AskLogin(usernameField.text.value,
passwordField.text.value))
// start a timer to timeout if no response is received
actorSystem.scheduler.scheduleOnce(5.seconds) {
if (waiting) {
log.error("Unsuccessful login: Timed Out")
Platform.runLater(timedOutText.visible.value = true)
}
}
}
def toggleAdvanced(event: ActionEvent) = {
advancedView.setVisible(advancedToggle.selected.value)
}
override def loginResult(resultStatus: ResultStatus, username: String) = {
val appSupervisor = appSupervisorSelection(actorSystem)
waiting = false
resultStatus match {
case Ok ⇒
log.debug("Successful Login")
appSupervisor ! OpenLobby(username)
Platform.runLater {
if (stage != null) stage.hide()
}
case WrongPassword ⇒
Platform.runLater(failedText.visible.value = true)
log.error(s"Unsuccessful login: $resultStatus, $username")
}
}
def exit() = {
Platform.exit()
}
override def setLoginStage(stage: Stage): Unit = this.stage = stage
}
|
ezoerner/scalable-chat
|
client/src/main/scala/scalable/client/LoginController.scala
|
Scala
|
apache-2.0
| 3,688 |
package com.sksamuel.elastic4s.requests.searches.queries
import com.sksamuel.elastic4s.JsonSugar
import com.sksamuel.elastic4s.handlers.searches.queries
import com.sksamuel.elastic4s.handlers.searches.queries.IntervalsQueryBuilderFn
import com.sksamuel.elastic4s.requests.script.Script
import org.scalatest.GivenWhenThen
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class IntervalsQueryBuilderFnTest extends AnyFunSuite with Matchers with GivenWhenThen with JsonSugar {
test("Should correctly build intervals query") {
Given("An intervals query")
val query = IntervalsQuery("my_text", AllOf(List(
Match(query = "my favorite food").maxGaps(0).ordered(true),
AnyOf(intervals = List(
Match(query = "hot water"),
Match(query = "cold porridge")
))
)).ordered(true))
When("Intervals query is built")
val queryBody = IntervalsQueryBuilderFn(query)
Then("query should have right fields")
queryBody.string() should matchJson(intervalsQuery)
}
def intervalsQuery: String =
"""
|{
| "intervals" : {
| "my_text" : {
| "all_of" : {
| "ordered" : true,
| "intervals" : [
| {
| "match" : {
| "query" : "my favorite food",
| "max_gaps" : 0,
| "ordered" : true
| }
| },
| {
| "any_of" : {
| "intervals" : [
| { "match" : { "query" : "hot water" } },
| { "match" : { "query" : "cold porridge" } }
| ]
| }
| }
| ]
| }
| }
| }
|}
""".stripMargin.replace("\\n", "")
test("Should correctly build intervals query with a filter") {
Given("An intervals query with a filter")
val query = IntervalsQuery("my_text", Match(query = "hot porridge").maxGaps(10).filter(
IntervalsFilter().notContaining(Match(query = "salty")
)))
When("Intervals query is built")
val queryBody = queries.IntervalsQueryBuilderFn(query)
println(queryBody.string())
Then("query should have right fields")
queryBody.string() should matchJson(intervalsWithFilterQuery)
}
def intervalsWithFilterQuery: String =
"""
|{
| "intervals" : {
| "my_text" : {
| "match" : {
| "query" : "hot porridge",
| "max_gaps" : 10,
| "filter" : {
| "not_containing" : {
| "match" : {
| "query" : "salty"
| }
| }
| }
| }
| }
| }
|}
""".stripMargin.replace("\\n", "")
test("Should correctly build intervals query with a script") {
Given("An intervals query with a script")
val query = IntervalsQuery("my_text", Match("hot porridge").filter(
IntervalsFilter().script(Script(
"interval.start > 10 && interval.end < 20 && interval.gaps == 0"
))
))
When("Intervals query is built")
val queryBody = queries.IntervalsQueryBuilderFn(query)
println(queryBody.string())
Then("query should have right fields")
queryBody.string() should matchJson(intervalsWithScriptQuery)
}
def intervalsWithScriptQuery: String =
"""
|{
| "intervals" : {
| "my_text" : {
| "match" : {
| "query" : "hot porridge",
| "filter" : {
| "script" : {
| "source" : "interval.start > 10 && interval.end < 20 && interval.gaps == 0"
| }
| }
| }
| }
| }
|}
""".stripMargin.replace("\\n", "")
}
|
sksamuel/elastic4s
|
elastic4s-core/src/test/scala/com/sksamuel/elastic4s/requests/searches/queries/IntervalsQueryBuilderFnTest.scala
|
Scala
|
apache-2.0
| 3,848 |
package scalapb.spark
import com.google.protobuf.ByteString
import com.google.protobuf.Descriptors.{ EnumValueDescriptor, FieldDescriptor }
import com.google.protobuf.Descriptors.FieldDescriptor.JavaType
import scalapb.{ GeneratedMessage, GeneratedMessageCompanion, Message }
import org.apache.spark.sql.types.{ ArrayType, StructField }
import org.apache.spark.sql.{ DataFrame, Row, SQLContext, SparkSession }
object ProtoSQL {
import scala.language.existentials
def protoToDataFrame[T <: GeneratedMessage with Message[T] : GeneratedMessageCompanion](
sparkSession: SparkSession, protoRdd: org.apache.spark.rdd.RDD[T]): DataFrame = {
sparkSession.createDataFrame(protoRdd.map(messageToRow[T]), schemaFor[T])
}
def protoToDataFrame[T <: GeneratedMessage with Message[T] : GeneratedMessageCompanion](
sqlContext: SQLContext, protoRdd: org.apache.spark.rdd.RDD[T]): DataFrame = {
protoToDataFrame(sqlContext.sparkSession, protoRdd)
}
def schemaFor[T <: GeneratedMessage with Message[T]](implicit cmp: GeneratedMessageCompanion[T]) = {
import org.apache.spark.sql.types._
import collection.JavaConverters._
StructType(cmp.javaDescriptor.getFields.asScala.map(structFieldFor))
}
private def toRowData(fd: FieldDescriptor, obj: Any) = fd.getJavaType match {
case JavaType.BYTE_STRING => obj.asInstanceOf[ByteString].toByteArray
case JavaType.ENUM => obj.asInstanceOf[EnumValueDescriptor].getName
case JavaType.MESSAGE => messageToRow(obj.asInstanceOf[T forSome { type T <: GeneratedMessage with Message[T] }])
case _ => obj
}
def messageToRow[T <: GeneratedMessage with Message[T]](msg: T): Row = {
import collection.JavaConversions._
Row(
msg.companion.javaDescriptor.getFields.map {
fd =>
val obj = msg.getField(fd)
if (obj != null) {
if (fd.isRepeated) {
obj.asInstanceOf[Traversable[Any]].map(toRowData(fd, _))
} else {
toRowData(fd, obj)
}
} else null
}: _*)
}
def dataTypeFor(fd: FieldDescriptor) = {
import com.google.protobuf.Descriptors.FieldDescriptor.JavaType._
import org.apache.spark.sql.types._
fd.getJavaType match {
case INT => IntegerType
case LONG => LongType
case FLOAT => FloatType
case DOUBLE => DoubleType
case BOOLEAN => BooleanType
case STRING => StringType
case BYTE_STRING => BinaryType
case ENUM => StringType
case MESSAGE =>
import collection.JavaConverters._
StructType(fd.getMessageType.getFields.asScala.map(structFieldFor))
}
}
def structFieldFor(fd: FieldDescriptor): StructField = {
val dataType = dataTypeFor(fd)
StructField(
fd.getName,
if (fd.isRepeated) ArrayType(dataType, containsNull = false) else dataType,
nullable = !fd.isRequired && !fd.isRepeated
)
}
}
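// Hedged usage sketch (not part of the original source): a thin helper showing how the
// conversions above are typically combined. `T` stands for any generated ScalaPB message
// type; concrete message names are application-specific.
object ProtoSQLUsageSketch {
  def toDataFrame[T <: GeneratedMessage with Message[T] : GeneratedMessageCompanion](
    spark: SparkSession, rdd: org.apache.spark.rdd.RDD[T]): DataFrame =
    ProtoSQL.protoToDataFrame(spark, rdd) // rows built via messageToRow, schema via schemaFor[T]
}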
|
trueaccord/sparksql-scalapb
|
sparksql-scalapb/src/main/scala/scalapb/spark/ProtoSQL.scala
|
Scala
|
apache-2.0
| 2,914 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.expressions
import org.apache.flink.table.api.{DataTypes, _}
import org.apache.flink.table.expressions.ApiExpressionUtils.valueLiteral
import org.apache.flink.table.planner.expressions.utils.MapTypeTestBase
import org.apache.flink.table.planner.utils.DateTimeTestUtil.{localDate, localDateTime, localTime => gLocalTime}
import org.junit.Test
import java.time.{LocalDateTime => JLocalTimestamp}
class MapTypeTest extends MapTypeTestBase {
@Test
def testInputTypeGeneralization(): Unit = {
testAllApis(
map(1, "ABC", 2.0, "D"),
"map(1, 'ABC', 2.0, 'D')",
"MAP[1, 'ABC', cast(2.0 AS DOUBLE), 'D']",
"{1.0=ABC, 2.0=D}")
}
@Test
def testItem(): Unit = {
testSqlApi("f0['map is null']", "null")
testSqlApi("f1['map is empty']", "null")
testSqlApi("f2['b']", "13")
testSqlApi("f3[1]", "null")
testSqlApi("f3[12]", "a")
testSqlApi("f2[f3[12]]", "12")
}
@Test
def testMapLiteral(): Unit = {
// primitive literals
testAllApis(map(1, 1), "map(1, 1)", "MAP[1, 1]", "{1=1}")
testAllApis(
map(true, true),
"map(true, true)",
"map[TRUE, TRUE]",
"{true=true}")
// object literals
testTableApi(map(BigDecimal(1), BigDecimal(1)), "map(1p, 1p)", "{1=1}")
testAllApis(
map(map(1, 2), map(3, 4)),
"map(map(1, 2), map(3, 4))",
"MAP[MAP[1, 2], MAP[3, 4]]",
"{{1=2}={3=4}}")
testAllApis(
map(1 + 2, 3 * 3, 3 - 6, 4 - 2),
"map(1 + 2, 3 * 3, 3 - 6, 4 - 2)",
"map[1 + 2, 3 * 3, 3 - 6, 4 - 2]",
"{3=9, -3=2}")
testAllApis(
map(1, nullOf(DataTypes.INT)),
"map(1, Null(INT))",
"map[1, NULLIF(1,1)]",
"{1=null}")
// explicit conversion
testAllApis(
map(1, 2L , 3, 4L),
"map(1, 2L, 3, 4L)",
"MAP[1, CAST(2 AS BIGINT), 3, CAST(4 AS BIGINT)]",
"{1=2, 3=4}")
testAllApis(
map(valueLiteral(localDate("1985-04-11")), valueLiteral(gLocalTime("14:15:16")),
valueLiteral(localDate("2018-07-26")), valueLiteral(gLocalTime("17:18:19"))),
"map('1985-04-11'.toDate, '14:15:16'.toTime, '2018-07-26'.toDate, '17:18:19'.toTime)",
"MAP[DATE '1985-04-11', TIME '14:15:16', DATE '2018-07-26', TIME '17:18:19']",
"{1985-04-11=14:15:16, 2018-07-26=17:18:19}")
// There is no timestamp literal function in Java String Table API,
// toTimestamp is casting string to TIMESTAMP(3) which is not the same to timestamp literal.
testTableApi(
map(valueLiteral(gLocalTime("14:15:16")), valueLiteral(localDateTime("1985-04-11 14:15:16")),
valueLiteral(gLocalTime("17:18:19")), valueLiteral(localDateTime("2018-07-26 17:18:19"))),
"{14:15:16=1985-04-11 14:15:16, 17:18:19=2018-07-26 17:18:19}")
testSqlApi(
"MAP[TIME '14:15:16', TIMESTAMP '1985-04-11 14:15:16', " +
"TIME '17:18:19', TIMESTAMP '2018-07-26 17:18:19']",
"{14:15:16=1985-04-11 14:15:16, 17:18:19=2018-07-26 17:18:19}")
testAllApis(
map(valueLiteral(gLocalTime("14:15:16")),
valueLiteral(localDateTime("1985-04-11 14:15:16.123")),
valueLiteral(gLocalTime("17:18:19")),
valueLiteral(localDateTime("2018-07-26 17:18:19.123"))),
"map('14:15:16'.toTime, '1985-04-11 14:15:16.123'.toTimestamp, " +
"'17:18:19'.toTime, '2018-07-26 17:18:19.123'.toTimestamp)",
"MAP[TIME '14:15:16', TIMESTAMP '1985-04-11 14:15:16.123', " +
"TIME '17:18:19', TIMESTAMP '2018-07-26 17:18:19.123']",
"{14:15:16=1985-04-11 14:15:16.123, 17:18:19=2018-07-26 17:18:19.123}")
testTableApi(
map(valueLiteral(gLocalTime("14:15:16")),
valueLiteral(JLocalTimestamp.of(1985, 4, 11, 14, 15, 16, 123456000)),
valueLiteral(gLocalTime("17:18:19")),
valueLiteral(JLocalTimestamp.of(2018, 7, 26, 17, 18, 19, 123456000))),
"{14:15:16=1985-04-11 14:15:16.123456, 17:18:19=2018-07-26 17:18:19.123456}")
testSqlApi(
"MAP[TIME '14:15:16', TIMESTAMP '1985-04-11 14:15:16.123456', " +
"TIME '17:18:19', TIMESTAMP '2018-07-26 17:18:19.123456']",
"{14:15:16=1985-04-11 14:15:16.123456, 17:18:19=2018-07-26 17:18:19.123456}")
testAllApis(
map(BigDecimal(2.0002), BigDecimal(2.0003)),
"map(2.0002p, 2.0003p)",
"MAP[CAST(2.0002 AS DECIMAL(5, 4)), CAST(2.0003 AS DECIMAL(5, 4))]",
"{2.0002=2.0003}")
// implicit type cast only works on SQL API
testSqlApi("MAP['k1', CAST(1 AS DOUBLE), 'k2', CAST(2 AS FLOAT)]", "{k1=1.0, k2=2.0}")
}
@Test
def testMapField(): Unit = {
testAllApis(
map('f4, 'f5),
"map(f4, f5)",
"MAP[f4, f5]",
"{foo=12}")
testAllApis(
map('f4, 'f1),
"map(f4, f1)",
"MAP[f4, f1]",
"{foo={}}")
testAllApis(
map('f2, 'f3),
"map(f2, f3)",
"MAP[f2, f3]",
"{{a=12, b=13}={12=a, 13=b}}")
testAllApis(
map('f1.at("a"), 'f5),
"map(f1.at('a'), f5)",
"MAP[f1['a'], f5]",
"{null=12}")
testAllApis(
'f1,
"f1",
"f1",
"{}")
testAllApis(
'f2,
"f2",
"f2",
"{a=12, b=13}")
testAllApis(
'f2.at("a"),
"f2.at('a')",
"f2['a']",
"12")
testAllApis(
'f3.at(12),
"f3.at(12)",
"f3[12]",
"a")
testAllApis(
map('f4, 'f3).at("foo").at(13),
"map(f4, f3).at('foo').at(13)",
"MAP[f4, f3]['foo'][13]",
"b")
}
@Test
def testMapOperations(): Unit = {
// comparison
testAllApis(
'f1 === 'f2,
"f1 === f2",
"f1 = f2",
"false")
testAllApis(
'f3 === 'f7,
"f3 === f7",
"f3 = f7",
"true")
testAllApis(
'f5 === 'f2.at("a"),
"f5 === f2.at('a')",
"f5 = f2['a']",
"true")
testAllApis(
'f8 === 'f9,
"f8 === f9",
"f8 = f9",
"true")
testAllApis(
'f10 === 'f11,
"f10 === f11",
"f10 = f11",
"true")
testAllApis(
'f8 !== 'f9,
"f8 !== f9",
"f8 <> f9",
"false")
testAllApis(
'f10 !== 'f11,
"f10 !== f11",
"f10 <> f11",
"false")
testAllApis(
'f0.at("map is null"),
"f0.at('map is null')",
"f0['map is null']",
"null")
testAllApis(
'f1.at("map is empty"),
"f1.at('map is empty')",
"f1['map is empty']",
"null")
testAllApis(
'f2.at("b"),
"f2.at('b')",
"f2['b']",
"13")
testAllApis(
'f3.at(1),
"f3.at(1)",
"f3[1]",
"null")
testAllApis(
'f3.at(12),
"f3.at(12)",
"f3[12]",
"a")
testAllApis(
'f3.cardinality(),
"f3.cardinality()",
"CARDINALITY(f3)",
"2")
testAllApis(
'f2.at("a").isNotNull,
"f2.at('a').isNotNull",
"f2['a'] IS NOT NULL",
"true")
testAllApis(
'f2.at("a").isNull,
"f2.at('a').isNull",
"f2['a'] IS NULL",
"false")
testAllApis(
'f2.at("c").isNotNull,
"f2.at('c').isNotNull",
"f2['c'] IS NOT NULL",
"false")
testAllApis(
'f2.at("c").isNull,
"f2.at('c').isNull",
"f2['c'] IS NULL",
"true")
}
@Test
def testMapTypeCasting(): Unit = {
testTableApi(
'f2.cast(DataTypes.MAP(DataTypes.STRING, DataTypes.INT)),
"f2.cast(MAP(STRING, INT))",
"{a=12, b=13}"
)
}
}
|
tillrohrmann/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/expressions/MapTypeTest.scala
|
Scala
|
apache-2.0
| 8,226 |
package org.template.sr
/*
* Copyright KOLIBERO under one or more contributor license agreements.
* KOLIBERO licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.predictionio.controller.{Engine,EngineFactory}
import org.joda.time.DateTime
case class Query(
val features: Array[Double]
) extends Serializable
case class PredictedResult(
coefficients: Array[Double],
intercept: Double,
scale: Double,
prediction: Double,
quantiles: Array[Double]
) extends Serializable
object SREngine extends EngineFactory {
def apply() = {
new Engine(
classOf[DataSource],
classOf[Preparator],
Map("sr" -> classOf[SRAlgorithm]),
classOf[Serving])
}
}
|
goliasz/pio-template-sr
|
src/main/scala/Engine.scala
|
Scala
|
apache-2.0
| 1,227 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.routing.sird
import java.net.URL
import java.net.URI
import java.util.regex.Pattern
import play.api.mvc.RequestHeader
import play.utils.UriEncoding
import scala.collection.concurrent.TrieMap
import scala.util.matching.Regex
/**
* The path extractor.
*
* Supported data types that can be extracted from:
* - play.api.mvc.RequestHeader
* - String
* - java.net.URI
* - java.net.URL
*
* @param regex The regex that is used to extract the raw parts.
* @param partDescriptors Descriptors saying whether each part should be decoded or not.
*/
class PathExtractor(regex: Regex, partDescriptors: Seq[PathPart.Value]) {
def unapplySeq(path: String): Option[List[String]] = extract(path)
def unapplySeq(request: RequestHeader): Option[List[String]] = extract(request.path)
def unapplySeq(url: URL): Option[List[String]] = Option(url.getPath).flatMap(extract)
def unapplySeq(uri: URI): Option[List[String]] = Option(uri.getRawPath).flatMap(extract)
private def extract(path: String): Option[List[String]] = {
regex.unapplySeq(path).map { parts =>
parts.zip(partDescriptors).map {
case (part, PathPart.Decoded) => UriEncoding.decodePathSegment(part, "utf-8")
case (part, PathPart.Raw) => part
case (part, pathPart) => throw new MatchError(s"unexpected ($path, $pathPart)")
}
}
}
}
object PathExtractor {
// Memoizes all the routes, so that the route doesn't have to be parsed, and the resulting regex compiled,
// on each invocation.
// There is a possible memory leak here, especially if RouteContext is instantiated dynamically. But,
// under normal usage, there will only be as many entries in this cache as there are usages of this
// string interpolator in code - even in a very dynamic classloading environment with many different
// strings being interpolated, the chances of this cache ever causing an out of memory error are very
// low.
private val cache = TrieMap.empty[Seq[String], PathExtractor]
/**
* Lookup the PathExtractor from the cache, or create and store a new one if not found.
*/
def cached(parts: Seq[String]): PathExtractor = {
cache.getOrElseUpdate(
parts, {
// "parse" the path
val (regexParts, descs) = parts.tail.map {
part =>
if (part.startsWith("*")) {
// It's a .* matcher
"(.*)" + Pattern.quote(part.drop(1)) -> PathPart.Raw
} else if (part.startsWith("<") && part.contains(">")) {
// It's a regex matcher
val splitted = part.split(">", 2)
val regex = splitted(0).drop(1)
"(" + regex + ")" + Pattern.quote(splitted(1)) -> PathPart.Raw
} else {
// It's an ordinary path part matcher
"([^/]*)" + Pattern.quote(part) -> PathPart.Decoded
}
}.unzip
new PathExtractor(regexParts.mkString(Pattern.quote(parts.head), "", "/?").r, descs)
}
)
}
}
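// Hedged usage sketch (not part of the original source): builds a cached extractor by hand
// and matches a raw path. The parts Seq("/users/", "") mirrors what a sird-style
// p"/users/$id" interpolation would pass; the path value below is purely illustrative.
private object PathExtractorUsageSketch {
  val userPath: PathExtractor = PathExtractor.cached(Seq("/users/", ""))
  def demo(): Option[List[String]] =
    userPath.unapplySeq("/users/j%20smith") // Some(List("j smith")): the segment is percent-decoded
}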
/**
* A path part descriptor. Describes whether the path part should be decoded, or left as is.
*/
private object PathPart extends Enumeration {
val Decoded, Raw = Value
}
|
marcospereira/playframework
|
core/play/src/main/scala/play/api/routing/sird/PathExtractor.scala
|
Scala
|
apache-2.0
| 3,305 |
package org.krazykat.ontapi.models.naElement.mode7
import org.krazykat.ontapi.models.naElement.BaseVersion
case class Version(version: String, isClustered: Boolean) extends BaseVersion(version, isClustered)
|
ehudkaldor/scala-ontapi
|
src/main/scala/org/krazykat/ontapi/models/naElement/mode7/Version.scala
|
Scala
|
gpl-2.0
| 209 |
package akka.persistence.kafka
import akka.actor.ActorRef
import akka.persistence.kafka.BrokerWatcher.BrokersUpdated
import akka.persistence.kafka.MetadataConsumer.Broker
import kafka.utils.{ZkUtils, ZKStringSerializer, ZKConfig}
import org.I0Itec.zkclient.ZkClient
object BrokerWatcher {
case class BrokersUpdated(brokers: List[Broker])
}
class BrokerWatcher(zkConfig: ZKConfig, listener: ActorRef) {
lazy val zkClient = new ZkClient(
zkConfig.zkConnect,
zkConfig.zkSessionTimeoutMs,
zkConfig.zkConnectionTimeoutMs,
ZKStringSerializer)
lazy val childWatcher = new ChildDataWatcher[String](zkClient, ZkUtils.BrokerIdsPath, { d =>
listener ! BrokersUpdated(buildBrokers(d))
})
def start(): List[Broker] = {
buildBrokers(childWatcher.start())
}
def stop(): Unit = {
childWatcher.stop()
zkClient.close()
}
private def buildBrokers(d: Map[String, String]): List[Broker] = {
d.values.map(Broker.fromString).flatMap(x => x).toList
}
}
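// Hedged usage sketch (not part of the original source): a listener actor that a
// BrokerWatcher could notify. What to do on an update (e.g. refreshing producer or
// consumer connections) is application-specific, so the body is a placeholder.
class BrokerListenerSketch extends akka.actor.Actor {
  def receive = {
    case BrokersUpdated(brokers) =>
      // rebuild Kafka connections against the new broker list here
      ()
  }
}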
|
crispywalrus/akka-persistence-kafka
|
src/main/scala/akka/persistence/kafka/BrokerWatcher.scala
|
Scala
|
apache-2.0
| 995 |
/**
* The MIT License (MIT)
* <p/>
* Copyright (c) 2016 ScalateKids
* <p/>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p/>
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* <p/>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
* <p/>
* @author Scalatekids
* @version 1.0
* @since 1.0
*/
package com.actorbase.actorsystem.actors.storekeeper
import akka.actor.{ Actor, ActorLogging, ActorRef, OneForOneStrategy, Props }
import akka.cluster.pubsub.DistributedPubSubMediator.Subscribe
import akka.cluster.pubsub.DistributedPubSub
import akka.actor.SupervisorStrategy._
import com.actorbase.actorsystem.messages.StorekeeperMessages._
import com.actorbase.actorsystem.messages.StorefinderMessages.{PartialMapTransaction, UpdateCollectionSize}
import com.actorbase.actorsystem.messages.WarehousemanMessages.{ Init, Save }
import com.actorbase.actorsystem.messages.ClientActorMessages.Response
import com.actorbase.actorsystem.messages.ManagerMessages.OneMore
import com.actorbase.actorsystem.actors.warehouseman.Warehouseman
import com.actorbase.actorsystem.utils.CryptoUtils
import scala.concurrent.duration._
import scala.language.postfixOps
object Storekeeper {
def props(n: String, o: String, s: Int): Props = Props(classOf[Storekeeper], n, o, s)
}
/**
 * Storekeeper actor that holds a partition of a collection's key/value entries.
 *
 * @param collectionName name of the collection this storekeeper belongs to
 * @param collectionOwner owner of the collection
 * @param indicativeSize indicative item-count threshold used when deciding to request another storekeeper
 */
class Storekeeper(private val collectionName: String, private val collectionOwner: String, indicativeSize: Int) extends Actor with ActorLogging {
val mediator = DistributedPubSub(context.system).mediator
// subscribe to the topic named "persist-data"
mediator ! Subscribe("persist-data", self)
private val warehouseman = context.actorOf(Warehouseman.props( collectionOwner + collectionName ))
private var manager: Option[ActorRef] = None
private var checked = false
warehouseman ! Init( collectionName, collectionOwner)
override val supervisorStrategy =
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case _: Exception => Resume
}
/**
   * Receive method of the Storekeeper actor; it reacts differently depending on the message it receives:<br>
   * _InitMn: initializes the reference to its corresponding manager actor.<br>
   * _GetItem: returns the value associated with the given key.<br>
   * _GetAll: returns the entire contents of this storekeeper.<br>
   * _RemoveItem: removes the value associated with the given key.<br>
   * _InsertItem: inserts an item into the collection; whether an existing item may be overwritten is controlled by the boolean parameter update.<br>
   * _Persist: persists the storekeeper's data to disk.<br>
*
*/
def receive = running(Map[String, Array[Byte]]().empty)
/**
* Represents the state of the actor during process of the messages inside
* his mailbox
*/
def running(data: Map[String, Array[Byte]]): Receive = {
case message: StorekeeperMessage => message match {
/**
* InitMn message, this actor will initialize its corresponding manager actor
*
       * @param mn a manager actor representing the corresponding manager
*/
case InitMn(mn) => manager = Some(mn)
/**
* GetItem message, this actor will send back a value associated with the input key
*
       * @param key a String representing the key of the item to be returned (this will need to be changed)
*
*/
case GetItem(key) =>
// println(data.contains(key))
data get key map (v => sender ! Right(Response(CryptoUtils.bytesToAny(v)))) getOrElse sender ! Left("UndefinedKey")
/**
       * GetAll message: the actor sends its entire contents to the parent as a PartialMapTransaction.
*/
case GetAll(parent, requester) =>
if (data.nonEmpty)
parent ! PartialMapTransaction(requester, sender, data)
/**
       * RemoveItem message: when the actor receives this message it erases the item associated with the
       * given key. No exception is thrown if the item is not present.
*/
case RemoveItem(parent, key) =>
if (data contains(key)) {
parent ! UpdateCollectionSize(0, false)
sender ! "OK"
context become running(data - key)
} else sender ! "UndefinedKey"
/**
* Insert message, insert a key/value into a designed collection
*
* @param key a String representing the new key to be inserted
* @param value a Any object type representing the value to be inserted
* with associated key, default to Array[Byte] type
* @param update a Boolean flag, define the insert behavior (with or without
* updating the value)
*
*/
case ins: InsertItem =>
/**
         * Private method that inserts an item into the collection; whether an existing item
         * may be updated is controlled by the update parameter.
         *
         * @param update true if the insert may update an existing item, false otherwise
* @param key String representing the key of the item
*/
def insertOrUpdate(update: Boolean, key: String): Boolean = {
var done = true
if (!update && !data.contains(key)) {
insertWithoutUpdate
}
else if (!update && data.contains(key)) {
done = false
}
else if (update && !data.contains(key)){
insertWithoutUpdate
}
done
}
/**
         * Private method used to insert an item without overwriting. It updates the size
         * of the collection and, when this storekeeper is full, asks the manager to create
         * another Storekeeper.
*/
def insertWithoutUpdate: Unit = {
// log.info("SK: Got work!")
val w = ins.value.length.toLong + ins.key.getBytes("UTF-8").length.toLong
ins.parentRef ! UpdateCollectionSize(w, true)
// if (data.size > indicativeSize && !checked) {
// checked = true
// manager map (_ ! OneMore) getOrElse (checked = false)
// }
}
        if (insertOrUpdate(ins.update, ins.key)) {
sender ! "OK"
context become running(data + (ins.key -> ins.value))
} else sender ! "DuplicatedKey"
/**
* Persist data to disk
*/
case Persist => if (data.size > 0 && collectionOwner != "anonymous") warehouseman ! Save( data )
}
}
}
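// Hedged usage sketch (not part of the original source): how a parent actor (for example
// a Storefinder) might create and query a Storekeeper. The collection name, owner and
// size threshold below are illustrative.
//
//   val sk = context.actorOf(Storekeeper.props("items", "admin", 1024))
//   sk ! GetItem("someKey")           // replies Right(Response(value)) or Left("UndefinedKey")
//   sk ! RemoveItem(self, "someKey")  // replies "OK" or "UndefinedKey"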
|
ScalateKids/Actorbase-Server
|
src/main/scala/com/actorbase/actorsystem/actors/storekeeper/Storekeeper.scala
|
Scala
|
mit
| 7,654 |
package com.politach
import akka.actor._
import akka.stream.FlowMaterializer
import akka.http.Http
import akka.http.server._
import akka.http.server.Directives._
import akka.http.model._
import com.typesafe.config.Config
class HttpApi(config: Config)(implicit system: ActorSystem, materializer: FlowMaterializer) {
import system.dispatcher
val interface = config.getString("rest-api.interface")
val port = config.getInt("rest-api.port")
val routes: Route =
    pathPrefix("api" / "v1") {
path("hello") {
complete("test")
}
}
def bind() = Http(system).bind(interface = interface, port = port).startHandlingWith(routes)
}
object HttpApi {
def start(config: Config)
(implicit system: ActorSystem, materializer: FlowMaterializer): Unit =
new HttpApi(config).bind()
}
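// Hedged usage sketch (not part of the original source): wiring HttpApi from an
// application entry point. FlowMaterializer() reflects the pre-1.0 akka-stream API used
// above, but factory names varied between milestone releases, so this is kept as an
// illustrative comment only.
//
//   object Main extends App {
//     implicit val system = ActorSystem("politach")
//     implicit val materializer = FlowMaterializer()
//     HttpApi.start(com.typesafe.config.ConfigFactory.load())
//   }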
|
Politach/web-app
|
src/main/scala/com/politach/HttpApi.scala
|
Scala
|
mit
| 819 |
package dbpedia.dataparsers.util.wikiparser
/**
* Represents a page.
*
* @param title The title of this page
* @param id The page ID
* @param revision The revision of this page
* @param timestamp The timestamp of the revision, in milliseconds since 1970-01-01 00:00:00 UTC
* @param contributorID The ID of the latest contributor
* @param contributorName The name of the latest contributor
* @param isRedirect True, if this is a Redirect page
* @param isDisambiguation True, if this is a Disambiguation page
* @param children The contents of this page
*/
class PageNode (
val title: WikiTitle,
val id: Long,
val revision: Long,
val timestamp: Long,
val contributorID: Long,
val contributorName: String,
val isRedirect: Boolean,
val isDisambiguation: Boolean,
children: List[Node] = List.empty
)
extends Node(children, 0)
{
def toWikiText = children.map(_.toWikiText).mkString
def toPlainText = children.map(_.toPlainText).mkString
}
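// Hedged usage sketch (not part of the original source): once a parser has produced a
// PageNode (constructing the WikiTitle and child Nodes happens elsewhere), its contents
// can be re-serialized in two forms.
//
//   val markup: String = page.toWikiText   // original wiki markup of all children
//   val plain: String  = page.toPlainText  // plain-text rendering of all children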
|
FnOio/dbpedia-parsing-functions-scala
|
src/main/scala/dbpedia/dataparsers/util/wikiparser/PageNode.scala
|
Scala
|
gpl-2.0
| 977 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import scala.collection.GenSeq
import scala.collection.GenMap
import scala.collection.GenSet
import scala.collection.GenIterable
import scala.collection.GenTraversable
import scala.collection.GenTraversableOnce
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import org.scalactic.Equality
import org.scalactic.TripleEquals
import Matchers._
class ShouldEqualEqualitySpec extends Spec {
object `the should equal syntax should use the appropriate Equality type class` {
def `for Any` {
() should equal (())
() shouldEqual ()
() should not equal (7)
implicit val e = new Equality[Unit] {
def areEqual(a: Unit, b: Any): Boolean = a != b
}
() should not equal (())
() should equal (7)
() shouldEqual 7
}
def `for String` {
"hi" should equal ("hi")
"hi" shouldEqual "hi"
"hi" should not equal ("ho")
implicit val e = new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
"hi" should not equal ("hi")
"hi" should equal ("ho")
"hi" shouldEqual "ho"
}
def `for Numeric` {
3 should equal (3)
3 shouldEqual 3
3 should not equal (4)
implicit val e = new Equality[Int] {
def areEqual(a: Int, b: Any): Boolean = a != b
}
3 should not equal (3)
3 should equal (4)
3 shouldEqual 4
}
object `for Map` {
def `with default equality` {
Map("I" -> 1, "II" -> 2) should equal (Map("I" -> 1, "II" -> 2))
Map("I" -> 1, "II" -> 2) shouldEqual Map("I" -> 1, "II" -> 2)
Map("I" -> 1, "II" -> 2) should not equal (Map("one" -> 1, "two" -> 2))
implicit val e = new Equality[GenMap[String, Int]] {
def areEqual(a: GenMap[String, Int], b: Any): Boolean = a != b
}
Map("I" -> 1, "II" -> 2) should equal (Map("I" -> 1, "II" -> 2))
Map("I" -> 1, "II" -> 2) shouldEqual Map("I" -> 1, "II" -> 2)
Map("I" -> 1, "II" -> 2) should not equal (Map("one" -> 1, "two" -> 2))
}
def `with inferred GenMap equality` {
implicit def travEq[T <: GenMap[String, Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
Map("I" -> 1, "II" -> 2) should not equal (Map("I" -> 1, "II" -> 2))
Map("I" -> 1, "II" -> 2) should equal (Map("one" -> 1, "two" -> 2))
Map("I" -> 1, "II" -> 2) shouldEqual Map("one" -> 1, "two" -> 2)
}
def `with specific Map equality` {
implicit val e = new Equality[Map[String, Int]] {
def areEqual(a: Map[String, Int], b: Any): Boolean = a != b
}
Map("I" -> 1, "II" -> 2) should not equal (Map("I" -> 1, "II" -> 2))
Map("I" -> 1, "II" -> 2) should equal (Map("one" -> 1, "two" -> 2))
Map("I" -> 1, "II" -> 2) shouldEqual Map("one" -> 1, "two" -> 2)
}
def `with both GenMap and specific Map equality, though I don't know why this compiles` {
implicit val e = new Equality[GenMap[String, Int]] {
def areEqual(a: GenMap[String, Int], b: Any): Boolean = a == b
}
implicit val e2 = new Equality[Map[String, Int]] { // Should pick the most specific one
def areEqual(a: Map[String, Int], b: Any): Boolean = a != b
}
Map("I" -> 1, "II" -> 2) should not equal (Map("I" -> 1, "II" -> 2))
Map("I" -> 1, "II" -> 2) should equal (Map("one" -> 1, "two" -> 2))
Map("I" -> 1, "II" -> 2) shouldEqual Map("one" -> 1, "two" -> 2)
}
def `with both inferred GenMap and specific Map equality` {
implicit def travEq[T <: GenMap[String, Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
implicit val e2 = new Equality[Map[String, Int]] { // Should pick the most specific one
def areEqual(a: Map[String, Int], b: Any): Boolean = a != b
}
Map("I" -> 1, "II" -> 2) should not equal (Map("I" -> 1, "II" -> 2))
Map("I" -> 1, "II" -> 2) should equal (Map("one" -> 1, "two" -> 2))
Map("I" -> 1, "II" -> 2) shouldEqual Map("one" -> 1, "two" -> 2)
}
}
object `for mutable.Map` {
def `with default equality` {
mutable.Map("I" -> 1, "II" -> 2) should equal (mutable.Map("I" -> 1, "II" -> 2))
mutable.Map("I" -> 1, "II" -> 2) shouldEqual mutable.Map("I" -> 1, "II" -> 2)
mutable.Map("I" -> 1, "II" -> 2) should not equal (mutable.Map("one" -> 1, "two" -> 2))
implicit val e = new Equality[GenMap[String, Int]] {
def areEqual(a: GenMap[String, Int], b: Any): Boolean = a != b
}
mutable.Map("I" -> 1, "II" -> 2) should equal (mutable.Map("I" -> 1, "II" -> 2))
mutable.Map("I" -> 1, "II" -> 2) shouldEqual mutable.Map("I" -> 1, "II" -> 2)
mutable.Map("I" -> 1, "II" -> 2) should not equal (mutable.Map("one" -> 1, "two" -> 2))
}
def `with inferred GenMap equality` {
implicit def travEq[T <: GenMap[String, Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
mutable.Map("I" -> 1, "II" -> 2) should not equal (mutable.Map("I" -> 1, "II" -> 2))
mutable.Map("I" -> 1, "II" -> 2) should equal (mutable.Map("one" -> 1, "two" -> 2))
mutable.Map("I" -> 1, "II" -> 2) shouldEqual mutable.Map("one" -> 1, "two" -> 2)
}
def `with specific mutable.Map equality` {
implicit val e = new Equality[mutable.Map[String, Int]] {
def areEqual(a: mutable.Map[String, Int], b: Any): Boolean = a != b
}
mutable.Map("I" -> 1, "II" -> 2) should not equal (mutable.Map("I" -> 1, "II" -> 2))
mutable.Map("I" -> 1, "II" -> 2) should equal (mutable.Map("one" -> 1, "two" -> 2))
mutable.Map("I" -> 1, "II" -> 2) shouldEqual mutable.Map("one" -> 1, "two" -> 2)
}
def `with both GenMap and specific mutable.Map equality, though I don't know why this compiles` {
implicit val e = new Equality[GenMap[String, Int]] {
def areEqual(a: GenMap[String, Int], b: Any): Boolean = a == b
}
implicit val e2 = new Equality[mutable.Map[String, Int]] { // Should pick the most specific one
def areEqual(a: mutable.Map[String, Int], b: Any): Boolean = a != b
}
mutable.Map("I" -> 1, "II" -> 2) should not equal (mutable.Map("I" -> 1, "II" -> 2))
mutable.Map("I" -> 1, "II" -> 2) should equal (mutable.Map("one" -> 1, "two" -> 2))
mutable.Map("I" -> 1, "II" -> 2) shouldEqual mutable.Map("one" -> 1, "two" -> 2)
}
def `with both inferred GenMap and specific mutable.Map equality` {
implicit def travEq[T <: GenMap[String, Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
implicit val e2 = new Equality[mutable.Map[String, Int]] { // Should pick the most specific one
def areEqual(a: mutable.Map[String, Int], b: Any): Boolean = a != b
}
mutable.Map("I" -> 1, "II" -> 2) should not equal (mutable.Map("I" -> 1, "II" -> 2))
mutable.Map("I" -> 1, "II" -> 2) should equal (mutable.Map("one" -> 1, "two" -> 2))
mutable.Map("I" -> 1, "II" -> 2) shouldEqual mutable.Map("one" -> 1, "two" -> 2)
}
}
def `for AnyRef` {
case class Person(name: String)
Person("Joe") should equal (Person("Joe"))
Person("Joe") shouldEqual Person("Joe")
Person("Joe") should not equal (Person("Sally"))
implicit val e = new Equality[Person] {
def areEqual(a: Person, b: Any): Boolean = a != b
}
Person("Joe") should not equal (Person("Joe"))
Person("Joe") should equal (Person("Sally"))
Person("Joe") shouldEqual Person("Sally")
}
object `for Traversable` {
def `with default equality` {
Set(1, 2, 3) should equal (Set(1, 2, 3))
Set(1, 2, 3) shouldEqual Set(1, 2, 3)
Set(1, 2, 3) should not equal (Set(1, 2, 4))
implicit val e = new Equality[GenTraversable[Int]] {
def areEqual(a: GenTraversable[Int], b: Any): Boolean = a != b
}
Set(1, 2, 3) should equal (Set(1, 2, 3))
Set(1, 2, 3) shouldEqual Set(1, 2, 3)
Set(1, 2, 3) should not equal (Set(1, 2, 4))
}
def `with inferred GenTraversable equality` {
implicit def travEq[T <: GenTraversable[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
Set(1, 2, 3) should not equal (Set(1, 2, 3))
Set(1, 2, 3) should equal (Set(1, 2, 4))
Set(1, 2, 3) shouldEqual Set(1, 2, 4)
}
def `with specific Traversable equality` {
implicit val e = new Equality[Set[Int]] {
def areEqual(a: Set[Int], b: Any): Boolean = a != b
}
Set(1, 2, 3) should not equal (Set(1, 2, 3))
Set(1, 2, 3) should equal (Set(1, 2, 4))
Set(1, 2, 3) shouldEqual Set(1, 2, 4)
}
def `with both GenTraversable and specific Traversable equality` {
implicit val e = new Equality[GenTraversable[Int]] {
def areEqual(a: GenTraversable[Int], b: Any): Boolean = a == b
}
implicit val e2 = new Equality[Set[Int]] { // Should pick the most specific one
def areEqual(a: Set[Int], b: Any): Boolean = a != b
}
Set(1, 2, 3) should not equal (Set(1, 2, 3))
Set(1, 2, 3) should equal (Set(1, 2, 4))
Set(1, 2, 3) shouldEqual Set(1, 2, 4)
}
def `with both inferred GenTraversable and specific Traversable equality` {
implicit def travEq[T <: GenTraversable[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
implicit val e2 = new Equality[Set[Int]] { // Should pick the most specific one
def areEqual(a: Set[Int], b: Any): Boolean = a != b
}
Set(1, 2, 3) should not equal (Set(1, 2, 3))
Set(1, 2, 3) shouldEqual Set(1, 2, 4)
}
}
object `for mutable.Traversable` {
def `with default equality` {
mutable.Set(1, 2, 3) should equal (mutable.Set(1, 2, 3))
mutable.Set(1, 2, 3) shouldEqual mutable.Set(1, 2, 3)
mutable.Set(1, 2, 3) should not equal (mutable.Set(1, 2, 4))
implicit val e = new Equality[GenTraversable[Int]] {
def areEqual(a: GenTraversable[Int], b: Any): Boolean = a != b
}
mutable.Set(1, 2, 3) should equal (mutable.Set(1, 2, 3))
mutable.Set(1, 2, 3) shouldEqual mutable.Set(1, 2, 3)
mutable.Set(1, 2, 3) should not equal (mutable.Set(1, 2, 4))
}
def `with inferred GenTraversable equality` {
implicit def travEq[T <: GenTraversable[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
mutable.Set(1, 2, 3) should not equal (mutable.Set(1, 2, 3))
mutable.Set(1, 2, 3) should equal (mutable.Set(1, 2, 4))
mutable.Set(1, 2, 3) shouldEqual mutable.Set(1, 2, 4)
}
def `with specific mutable.Traversable equality` {
implicit val e = new Equality[mutable.Set[Int]] {
def areEqual(a: mutable.Set[Int], b: Any): Boolean = a != b
}
mutable.Set(1, 2, 3) should not equal (mutable.Set(1, 2, 3))
mutable.Set(1, 2, 3) should equal (mutable.Set(1, 2, 4))
mutable.Set(1, 2, 3) shouldEqual mutable.Set(1, 2, 4)
}
def `with both GenTraversable and specific Traversable equality` {
implicit val e = new Equality[GenTraversable[Int]] {
def areEqual(a: GenTraversable[Int], b: Any): Boolean = a == b
}
implicit val e2 = new Equality[mutable.Set[Int]] { // Should pick the most specific one
def areEqual(a: mutable.Set[Int], b: Any): Boolean = a != b
}
mutable.Set(1, 2, 3) should not equal (mutable.Set(1, 2, 3))
mutable.Set(1, 2, 3) should equal (mutable.Set(1, 2, 4))
mutable.Set(1, 2, 3) shouldEqual mutable.Set(1, 2, 4)
}
def `with both inferred GenTraversable and specific Traversable equality` {
implicit def travEq[T <: GenTraversable[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
implicit val e2 = new Equality[mutable.Set[Int]] { // Should pick the most specific one
def areEqual(a: mutable.Set[Int], b: Any): Boolean = a != b
}
mutable.Set(1, 2, 3) should not equal (mutable.Set(1, 2, 3))
mutable.Set(1, 2, 3) should equal (mutable.Set(1, 2, 4))
mutable.Set(1, 2, 3) shouldEqual mutable.Set(1, 2, 4)
}
}
object `for Java Collection` {
val javaSet123: java.util.Set[Int] = new java.util.HashSet
javaSet123.add(1)
javaSet123.add(2)
javaSet123.add(3)
val javaSet124: java.util.Set[Int] = new java.util.HashSet
javaSet124.add(1)
javaSet124.add(2)
javaSet124.add(4)
def `with default equality` {
javaSet123 should equal (javaSet123)
javaSet123 shouldEqual javaSet123
javaSet123 should not equal (javaSet124)
implicit val e = new Equality[java.util.Collection[Int]] {
def areEqual(a: java.util.Collection[Int], b: Any): Boolean = a != b
}
javaSet123 should equal (javaSet123)
javaSet123 shouldEqual javaSet123
javaSet123 should not equal (javaSet124)
}
def `with inferred Collection equality` {
implicit def travEq[T <: java.util.Collection[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
javaSet123 should not equal (javaSet123)
javaSet123 should equal (javaSet124)
javaSet123 shouldEqual javaSet124
}
def `with specific Collection equality` {
implicit val e = new Equality[java.util.Set[Int]] {
def areEqual(a: java.util.Set[Int], b: Any): Boolean = a != b
}
javaSet123 should not equal (javaSet123)
javaSet123 should equal (javaSet124)
javaSet123 shouldEqual javaSet124
}
def `with both Collection and specific Collection equality` {
implicit val e = new Equality[java.util.Collection[Int]] {
def areEqual(a: java.util.Collection[Int], b: Any): Boolean = a == b
}
implicit val e2 = new Equality[java.util.Set[Int]] { // Should pick the most specific one
def areEqual(a: java.util.Set[Int], b: Any): Boolean = a != b
}
javaSet123 should not equal (javaSet123)
javaSet123 should equal (javaSet124)
javaSet123 shouldEqual javaSet124
}
def `with both inferred Collection and specific Collection equality` {
implicit def travEq[T <: java.util.Collection[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
implicit val e2 = new Equality[java.util.Set[Int]] { // Should pick the most specific one
def areEqual(a: java.util.Set[Int], b: Any): Boolean = a != b
}
javaSet123 should not equal (javaSet123)
javaSet123 should equal (javaSet124)
javaSet123 shouldEqual javaSet124
}
}
object `for Java Map` {
val javaMap123: java.util.HashMap[String, Int] = new java.util.HashMap
javaMap123.put("one",1)
javaMap123.put("two", 2)
javaMap123.put("three", 3)
val javaMap124: java.util.HashMap[String, Int] = new java.util.HashMap
javaMap124.put("one",1)
javaMap124.put("two", 2)
javaMap124.put("four", 4)
def `with default equality` {
javaMap123 should equal (javaMap123)
javaMap123 shouldEqual javaMap123
javaMap123 should not equal (javaMap124)
implicit val e = new Equality[java.util.Map[String, Int]] {
def areEqual(a: java.util.Map[String, Int], b: Any): Boolean = a != b
}
javaMap123 should equal (javaMap123)
javaMap123 shouldEqual javaMap123
javaMap123 should not equal (javaMap124)
}
def `with inferred Map equality` {
implicit def travEq[T <: java.util.Map[String, Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
javaMap123 should not equal (javaMap123)
javaMap123 should equal (javaMap124)
javaMap123 shouldEqual javaMap124
}
def `with specific HashMap equality` {
implicit val e = new Equality[java.util.HashMap[String, Int]] {
def areEqual(a: java.util.HashMap[String, Int], b: Any): Boolean = a != b
}
javaMap123 should not equal (javaMap123)
javaMap123 should equal (javaMap124)
javaMap123 shouldEqual javaMap124
}
def `with both Map and specific HashMap equality` {
implicit val e = new Equality[java.util.Map[String, Int]] {
def areEqual(a: java.util.Map[String, Int], b: Any): Boolean = a == b
}
implicit val e2 = new Equality[java.util.HashMap[String, Int]] { // Should pick this because it is an exact match
def areEqual(a: java.util.HashMap[String, Int], b: Any): Boolean = a != b
}
javaMap123 should not equal (javaMap123)
javaMap123 should equal (javaMap124)
javaMap123 shouldEqual javaMap124
}
def `with both inferred Map and specific HashMap equality` {
implicit def travEq[T <: java.util.Map[String, Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
implicit val e2 = new Equality[java.util.HashMap[String, Int]] { // Should pick the most specific one
def areEqual(a: java.util.HashMap[String, Int], b: Any): Boolean = a != b
}
javaMap123 should not equal (javaMap123)
javaMap123 should equal (javaMap124)
javaMap123 shouldEqual javaMap124
}
}
object `for Seq` {
def `with default equality` {
Vector(1, 2, 3) should equal (Vector(1, 2, 3))
Vector(1, 2, 3) shouldEqual Vector(1, 2, 3)
Vector(1, 2, 3) should not equal (Vector(1, 2, 4))
}
def `with inferred GenSeq equality` {
implicit def travEq[T <: GenSeq[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
Vector(1, 2, 3) should not equal (Vector(1, 2, 3))
Vector(1, 2, 3) should equal (Vector(1, 2, 4))
Vector(1, 2, 3) shouldEqual Vector(1, 2, 4)
}
def `with specific Seq equality` {
implicit val e = new Equality[Vector[Int]] {
def areEqual(a: Vector[Int], b: Any): Boolean = a != b
}
Vector(1, 2, 3) should not equal (Vector(1, 2, 3))
Vector(1, 2, 3) should equal (Vector(1, 2, 4))
Vector(1, 2, 3) shouldEqual Vector(1, 2, 4)
}
def `with both GenSeq and specific Seq equality` {
implicit val e = new Equality[GenSeq[Int]] {
def areEqual(a: GenSeq[Int], b: Any): Boolean = a == b
}
implicit val e2 = new Equality[Vector[Int]] { // Should pick the exact one
def areEqual(a: Vector[Int], b: Any): Boolean = a != b
}
Vector(1, 2, 3) should not equal (Vector(1, 2, 3))
Vector(1, 2, 3) should equal (Vector(1, 2, 4))
Vector(1, 2, 3) shouldEqual Vector(1, 2, 4)
}
def `with both inferred GenSeq and specific Seq equality` {
implicit def travEq[T <: GenSeq[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a == b
}
implicit val e2 = new Equality[Vector[Int]] { // Should pick the exact one
def areEqual(a: Vector[Int], b: Any): Boolean = a != b
}
Vector(1, 2, 3) should not equal (Vector(1, 2, 3))
Vector(1, 2, 3) should equal (Vector(1, 2, 4))
Vector(1, 2, 3) shouldEqual Vector(1, 2, 4)
}
}
object `for mutable.Seq` {
def `with default equality` {
ListBuffer(1, 2, 3) should equal (ListBuffer(1, 2, 3))
ListBuffer(1, 2, 3) shouldEqual ListBuffer(1, 2, 3)
ListBuffer(1, 2, 3) should not equal (ListBuffer(1, 2, 4))
}
def `with inferred GenSeq equality` {
implicit def travEq[T <: GenSeq[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
ListBuffer(1, 2, 3) should not equal (ListBuffer(1, 2, 3))
ListBuffer(1, 2, 3) should equal (ListBuffer(1, 2, 4))
ListBuffer(1, 2, 3) shouldEqual ListBuffer(1, 2, 4)
}
def `with specific Seq equality` {
implicit val e = new Equality[ListBuffer[Int]] {
def areEqual(a: ListBuffer[Int], b: Any): Boolean = a != b
}
ListBuffer(1, 2, 3) should not equal (ListBuffer(1, 2, 3))
ListBuffer(1, 2, 3) should equal (ListBuffer(1, 2, 4))
ListBuffer(1, 2, 3) shouldEqual ListBuffer(1, 2, 4)
}
def `with both GenSeq and specific Seq equality` {
implicit val e = new Equality[GenSeq[Int]] {
def areEqual(a: GenSeq[Int], b: Any): Boolean = a == b
}
implicit val e2 = new Equality[ListBuffer[Int]] { // Should pick the exact one
def areEqual(a: ListBuffer[Int], b: Any): Boolean = a != b
}
ListBuffer(1, 2, 3) should not equal (ListBuffer(1, 2, 3))
ListBuffer(1, 2, 3) should equal (ListBuffer(1, 2, 4))
ListBuffer(1, 2, 3) shouldEqual ListBuffer(1, 2, 4)
}
def `with both inferred GenSeq and specific Seq equality` {
implicit def travEq[T <: GenSeq[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a == b
}
implicit val e2 = new Equality[ListBuffer[Int]] { // Should pick the exact one
def areEqual(a: ListBuffer[Int], b: Any): Boolean = a != b
}
ListBuffer(1, 2, 3) should not equal (ListBuffer(1, 2, 3))
ListBuffer(1, 2, 3) shouldEqual ListBuffer(1, 2, 4)
}
}
def `for Array` {
Array(1, 2, 3) should equal (Array(1, 2, 3))
Array(1, 2, 3) shouldEqual Array(1, 2, 3)
Array(1, 2, 3) should not equal (Array(1, 2, 4))
implicit val e = new Equality[Array[Int]] {
def areEqual(a: Array[Int], b: Any): Boolean = a.deep != b.asInstanceOf[Array[Int]].deep
}
Array(1, 2, 3) should not equal (Array(1, 2, 3))
Array(1, 2, 3) should equal (Array(1, 2, 4))
Array(1, 2, 3) shouldEqual Array(1, 2, 4)
}
object `for Java List` {
val javaList123: java.util.List[Int] = new java.util.ArrayList
javaList123.add(1)
javaList123.add(2)
javaList123.add(3)
val javaList124: java.util.List[Int] = new java.util.ArrayList
javaList124.add(1)
javaList124.add(2)
javaList124.add(4)
def `with default equality` {
javaList123 should equal (javaList123)
javaList123 shouldEqual javaList123
javaList123 should not equal (javaList124)
implicit val e = new Equality[java.util.Collection[Int]] {
def areEqual(a: java.util.Collection[Int], b: Any): Boolean = a != b
}
javaList123 should equal (javaList123)
javaList123 shouldEqual javaList123
javaList123 should not equal (javaList124)
}
def `with inferred java.util.Collection equality` {
implicit def travEq[T <: java.util.Collection[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a != b
}
javaList123 should not equal (javaList123)
javaList123 should equal (javaList124)
javaList123 shouldEqual javaList124
}
def `with specific java.util.List equality` {
implicit val e = new Equality[java.util.List[Int]] {
def areEqual(a: java.util.List[Int], b: Any): Boolean = a != b
}
javaList123 should not equal (javaList123)
javaList123 should equal (javaList124)
javaList123 shouldEqual javaList124
}
def `with both java.util.Collection and java.util.List equality` {
implicit val e = new Equality[java.util.Collection[Int]] {
def areEqual(a: java.util.Collection[Int], b: Any): Boolean = a == b
}
implicit val e2 = new Equality[java.util.List[Int]] { // Should pick the exact one
def areEqual(a: java.util.List[Int], b: Any): Boolean = a != b
}
javaList123 should not equal (javaList123)
javaList123 should equal (javaList124)
javaList123 shouldEqual javaList124
}
def `with both inferred java.util.List and specific java.util.List equality` {
implicit def travEq[T <: java.util.List[Int]] = new Equality[T] {
def areEqual(a: T, b: Any): Boolean = a == b
}
implicit val e2 = new Equality[java.util.List[Int]] { // Should pick the exact one
def areEqual(a: java.util.List[Int], b: Any): Boolean = a != b
}
javaList123 should not equal (javaList123)
javaList123 should equal (javaList124)
javaList123 shouldEqual javaList124
}
}
}
}
|
travisbrown/scalatest
|
src/test/scala/org/scalatest/ShouldEqualEqualitySpec.scala
|
Scala
|
apache-2.0
| 25,632 |
// Copyright 2016 Jim Pivarski
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test.scala.scaroot.reader
import scala.collection.mutable
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import org.scalatest.Matchers
import org.dianahep.scaroot.reader._
import org.dianahep.scaroot.reader.schema._
import org.dianahep.scaroot.reader.factory._
///////////////////////////////////////////////////////////////// Event.root classes
case class Tree(event: Event)
case class Event(
fType: String,
fEventName: Option[String],
fNtrack: Int,
fNseg: Int,
fNvertex: Int,
fFlag: Long,
fTemperature: Double,
fMeasures: Seq[Int],
fMatrix: Seq[Seq[Double]],
fClosestDistance: Option[Double],
fEvtHdr: EventHeader,
fTracks: Option[Seq[Track]],
fTriggerBits: TBits,
fIsValid: Boolean)
case class EventHeader(fEvtNum: Int, fRun: Int, fDate: Int)
case class Track(
fPx: Float,
fPy: Float,
fPz: Float,
fRandom: Float,
fMass2: Float,
fBx: Float,
fBy: Float,
fMeanCharge: Float,
fXfirst: Float,
fXlast: Float,
fYfirst: Float,
fYlast: Float,
fZfirst: Float,
fZlast: Float,
fCharge: Double,
fVertex: Seq[Double],
fNpoint: Int,
fValid: Short,
fNsp: Int,
fPointValue: Option[Double],
fTriggerBits: TBits)
case class TBits(fNbits: Long, fNbytes: Long, fAllBits: Option[Short])
///////////////////////////////////////////////////////////////// Bacon.root classes
case class Tree2(Info: baconhep.TEventInfo,
GenEvtInfo: baconhep.TGenEventInfo,
GenParticle: Seq[baconhep.TGenParticle],
LHEWeight: Seq[baconhep.TLHEWeight],
Electron: Seq[baconhep.TElectron],
Muon: Seq[baconhep.TMuon],
Tau: Seq[baconhep.TTau],
Photon: Seq[baconhep.TPhoton],
PV: Seq[baconhep.TVertex],
AK4CHS: Seq[baconhep.TJet],
AK8CHS: Seq[baconhep.TJet],
AddAK8CHS: Seq[baconhep.TAddJet],
CA15CHS: Seq[baconhep.TJet],
AddCA15CHS: Seq[baconhep.TAddJet],
AK4Puppi: Seq[baconhep.TJet],
CA8Puppi: Seq[baconhep.TJet],
AddCA8Puppi: Seq[baconhep.TAddJet],
CA15Puppi: Seq[baconhep.TJet],
AddCA15Puppi: Seq[baconhep.TAddJet]) extends Serializable
package baconhep {
class TEventInfo(val runNum: Long, // run number, event number, lumi section in data
val evtNum: Long, // run number, event number, lumi section in data
val lumiSec: Long, // run number, event number, lumi section in data
val metFilterFailBits: Long, // MET filter fail bits
val nPU: Long, // number of reconstructed pile-up vertices
val nPUm: Long, // number of reconstructed pile-up vertices
val nPUp: Long, // number of reconstructed pile-up vertices
val nPUmean: Float, // expected number of reconstructed pile-up vertices
val nPUmeanm: Float, // expected number of reconstructed pile-up vertices
val nPUmeanp: Float, // expected number of reconstructed pile-up vertices
val pvx: Float, // best primary vertex
val pvy: Float, // best primary vertex
val pvz: Float, // best primary vertex
val bsx: Float, // beamspot
val bsy: Float, // beamspot
val bsz: Float, // beamspot
val caloMET: Float, // calo MET
val caloMETphi: Float, // calo MET
val caloMETCov00: Float,
val caloMETCov01: Float,
val caloMETCov11: Float,
val pfMET: Float, // particle flow MET
val pfMETphi: Float, // particle flow MET
val pfMETCov00: Float,
val pfMETCov01: Float,
val pfMETCov11: Float,
val pfMETC: Float, // corrected particle flow MET
val pfMETCphi: Float, // corrected particle flow MET
val pfMETCCov00: Float,
val pfMETCCov01: Float,
val pfMETCCov11: Float,
val mvaMET: Float, // MVA MET
val mvaMETphi: Float, // MVA MET
val mvaMETCov00: Float,
val mvaMETCov01: Float,
val mvaMETCov11: Float,
val puppET: Float, // PUPPI MET
val puppETphi: Float, // PUPPI MET
val puppETCov00: Float,
val puppETCov01: Float,
val puppETCov11: Float,
val puppETC: Float, // Type1 PUPPI MET
val puppETCphi: Float, // Type1 PUPPI MET
val puppETCCov00: Float,
val puppETCCov01: Float,
val puppETCCov11: Float,
val pfMET30: Float, // particle flow MET
val pfMET30phi: Float, // particle flow MET
val pfMETC30: Float, // corrected particle flow MET
val pfMETC30phi: Float, // corrected particle flow MET
val mvaMET30: Float, // MVA MET
val mvaMET30phi: Float, // MVA MET
val puppET30: Float, // PUPPI MET
val puppET30phi: Float, // PUPPI MET
val puppETC30: Float, // Type1 PUPPI MET
val puppETC30phi: Float, // Type1 PUPPI MET
val alpacaMET: Float, // Alpaca MET
val alpacaMETphi: Float, // Alpaca MET
                 val pcpMET: Float, // Alpaca + PUPPI MET
                 val pcpMETphi: Float, // Alpaca + PUPPI MET
val trkMET: Float, // track MET
val trkMETphi: Float, // track MET
val rhoIso: Float, // event energy density
val rhoJet: Float, // event energy density
val hasGoodPV: Boolean) // event has a good PV?
extends Serializable
}
package baconhep {
case class TGenEventInfo(id_1: Int, // parton flavor PDG ID
id_2: Int, // parton flavor PDG ID
x_1: Float, // parton momentum fraction
x_2: Float, // parton momentum fraction
scalePDF: Float, // Q-scale used for PDF evaluation
xs: Float, // cross section from LHE file
weight: Float) // generator-level event weight
extends Serializable
}
package baconhep {
case class TGenParticle(parent: Int,
pdgId: Int,
status: Int,
pt: Float,
eta: Float,
phi: Float,
mass: Float,
y: Float) extends Serializable
}
package baconhep {
case class TLHEWeight(id: Int, // parton flavor PDG ID
weight: Float) // generator-level event weight
extends Serializable
}
package baconhep {
class TElectron(val pt: Float, // kinematics
val eta: Float, // kinematics
val phi: Float, // kinematics
val scEt: Float, // supercluster kinematics
val scEta: Float, // supercluster kinematics
val scPhi: Float, // supercluster kinematics
val ecalEnergy: Float, // ECAL energy
val pfPt: Float, // matching PF-candidate kinematics
val pfEta: Float, // matching PF-candidate kinematics
val pfPhi: Float, // matching PF-candidate kinematics
val trkIso: Float, // detector isolation
val ecalIso: Float, // detector isolation
val hcalIso: Float, // detector isolation
val hcalDepth1Iso: Float, // detector isolation
val chHadIso: Float, // PF isolation variables
val gammaIso: Float, // PF isolation variables
val neuHadIso: Float, // PF isolation variables
val puIso: Float, // PF isolation variables
val ecalPFClusIso: Float, // PF cluster isolation variables
val hcalPFClusIso: Float, // PF cluster isolation variables
val puppiChHadIso: Float, // Puppi Isolation R=0.4
val puppiGammaIso: Float, // Puppi Isolation R=0.4
val puppiNeuHadIso: Float, // Puppi Isolation R=0.4
val puppiChHadIsoNoLep: Float, // Puppi Isolation R=0.4 no lep
val puppiGammaIsoNoLep: Float, // Puppi Isolation R=0.4 no lep
val puppiNeuHadIsoNoLep: Float, // Puppi Isolation R=0.4 no lep
val d0: Float, // impact parameter
val dz: Float, // impact parameter
val sip3d: Float, // impact parameter
val sieie: Float, // shower shape
val e1x5: Float, // shower shape
val e2x5: Float, // shower shape
val e5x5: Float, // shower shape
val r9: Float, // shower shape
val eoverp: Float, // E/p
val hovere: Float, // H/E
val fbrem: Float, // brem fraction
val dEtaInSeed: Float, // track-supercluster matching
val dEtaIn: Float, // track-supercluster matching
val dPhiIn: Float, // track-supercluster matching
val mva: Float, // electron ID MVA value
val q: Int, // charge
val isConv: Boolean, // identified by track fit based conversion finder?
val nMissingHits: Long, // number of missing expected inner hits
val typeBits: Long, // electron type
val fiducialBits: Long, // ECAL fiducial region bits
val classification: Int, // electron classification
val scID: Int, // supercluster ID number (unique per event)
val trkID: Int) // track ID number (unique per event)
extends Serializable
}
package baconhep {
class TMuon(val pt: Float, // kinematics
val eta: Float, // kinematics
val phi: Float, // kinematics
val ptErr: Float, // kinematics
val staPt: Float, // STA track kinematics
val staEta: Float, // STA track kinematics
val staPhi: Float, // STA track kinematics
val pfPt: Float, // matched PFCandidate
val pfEta: Float, // matched PFCandidate
val pfPhi: Float, // matched PFCandidate
val trkIso: Float, // detector isolation (R=0.3)
val ecalIso: Float, // detector isolation (R=0.3)
val hcalIso: Float, // detector isolation (R=0.3)
val chHadIso: Float, // PF isolation variables (R=0.4)
val gammaIso: Float, // PF isolation variables (R=0.4)
val neuHadIso: Float, // PF isolation variables (R=0.4)
val puIso: Float, // PF isolation variables (R=0.4)
val puppiChHadIso: Float, // Puppi Isolation R=0.4
val puppiGammaIso: Float, // Puppi Isolation R=0.4
val puppiNeuHadIso: Float, // Puppi Isolation R=0.4
val puppiChHadIsoNoLep: Float, // Puppi Isolation R=0.4 no lep
val puppiGammaIsoNoLep: Float, // Puppi Isolation R=0.4 no lep
val puppiNeuHadIsoNoLep: Float, // Puppi Isolation R=0.4 no lep
val d0: Float, // impact parameter
val dz: Float, // impact parameter
val sip3d: Float, // impact parameter
val tkNchi2: Float, // track fit normalized chi-square
val muNchi2: Float, // track fit normalized chi-square
val trkKink: Float, // track kink
val glbKink: Float, // track kink
val trkHitFrac: Float, // fraction of valid tracker hits
val chi2LocPos: Float, // TRK-STA position match
val segComp: Float, // compatibility of tracker track with muon segment
val caloComp: Float, // muon hypothesis compatibility with calo energy
val q: Int, // charge
val nValidHits: Int, // number of valid muon hits in global fit
val typeBits: Long, // muon type bits
val selectorBits: Long, // MuonSelector bits
val pogIDBits: Long, // POG muon IDs from CMSSW
val nTkHits: Long, // number of hits in tracker
val nPixHits: Long, // number of hits in tracker
val nTkLayers: Long, // number of hit layers in tracker
val nPixLayers: Long, // number of hit layers in tracker
val nMatchStn: Long, // number of stations with muon segments
val trkID: Int) // tracker track ID (unique per event)
extends Serializable
}
package baconhep {
class TTau(val pt: Float, // kinematics
val eta: Float, // kinematics
val phi: Float, // kinematics
val m: Float, // kinematics
val e: Float, // kinematics
val q: Int, // charge
val dzLeadChHad: Float, // longitudinal impact parameter of leading charged hadron
val nSignalChHad: Long, // number of charged hadrons in tau
val nSignalGamma: Long, // number of photons in tau
val antiEleMVA5: Float, // anti-electron MVA5 for Run 2
val antiEleMVA5Cat: Float, // anti-electron MVA5 category for Run 2
val rawMuonRejection: Float, // raw MVA output of BDT based anti-muon discriminator
val rawIso3Hits: Float, // raw Iso 3 hits
val rawIsoMVA3oldDMwoLT: Float, // raw Iso MVA3, trained on prong 1 and 3, without lifetime info
val rawIsoMVA3oldDMwLT: Float, // raw Iso MVA3, trained on prong 1 and 3, with lifetime info
val rawIsoMVA3newDMwoLT: Float, // raw Iso MVA3, trained on prong 1, 2, and 3, without lifetime info
val rawIsoMVA3newDMwLT: Float, // raw Iso MVA3, trained on prong 1, 2, and 3, with lifetime info
val puppiChHadIso: Float, // Puppi Isolation R=0.4
val puppiGammaIso: Float, // Puppi Isolation R=0.4
val puppiNeuHadIso: Float, // Puppi Isolation R=0.4
val puppiChHadIsoNoLep: Float, // Puppi Isolation R=0.4 no lep
val puppiGammaIsoNoLep: Float, // Puppi Isolation R=0.4 no lep
val puppiNeuHadIsoNoLep: Float, // Puppi Isolation R=0.4 no lep
val hpsDisc: Double) // HPS tau discriminators
extends Serializable
}
package baconhep {
class TPhoton(val pt: Float, // kinematics
val eta: Float, // kinematics
val phi: Float, // kinematics
val scEt: Float, // SuperCluster kinematics
val scEta: Float, // SuperCluster kinematics
val scPhi: Float, // SuperCluster kinematics
val trkIso: Float, // detector isolation
val ecalIso: Float, // detector isolation
val hcalIso: Float, // detector isolation
val chHadIso: Float, // PF isolation variables
val gammaIso: Float, // PF isolation variables
val neuHadIso: Float, // PF isolation variables
val mva: Float, // Photon MVA ID
val hovere: Float, // H/E
val sthovere: Float, // Single tower H/E (https://twiki.cern.ch/twiki/bin/viewauth/CMS/HoverE2012)
val sieie: Float, // shower shape
val sipip: Float, // shower shape
val r9: Float, // shower shape
val fiducialBits: Long, // ECAL fiducial region
val typeBits: Long, // photon type
val scID: Int, // supercluster ID (unique per event)
val hasPixelSeed: Boolean, // has pixel seed?
                val passElectronVeto: Boolean, // if false, it's not a photon
val isConv: Boolean) extends Serializable
}
package baconhep {
case class TVertex(nTracksFit: Long, // number of tracks in vertex fit
ndof: Float, // vertex fit number of degrees of freedom
chi2: Float, // vertex fit chi-square
x: Float, // position
y: Float, // position
z: Float) // position
extends Serializable
}
package baconhep {
class TJet(val pt: Float, // kinematics
val eta: Float, // kinematics
val phi: Float, // kinematics
val mass: Float, // kinematics
val ptRaw: Float, // kinematics
val unc: Float, // kinematics
val area: Float, // jet area (from FastJet)
val d0: Float, // impact parameter of leading charged constituent
val dz: Float, // impact parameter of leading charged constituent
val csv: Float, // CSV b-tagger for the jet and subjets
val qgid: Float, // q/g discriminator and input variables
val axis2: Float, // q/g discriminator and input variables
val ptD: Float, // q/g discriminator and input variables
val mult: Int,
val q: Float, // Charge for jet and subjets
val mva: Float, // PU discriminator MVA
val beta: Float, // input variables for PU and q/g discriminators
val betaStar: Float, // input variables for PU and q/g discriminators
val dR2Mean: Float, // input variables for PU and q/g discriminators
val pullY: Float, // Jet pull
val pullPhi: Float, // Jet pull
val chPullY: Float,
val chPullPhi: Float,
val neuPullY: Float,
val neuPullPhi: Float,
val chEmFrac: Float, // fractional energy contribution by type
val neuEmFrac: Float, // fractional energy contribution by type
val chHadFrac: Float, // fractional energy contribution by type
val neuHadFrac: Float, // fractional energy contribution by type
val muonFrac: Float, // fractional energy contribution by type
val genpt: Float, // Matched GenJet
val geneta: Float, // Matched GenJet
val genphi: Float, // Matched GenJet
val genm: Float, // Matched GenJet
val partonFlavor: Int, // Flavor
val hadronFlavor: Int, // Flavor
val nCharged: Long, // constituent multiplicity
val nNeutrals: Long, // constituent multiplicity
val nParticles: Long) // constituent multiplicity
extends Serializable
}
package baconhep {
class TAddJet(val index: Long, // index in original jet collection
val mass_prun: Float, // groomed jet masses
val mass_trim: Float, // groomed jet masses
val mass_sd0: Float, // groomed jet masses
val pt_sd0: Float, // soft drop
val eta_sd0: Float, // soft drop
val phi_sd0: Float, // soft drop
val c2_0: Float, // Correlation function with various exponents
val c2_0P2: Float, // Correlation function with various exponents
val c2_0P5: Float, // Correlation function with various exponents
val c2_1P0: Float, // Correlation function with various exponents
val c2_2P0: Float, // Correlation function with various exponents
val qjet: Float, // Q-jet volatility
val tau1: Float, // N-subjettiness
val tau2: Float, // N-subjettiness
val tau3: Float, // N-subjettiness
val tau4: Float, // N-subjettiness
val doublecsv: Float, // Double b-tag
val sj1_pt: Float,
val sj1_eta: Float,
val sj1_phi: Float,
val sj1_m: Float,
val sj1_csv: Float,
val sj1_qgid: Float,
val sj1_q: Float,
val sj2_pt: Float,
val sj2_eta: Float,
val sj2_phi: Float,
val sj2_m: Float,
val sj2_csv: Float,
val sj2_qgid: Float,
val sj2_q: Float,
val sj3_pt: Float,
val sj3_eta: Float,
val sj3_phi: Float,
val sj3_m: Float,
val sj3_csv: Float,
val sj3_qgid: Float,
val sj3_q: Float,
val sj4_pt: Float,
val sj4_eta: Float,
val sj4_phi: Float,
val sj4_m: Float,
val sj4_csv: Float,
val sj4_qgid: Float,
val sj4_q: Float,
val pullAngle: Float,
val nTracks: Float, // Number of tracks associated to the jet
val trackSip3dSig_3: Float, // 3D SIP (IP value/error) for the most displaced tracks associated to the jet
val trackSip3dSig_2: Float, // 3D SIP (IP value/error) for the most displaced tracks associated to the jet
val trackSip3dSig_1: Float, // 3D SIP (IP value/error) for the most displaced tracks associated to the jet
val trackSip3dSig_0: Float, // 3D SIP (IP value/error) for the most displaced tracks associated to the jet
                val trackSip2dSigAboveCharm: Float, // 2D SIP of the first track that raises the mass above the charm threshold (1.5 GeV)
                val trackSip2dSigAboveBottom: Float, // 2D SIP of the first track that raises the mass above the bottom threshold (5.2 GeV)
val tau1_trackSip3dSig_0: Float, // 3D SIP of the two tracks with the highest SIP associated to the closest tau axis to the track
val tau1_trackSip3dSig_1: Float, // 3D SIP of the two tracks with the highest SIP associated to the closest tau axis to the track
val nSV: Float,
val tau_SVmass_nSecondaryVertices: Float, // Number of SVs (cross check), 2D flight distance significance
val tau_SVmass_flightDistance2dSig: Float, // Number of SVs (cross check), 2D flight distance significance
val tau_SVmass_vertexDeltaR: Float, // DeltaR between the secondary vertex flight direction and the jet axis
val tau_SVmass_vertexNTracks: Float, // DeltaR between the secondary vertex flight direction and the jet axis
val tau_SVmass_trackEtaRel_2: Float, // Pseudorapidity of the tracks at the vertex with respect to the tau axis closest to the leading SV in mass
val tau_SVmass_trackEtaRel_1: Float, // Pseudorapidity of the tracks at the vertex with respect to the tau axis closest to the leading SV in mass
val tau_SVmass_trackEtaRel_0: Float, // Pseudorapidity of the tracks at the vertex with respect to the tau axis closest to the leading SV in mass
val tau_SVmass_vertexEnergyRatio: Float, // Energy ratio and vertexMass
val tau_SVmass_vertexMass: Float, // Energy ratio and vertexMass
val tau_SVmass_vertexMass_corrected: Float, // Energy ratio and vertexMass
val tau_SVmass_zratio: Float, // z-ratio
val tau_SVfd_nSecondaryVertices: Float,
val tau_SVfd_flightDistance2dSig: Float,
val tau_SVfd_vertexDeltaR: Float,
val tau_SVfd_vertexNTracks: Float,
val tau_SVfd_trackEtaRel_2: Float,
val tau_SVfd_trackEtaRel_1: Float,
val tau_SVfd_trackEtaRel_0: Float,
val tau_SVfd_vertexEnergyRatio: Float,
val tau_SVfd_vertexMass: Float,
val tau_SVfd_vertexMass_corrected: Float,
val tau_SVfd_zratio: Float,
val topTagType: Long,
val top_n_subjets: Long,
val top_m_min: Float,
val top_m_123: Float,
val top_fRec: Float,
val topchi2: Float) extends Serializable
}
///////////////////////////////////////////////////////////////// actual tests
class DefaultSuite extends FlatSpec with Matchers {
"Event.root" must "work" in {
val myclasses = Map("Event" -> My[Event], "EventHeader" -> My[EventHeader], "Track" -> My[Track], "TBits" -> My[TBits], "Tree" -> My[Tree])
val iterator = RootTreeIterator[Tree](List("../root2avro/test_Event/Event.root"), "T", inferTypes = true, myclasses = myclasses, end = 10L)
while (iterator.hasNext)
println(iterator.next().event.fEventName)
val iterator2 = external.RootTreeIterator[Tree](List("../root2avro/test_Event/Event.root"), "T", inferTypes = true, myclasses = myclasses, command = "../root2avro/build/root2avro", end = 10L)
while (iterator2.hasNext)
println(iterator2.next().event.fEventName)
}
// "Bacon.root" must "work" in {
// val myclasses = Map("Events" -> My[Tree2], "baconhep::TEventInfo" -> My[baconhep.TEventInfo], "baconhep::TGenEventInfo" -> My[baconhep.TGenEventInfo], "baconhep::TGenParticle" -> My[baconhep.TGenParticle], "baconhep::TLHEWeight" -> My[baconhep.TLHEWeight], "baconhep::TElectron" -> My[baconhep.TElectron], "baconhep::TMuon" -> My[baconhep.TMuon], "baconhep::TTau" -> My[baconhep.TTau], "baconhep::TPhoton" -> My[baconhep.TPhoton], "baconhep::TVertex" -> My[baconhep.TVertex], "baconhep::TJet" -> My[baconhep.TJet], "baconhep::TAddJet" -> My[baconhep.TAddJet])
// val iterator = RootTreeIterator[Tree2](List("../root2avro/test_Bacon/Output.root"), "Events", inferTypes = true, myclasses = myclasses, end = 10L)
// while (iterator.hasNext) {
// val x = iterator.next()
// println(x.PV)
// println(x.Photon)
// }
// val iterator2 = external.RootTreeIterator[Tree2](List("../root2avro/test_Bacon/Output.root"), "Events", inferTypes = true, myclasses = myclasses, command = "../root2avro/build/root2avro", end = 10L)
// while (iterator2.hasNext) {
// val x = iterator2.next()
// println(x.PV)
// println(x.Photon)
// }
// }
}
|
diana-hep/rootconverter
|
scaroot-reader/src/test/scala/test.scala
|
Scala
|
apache-2.0
| 27,796 |
package org.apache.spark.ml.bundle.ops.feature
import ml.combust.bundle.BundleContext
import ml.combust.bundle.dsl._
import ml.combust.bundle.op.OpModel
import org.apache.spark.ml.attribute.{Attribute, BinaryAttribute, NominalAttribute, NumericAttribute}
import org.apache.spark.ml.bundle._
import org.apache.spark.ml.feature.OneHotEncoderModel
import org.apache.spark.sql.types.StructField
import scala.util.{Failure, Try}
object OneHotEncoderOp {
def sizeForField(field: StructField): Int = {
val attr = Attribute.fromStructField(field)
(attr match {
case nominal: NominalAttribute =>
if (nominal.values.isDefined) {
Try(nominal.values.get.length)
} else if (nominal.numValues.isDefined) {
Try(nominal.numValues.get)
} else {
Failure(new RuntimeException(s"invalid nominal value for field ${field.name}"))
}
case binary: BinaryAttribute =>
Try(2)
case _: NumericAttribute =>
Failure(new RuntimeException(s"invalid numeric attribute for field ${field.name}"))
case _ =>
        Failure(new RuntimeException(s"unsupported attribute for field ${field.name}")) // fail on unrecognized attribute types
}).get
}
}
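// A minimal usage sketch of sizeForField (not part of the original file): it shows how the
// category count is read from Spark ML column metadata. The column name "color" and its
// values are made up for illustration.
//
//   import org.apache.spark.ml.attribute.NominalAttribute
//
//   val field = NominalAttribute.defaultAttr
//     .withName("color")
//     .withValues("red", "green", "blue")
//     .toStructField()
//
//   OneHotEncoderOp.sizeForField(field)  // == 3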
class OneHotEncoderOp extends SimpleSparkOp[OneHotEncoderModel] {
override val Model: OpModel[SparkBundleContext, OneHotEncoderModel] = new OpModel[SparkBundleContext, OneHotEncoderModel] {
override val klazz: Class[OneHotEncoderModel] = classOf[OneHotEncoderModel]
override def opName: String = Bundle.BuiltinOps.feature.one_hot_encoder
override def store(model: Model, obj: OneHotEncoderModel)
(implicit context: BundleContext[SparkBundleContext]): Model = {
assert(context.context.dataset.isDefined, BundleHelper.sampleDataframeMessage(klazz))
val df = context.context.dataset.get
val categorySizes = obj.getInputCols.map { f ⇒ OneHotEncoderOp.sizeForField(df.schema(f)) }
model.withValue("category_sizes", Value.intList(categorySizes))
.withValue("drop_last", Value.boolean(obj.getDropLast))
.withValue("handle_invalid", Value.string(obj.getHandleInvalid))
}
override def load(model: Model)
(implicit context: BundleContext[SparkBundleContext]): OneHotEncoderModel = {
new OneHotEncoderModel(uid = "", categorySizes = model.value("category_sizes").getIntList.toArray)
.setDropLast(model.value("drop_last").getBoolean)
.setHandleInvalid(model.value("handle_invalid").getString)
}
}
override def sparkLoad(uid: String, shape: NodeShape, model: OneHotEncoderModel): OneHotEncoderModel = {
new OneHotEncoderModel(uid = uid, categorySizes = model.categorySizes)
.setDropLast(model.getDropLast)
.setHandleInvalid(model.getHandleInvalid)
}
override def sparkInputs(obj: OneHotEncoderModel): Seq[ParamSpec] = Seq(ParamSpec("input", obj.inputCols))
override def sparkOutputs(obj: OneHotEncoderModel): Seq[ParamSpec] = Seq(ParamSpec("output", obj.outputCols))
}
|
combust-ml/mleap
|
mleap-spark/src/main/scala/org/apache/spark/ml/bundle/ops/feature/OneHotEncoderOp.scala
|
Scala
|
apache-2.0
| 3,061 |
/*
* Copyright 2001-2015 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools.scalasbt
import org.scalatest._
class FaulthyBeforeAndAfterAllSuite extends FunSuite with BeforeAndAfterAll {
test("test 1") {}
override protected def afterAll() = {
throw new RuntimeException("oops!")
}
}
|
dotty-staging/scalatest
|
scalatest-test/src/test/scala/org/scalatest/tools/scalasbt/FaulthyBeforeAndAfterAllSuite.scala
|
Scala
|
apache-2.0
| 849 |
package com.lyrx.markdown
import java.io.File
import com.lyrx.text.StringGenerator._
import com.lyrx.text._;
/**
* Created by alex on 17.10.16.
*/
trait MarkdownGenerator {
//implicit def toBold()
implicit class StringToMarkdown(aString: String) {
def code()(implicit cb: StringCollector) = cb.collect(
aString.split("\\n").foldLeft("\\n\\n")((old:String,line:String)=>{
old + " "+line+"\\n"
}) + "\\n\\n"
)
def code(language:String="")(implicit cb: StringCollector) = cb.collect(
s"\\n```${language}\\n" +
aString + "\\n" +
"```\\n"
)
def bold()(implicit cb: StringCollector) = cb.collect(s"**${aString}**")
def bold(aName:String):String = aString.replaceAllLiterally(aName,s"**${aName}**")
def italic(aName:String):String = aString.replaceAllLiterally(aName,s"*${aName}*")
def italic()(implicit cb: StringCollector) = cb.collect(s"*${aString}*")
def inlineCode(aName:String):String = aString.replaceAllLiterally(aName,s"`${aName}`")
def inlineCode()(implicit cb: StringCollector) = cb.collect(s"`${aString}`")
def link(aDescr:String,aUrl:String):String = aString.replaceAllLiterally(aDescr,s"[${aDescr}](${aUrl})")
def quote()(implicit cb: StringCollector) = cb.collect(
aString.split("\\n").foldLeft("\\n")((old:String,line:String)=>{
old + "> "+line.trim()+"\\n"
}) + "\\n")
def h1()(implicit cb: StringCollector) = cb.collect(s"# ${aString}\\n\\n")
def h2()(implicit cb: StringCollector) = cb.collect(s"## ${aString}\\n\\n")
def h3()(implicit cb: StringCollector) = cb.collect(s"### ${aString}\\n\\n")
def h4()(implicit cb: StringCollector) = cb.collect(s"#### ${aString}\\n\\n")
def md()(implicit cb: StringCollector) = cb.collect(aString)
def item()(implicit cb: StringCollector) = cb.collect(s"- ${aString}\\n")
def subitem()(implicit cb: StringCollector) = cb.collect(s" - ${aString}\\n")
def item(num:Int)(implicit cb: StringCollector) = cb.collect(s"${num}. ${aString}\\n")
}
def mdFile(aName:String)(implicit cb: StringCollector,ctx:Context) ={
cb.collect(new SimpleFileReader(aName))
}
def codeFile(aName:String)(implicit cb: StringCollector,ctx:Context) ={
val index = aName.lastIndexOf(".")
val aLanguage=if ( index >= 0) {
aName.substring(index+1) match {
case "scala"=> "scala"
case "java"=> "java"
case "js"=> "javascript"
case "pl"=> "perl"
case "html"=> "html"
case "xml"=> "xml"
case "json"=> "json"
case _ => ""
}
}
else {
""
}
cb.collect(s"\\n```${aLanguage}\\n")
cb.collect(new SimpleFileReader(aName))
cb.collect("\\n```")
}
def img(descr:String,url:String)(implicit cb: StringCollector)={
    cb.collect(s"![${descr}](${url})")
}
}
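// A minimal usage sketch (not part of the original file). The object and method names are
// illustrative, and it assumes a StringCollector whose collect(String) appends to an
// in-memory buffer; the concrete com.lyrx.text implementation may differ.
//
//   object Example extends MarkdownGenerator {
//     def demo()(implicit cb: StringCollector): Unit = {
//       "My Document".h1()
//       "Some intro text.".md()
//       "first point".item()
//       "println(\"hello\")".code("scala")
//     }
//   }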
|
lyrx/lyrxgenerator
|
src/main/scala/com/lyrx/markdown/MarkdownGenerator.scala
|
Scala
|
gpl-3.0
| 2,859 |
package com.github.andr83.parsek.pipe
import com.github.andr83.parsek._
import org.scalatest.{FlatSpec, Inside, Matchers}
/**
* @author andr83
*/
class ParseJsonSpec extends FlatSpec with Matchers with Inside {
implicit val context = new PipeContext()
"Json string" should "be parsed to PMap value" in {
val json = PString(
"""
|{
| "fieldStr": "Some string",
| "fieldInt": 10,
| "fieldDouble": 1.5,
| "fieldArray": [2,3,4],
| "fieldMap": {
| "innerField": "Other value"
| }
|}
""".stripMargin)
val parser = ParseJsonPipe()
val result = parser.run(json)
result shouldBe Some(PMap(
"fieldStr" -> PString("Some string"),
"fieldInt" -> PLong(10),
"fieldDouble" -> PDouble(1.5),
"fieldArray" -> PList(PLong(2) :: PLong(3) :: PLong(4) :: Nil),
"fieldMap" -> PMap(
"innerField" -> PString("Other value")
)
))
}
}
|
andr83/parsek
|
core/src/test/scala/com/github/andr83/parsek/pipe/ParseJsonSpec.scala
|
Scala
|
mit
| 969 |
package spire
package math
import org.scalatest.FunSuite
import spire.implicits.{eqOps => _, _}
import java.math.MathContext
class ComplexTest extends FunSuite {
test("create Complex[Double]") {
val (real, imag) = (3.0, 44.0)
val c = Complex(real, imag)
assert(c.real === real)
assert(c.imag === imag)
assert(c === c)
}
test("create Complex[BigDecimal]") {
implicit val mc = MathContext.DECIMAL128
val (real, imag) = (BigDecimal(222.0), BigDecimal(3483.0))
val c = Complex(real, imag)
assert(c.real === real)
assert(c.imag === imag)
assert(c === c)
}
test("some basic equality stuff") {
val one = Complex.one[Double]
val i = Complex.i[Double]
assert(one === 1)
assert(one === 1.0)
assert(one === Complex.one[Double])
assert(1 === one)
assert(1.0 === one)
assert(Complex.one[Double] === one)
assert(1 != i)
assert(1.0 != i)
assert(one != i)
assert(i != 1)
assert(i != 1.0)
assert(i != one)
}
test("complex arithmetic") {
val i = Complex.i[Double]
val a = 4.0 + 3.0*i
val b = 1.0 + 2.0*i
val c = 2.0 + 0.0*i
assert(a + b === 5.0+5.0*i)
assert(b + c === Complex(3.0, 2.0))
assert(b + c === Complex(3.0, 2.0))
assert(a - b === Complex(3.0, 1.0))
assert(b - c === Complex(-1.0, 2.0))
assert(a - c === Complex(2.0, 3.0))
assert(a * b === Complex(-2.0, 11.0))
assert(b * c === Complex(2.0, 4.0))
assert(a * c === Complex(8.0, 6.0))
assert(a / b === Complex(2.0, -1.0))
assert(b / c === Complex(0.5, 1.0))
assert(a / c === Complex(2.0, 1.5))
}
test("test e^(i * pi) with Double") {
val e = Complex(scala.math.E, 0.0)
val pi = Complex(scala.math.Pi, 0.0)
val i = Complex.i[Double]
val one = Complex.one[Double]
val z = e.pow(i * pi) + one
assert (z.real === 0.0)
assert (z.imag < 0.000000000000001) // sigh...
assert (z.imag > -0.000000000000001)
}
test("test roots of unity") {
val one = Complex.one[Double]
val i = Complex.i[Double]
assert(Complex.rootsOfUnity[Double](2) === Array(one, -one))
assert(Complex.rootOfUnity[Double](2, 0) === one)
assert(Complex.rootOfUnity[Double](2, 1) === -one)
assert(Complex.rootsOfUnity[Double](4) === Array(one, i, -one, -i))
assert(Complex.rootOfUnity[Double](4, 0) === one)
assert(Complex.rootOfUnity[Double](4, 1) === i)
assert(Complex.rootOfUnity[Double](4, 2) === -one)
assert(Complex.rootOfUnity[Double](4, 3) === -i)
val theta = 2.0 * scala.math.Pi / 3.0
val c1 = math.cos(theta) + math.sin(theta) * i
val c2 = -one - c1
assert(Complex.rootsOfUnity[Double](3) === Array(one, c1, c2))
}
test("try using FastComplex") {
val fc = FastComplex
val a = fc(3.0, -2.0)
val b = fc(2.0, 1.0)
assert(fc.add(a, b) === fc(5.0, -1.0))
assert(fc.subtract(a, b) === fc(1.0, -3.0))
assert(fc.multiply(a, b) === fc(8.0, -1.0))
val e = fc(scala.math.E, 0.0)
val pi = fc(scala.math.Pi, 0.0)
val ipi = fc.multiply(fc.i, pi)
val e_ipi = fc.pow(e, ipi)
val z = fc.add(e_ipi, fc.one)
assert(fc.real(z) == 0.0F)
assert(fc.imag(z) < 0.000000001F)
assert(fc.multiply(fc.i, fc.i) === fc(-1f, 0f))
assert(fc.imag(fc(-1f, 0f)) === 0f)
}
test("try using FloatComplex") {
val fc = FastComplex
val a = FloatComplex(3.0, -2.0)
val b = FloatComplex(2.0, 1.0)
assert(a + b === FloatComplex(5.0, -1.0))
assert(a - b === FloatComplex(1.0, -3.0))
assert(a * b === FloatComplex(8.0, -1.0))
val i = FloatComplex.i
val one = FloatComplex.one
val e = FloatComplex(scala.math.E, 0.0)
val pi = FloatComplex(scala.math.Pi, 0.0)
val z = e.pow(i * pi) + one
assert(z.real == 0.0F)
assert(z.imag < 0.000000001F)
}
test("complex trigonometry") {
// these are just a spot check to avoid sign errors
assert(Complex(3.0, 5.0).sin == Complex(10.472508533940392, -73.46062169567367))
assert(Complex(3.0, -5.0).sin == Complex(10.472508533940392, 73.46062169567367))
assert(Complex(-3.0, 5.0).sin == Complex(-10.472508533940392, -73.46062169567367))
assert(Complex(-3.0, -5.0).sin == Complex(-10.472508533940392, 73.46062169567367))
assert(Complex(3.0, 5.0).cos == Complex(-73.46729221264526, -10.471557674805572))
assert(Complex(3.0, -5.0).cos == Complex(-73.46729221264526, 10.471557674805572))
assert(Complex(-3.0, 5.0).cos == Complex(-73.46729221264526, 10.471557674805572))
assert(Complex(-3.0, -5.0).cos == Complex(-73.46729221264526, -10.471557674805572))
}
test("complex norm") {
assert(Complex(3.0, 4.0).norm == 5.0)
// check against overflow
assert(Complex((3e20).toFloat, (4e20).toFloat).norm == (5e20).toFloat)
}
}
|
adampingel/spire
|
tests/src/test/scala/spire/math/ComplexTest.scala
|
Scala
|
mit
| 4,797 |
package org.maikalal.seccam.videos
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import scala.collection.JavaConversions._
case class SecCamVideoUploadSettings(conf:Config) {
  conf.checkValid(ConfigFactory.load("ref-video-upload.config"), "dailymotion")
val dailymotionApiKey = conf.getString("dailymotion.developer.api.key")
val dailymotionApiSecret = conf.getString("dailymotion.developer.api.secret")
val dailymotionAccountUserId = conf.getString("dailymotion.account.user.id")
val dailymotionAccountUserPassword = conf.getString("dailymotion.account.user.password")
val dailymotionApiOauth2AccessTokenURI = conf.getString("dailymotion.api.oauth2.access.token.uri")
val dailymotionApiOauth2AccessTokenPermittedAgeInSeconds = conf.getLong("dailymotion.api.oauth2.access.token.permitted.age.inSeconds")
val dailymotionApiOauth2AccessTokenDownloadJsonFile = conf.getString("dailymotion.api.oauth2.access.token.download.json.file")
val dailymotionApiVideoUploadScope = conf.getStringList("dailymotion.api.video.upload.scopes").toList
val dailymotionApiVideoUploadRequestUri = conf.getString("dailymotion.api.video.upload.request.uri")
val dailymotionApiVideoUploadApprovalUri = conf.getString("dailymotion.api.video.upload.approval.uri")
val dailymotionApiVideoUploadPublishUri = conf.getString("dailymotion.api.video.upload.publish.uri")
val dailymotionApiVideoListUri = conf.getString("dailymotion.api.video.list.uri")
val securityCameraVideosSourceFolder = conf.getString("security.camera.video.source.folder")
val securityCameraVideosArchiveFolder = conf.getString("security.camera.video.archive.folder")
val approvedVideoFileExtensions = conf.getStringList("approved.video.file.extensions").toList.map(_.toLowerCase())
}
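// A minimal usage sketch (not part of the original file); it assumes an application config
// on the classpath that defines the "dailymotion.*", "security.camera.*" and
// "approved.video.file.extensions" keys read above.
//
//   val settings = SecCamVideoUploadSettings(ConfigFactory.load())
//   val canUpload = settings.approvedVideoFileExtensions.contains("mp4")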
|
pratimsc/scalaRestInteraction
|
src/main/scala/org/maikalal/seccam/videos/SecCamVideoUploadSettings.scala
|
Scala
|
gpl-3.0
| 1,793 |
/**
* (c) Copyright 2012 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.shell
import org.specs2.mutable._
import org.kiji.schema.KijiURI
import org.kiji.schema.avro.RowKeyEncoding
import org.kiji.schema.avro.RowKeyFormat
import org.kiji.schema.avro.TableLayoutDesc
import org.kiji.schema.KConstants
import org.kiji.schema.layout.KijiTableLayout
import org.kiji.schema.util.VersionInfo
import java.util.ArrayList
import org.kiji.schema.shell.ddl.CreateTableCommand
import org.kiji.schema.shell.input.NullInputSource
class TestMockKijiSystem extends SpecificationWithJUnit {
val defaultURI = KijiURI.newBuilder().withInstanceName(KConstants.DEFAULT_INSTANCE_NAME).build()
"MockKijiSystem" should {
"include three instances" in {
val instances = new MockKijiSystem().listInstances()
instances.size mustEqual 3
instances.contains(KConstants.DEFAULT_INSTANCE_NAME) mustEqual true
instances.contains("foo") mustEqual true
instances.contains("a-missing-instance") mustEqual false
}
"allow create table" in {
val avro: TableLayoutDesc = new TableLayoutDesc
avro.setLocalityGroups(new ArrayList())
avro.setVersion(CreateTableCommand.DDL_LAYOUT_VERSION.toString())
avro.setName("t")
avro.setDescription("desc")
val rowKeyFormat = new RowKeyFormat
rowKeyFormat.setEncoding(RowKeyEncoding.HASH)
avro.setKeysFormat(rowKeyFormat)
val layout = KijiTableLayout.newLayout(avro)
val sys = new MockKijiSystem
sys.createTable(defaultURI, "t", layout)
(sys.getTableNamesDescriptions(defaultURI)
mustEqual List(("t", "desc")).toArray)
}
"support the Environment.containsTable operation" in {
val avro: TableLayoutDesc = new TableLayoutDesc
avro.setLocalityGroups(new ArrayList())
avro.setVersion(CreateTableCommand.DDL_LAYOUT_VERSION.toString())
avro.setName("t")
avro.setDescription("desc")
val rowKeyFormat = new RowKeyFormat
rowKeyFormat.setEncoding(RowKeyEncoding.HASH)
avro.setKeysFormat(rowKeyFormat)
val layout = KijiTableLayout.newLayout(avro)
val sys = new MockKijiSystem
sys.createTable(defaultURI, "t", layout)
new Environment(defaultURI, Console.out,
sys, new NullInputSource).containsTable("t") mustEqual true
}
"allow drop table" in {
val avro: TableLayoutDesc = new TableLayoutDesc
avro.setLocalityGroups(new ArrayList())
avro.setVersion(CreateTableCommand.DDL_LAYOUT_VERSION.toString())
avro.setName("t")
avro.setDescription("desc")
val rowKeyFormat = new RowKeyFormat
rowKeyFormat.setEncoding(RowKeyEncoding.HASH)
avro.setKeysFormat(rowKeyFormat)
val layout = KijiTableLayout.newLayout(avro)
val sys = new MockKijiSystem
sys.createTable(defaultURI, "t", layout)
(sys.getTableNamesDescriptions(defaultURI)
mustEqual List(("t", "desc")).toArray)
sys.dropTable(defaultURI, "t")
(sys.getTableNamesDescriptions(defaultURI)
mustEqual List[(String, String)]().toArray)
}
"disallow create table twice on the same name" in {
val avro: TableLayoutDesc = new TableLayoutDesc
avro.setLocalityGroups(new ArrayList())
avro.setVersion(CreateTableCommand.DDL_LAYOUT_VERSION.toString())
val sys = new MockKijiSystem
avro.setName("t")
avro.setDescription("desc")
val rowKeyFormat = new RowKeyFormat
rowKeyFormat.setEncoding(RowKeyEncoding.HASH)
avro.setKeysFormat(rowKeyFormat)
val layout = KijiTableLayout.newLayout(avro)
sys.createTable(defaultURI, "t", layout)
(sys.createTable(defaultURI, "t", layout)
must throwA[RuntimeException])
}
"disallow drop table on missing table" in {
val sys = new MockKijiSystem
sys.dropTable(defaultURI, "t") must throwA[RuntimeException]
}
"disallow apply layout on missing table" in {
val sys = new MockKijiSystem
val avro: TableLayoutDesc = new TableLayoutDesc
avro.setLocalityGroups(new ArrayList())
avro.setVersion(CreateTableCommand.DDL_LAYOUT_VERSION.toString())
val rowKeyFormat = new RowKeyFormat
rowKeyFormat.setEncoding(RowKeyEncoding.HASH)
avro.setKeysFormat(rowKeyFormat)
avro.setName("t")
avro.setDescription("meep")
// Verify that this is a valid layout
KijiTableLayout.newLayout(avro)
// .. but you can't apply it to a missing table.
(sys.applyLayout(defaultURI, "t", avro)
must throwA[RuntimeException])
}
"createTable should fail on malformed input records" in {
val sys = new MockKijiSystem
val avro: TableLayoutDesc = new TableLayoutDesc // Missing the localityGroups list, etc.
KijiTableLayout.newLayout(avro) must throwA[RuntimeException]
}
"update layout with applyLayout" in {
val avro: TableLayoutDesc = new TableLayoutDesc
avro.setLocalityGroups(new ArrayList())
avro.setVersion(CreateTableCommand.DDL_LAYOUT_VERSION.toString())
val rowKeyFormat = new RowKeyFormat
rowKeyFormat.setEncoding(RowKeyEncoding.HASH)
avro.setKeysFormat(rowKeyFormat)
avro.setName("t")
avro.setDescription("desc1")
val layout: KijiTableLayout = KijiTableLayout.newLayout(avro)
val sys = new MockKijiSystem
sys.createTable(defaultURI, "t", layout)
(sys.getTableNamesDescriptions(defaultURI)
mustEqual List(("t", "desc1")).toArray)
val avro2: TableLayoutDesc = new TableLayoutDesc
avro2.setLocalityGroups(new ArrayList())
avro2.setVersion(CreateTableCommand.DDL_LAYOUT_VERSION.toString())
avro2.setName("t")
avro2.setDescription("desc2")
avro2.setKeysFormat(rowKeyFormat)
sys.applyLayout(defaultURI, "t", avro2)
(sys.getTableNamesDescriptions(defaultURI)
mustEqual List(("t", "desc2")).toArray)
}
"getTableLayout() should deep copy Avro records given to client" in {
val avro: TableLayoutDesc = new TableLayoutDesc
avro.setLocalityGroups(new ArrayList())
avro.setVersion(CreateTableCommand.DDL_LAYOUT_VERSION.toString())
avro.setName("t")
avro.setDescription("desc1")
val rowKeyFormat = new RowKeyFormat
rowKeyFormat.setEncoding(RowKeyEncoding.HASH)
avro.setKeysFormat(rowKeyFormat)
val layout: KijiTableLayout = KijiTableLayout.newLayout(avro)
val sys = new MockKijiSystem
sys.createTable(defaultURI, "t", layout)
(sys.getTableNamesDescriptions(defaultURI)
mustEqual List(("t", "desc1")).toArray)
val maybeLayout2 = sys.getTableLayout(defaultURI, "t")
maybeLayout2 must beSome[KijiTableLayout]
val layout2 = maybeLayout2 match {
case Some(layout) => layout
case None => throw new RuntimeException("Missing!")
}
layout2.getDesc().setDescription("desc2") // Prove that this updates a copy...
// By verifying that the MockKijiSystem returns the original description.
(sys.getTableNamesDescriptions(defaultURI)
mustEqual List(("t", "desc1")).toArray)
}
}
}
|
alexandre-normand/kiji-schema-shell
|
src/test/scala/org/kiji/schema/shell/TestMockKijiSystem.scala
|
Scala
|
apache-2.0
| 7,833 |