| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops.variables
/** Enumeration of possible variable reuse options, used by variable scopes and variable stores.
*
* The supported options are:
* - [[ReuseExistingOnly]]: Reuse existing variables only and throw an exception if no appropriate variable exists.
* - [[CreateNewOnly]]: Create new variables only and throw an exception if a variable with the same name exists.
* - [[ReuseOrCreateNew]]: Reuse existing variables or create new ones, if no variable with the provided name exists.
*
* @author Emmanouil Antonios Platanios
*/
sealed trait Reuse
/** Trait marking the variable reuse modes that allow reusing existing variables. */
sealed trait ReuseAllowed extends Reuse
/** Reuse existing variables only and throw an exception if no appropriate variable exists. */
case object ReuseExistingOnly extends ReuseAllowed
/** Create new variables only and throw an exception if a variable with the same name exists. */
case object CreateNewOnly extends Reuse
/** Reuse existing variables or create new ones, if no variable with the provided name exists. */
case object ReuseOrCreateNew extends ReuseAllowed
| eaplatanios/tensorflow | tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/ops/variables/Reuse.scala | Scala | apache-2.0 | 1,806 |
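A minimal sketch of how calling code might branch on these reuse modes; the `lookupOrCreate` helper below is hypothetical and not part of the API above:
def lookupOrCreate(name: String, existing: Set[String], reuse: Reuse): String = reuse match {
  case ReuseExistingOnly if existing(name) => name
  case ReuseExistingOnly => throw new IllegalArgumentException(s"Variable '$name' does not exist.")
  case CreateNewOnly if existing(name) => throw new IllegalArgumentException(s"Variable '$name' already exists.")
  case CreateNewOnly => name
  case ReuseOrCreateNew => name
}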
package bifrost.transaction
import com.google.common.primitives.Longs
import bifrost.transaction.box.{Box, BoxUnlocker}
import bifrost.transaction.box.proposition.Proposition
abstract class BoxTransaction[P <: Proposition, BX <: Box[P]] extends Transaction[P] {
val unlockers: Traversable[BoxUnlocker[P]]
val newBoxes: Traversable[BX]
override lazy val messageToSign: Array[Byte] =
(if(newBoxes.nonEmpty) newBoxes.map(_.bytes).reduce(_ ++ _) else Array[Byte]()) ++
unlockers.map(_.closedBoxId).reduce(_ ++ _) ++
Longs.toByteArray(timestamp) ++
Longs.toByteArray(fee)
}
| Topl/Project-Bifrost | src/main/scala/bifrost/transaction/BoxTransaction.scala | Scala | mpl-2.0 | 620 |
package iota.pure.v2
import android.app.Activity
import android.os.Bundle
import android.view._
/** beware:
* https://youtrack.jetbrains.com/issue/SCL-9888
* https://issues.scala-lang.org/browse/SI-9658
*/
trait PureActivity[S] extends Activity {
private[this] var state: S = _
sealed trait ActivityState[T] extends PureState[T,S]
trait ActivityStateUnit extends ActivityState[Unit] {
val zero = ()
}
trait ActivityStateBoolean extends ActivityState[Boolean] {
val zero = false
}
case class OnPreCreate(state: S) extends ActivityStateUnit
case class OnCreate(state: S) extends ActivityStateUnit
case class OnDestroy(state: S) extends ActivityStateUnit
case class OnStart(state: S) extends ActivityStateUnit
case class OnStop(state: S) extends ActivityStateUnit
case class OnResume(state: S) extends ActivityStateUnit
case class OnPause(state: S) extends ActivityStateUnit
case class OnCreateOptionsMenu(state: S, menu: Menu) extends ActivityStateBoolean
case class OnOptionsItemSelected(state: S, item: MenuItem) extends ActivityStateBoolean
case class TransformState(state: S, oldState: S) extends ActivityStateUnit
case class SaveState(state: S, bundle: Bundle) extends ActivityStateUnit
def initialState(b: Option[Bundle]): S
def applyState[T]: PartialFunction[ActivityState[T],(T,S)]
def transformState(f: S => S): S = {
state = doApplyState(TransformState(f(state),state))._2
state
}
private[this] def doApplyState[T](s: ActivityState[T]): (T,S) =
applyState[T].applyOrElse(s, defaultApplyState[T])
def defaultApplyState[T](s: ActivityState[T]): (T,S) = s.zero -> s.state
final override def onCreate(savedInstanceState: Bundle) = {
state = doApplyState(OnPreCreate(initialState(Option(savedInstanceState))))._2
super.onCreate(savedInstanceState)
state = doApplyState(OnCreate(state))._2
}
final override def onCreateOptionsMenu(m: Menu): Boolean = {
val b = super.onCreateOptionsMenu(m)
val (r,st) = doApplyState(OnCreateOptionsMenu(state, m))
state = st
b || r
}
override def onOptionsItemSelected(item: MenuItem) = {
val (r,st) = doApplyState(OnOptionsItemSelected(state, item))
state = st
r || super.onOptionsItemSelected(item)
}
final override def onSaveInstanceState(outState: Bundle) = {
super.onSaveInstanceState(outState)
state = doApplyState(SaveState(state, outState))._2
}
final override def onStart() = {
super.onStart()
state = doApplyState(OnStart(state))._2
}
final override def onResume() = {
super.onResume()
state = doApplyState(OnResume(state))._2
}
final override def onPause() = {
super.onPause()
state = doApplyState(OnPause(state))._2
}
final override def onStop() = {
super.onStop()
state = doApplyState(OnStop(state))._2
}
final override def onDestroy() = {
super.onDestroy()
state = doApplyState(OnDestroy(state))._2
}
}
| pfn/iota-pure | src/main/scala/v2/activity.scala | Scala | apache-2.0 | 3,132 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.audit.http
import uk.gov.hmrc.play.audit.AuditExtensions
import uk.gov.hmrc.play.audit.model.DataEvent
import uk.gov.hmrc.play.http.HeaderCarrier
trait HttpAuditEvent {
import play.api.mvc.RequestHeader
def appName: String
object auditDetailKeys {
val Input = "input"
val Method = "method"
val UserAgentString = "userAgentString"
val Referrer = "referrer"
}
object headers {
val UserAgent = "User-Agent"
val Referer = "Referer"
}
protected[http] def dataEvent(eventType: String, transactionName: String, request: RequestHeader)
(implicit hc: HeaderCarrier = HeaderCarrier.fromHeadersAndSession(request.headers)) = {
import auditDetailKeys._
import headers._
import uk.gov.hmrc.play.audit.http.HeaderFieldsExtractor._
import AuditExtensions._
val requiredFields = hc.toAuditDetails(Input -> s"Request to ${request.path}",
Method -> request.method.toUpperCase,
UserAgentString -> request.headers.get(UserAgent).getOrElse("-"),
Referrer -> request.headers.get(Referer).getOrElse("-"))
val tags = hc.toAuditTags(transactionName, request.path)
DataEvent(appName, eventType, detail = requiredFields ++ optionalAuditFieldsSeq(request.headers.toMap), tags = tags)
}
}
object HeaderFieldsExtractor {
private val SurrogateHeader = "Surrogate"
def optionalAuditFields(headers : Map[String, String]) : Map[String, String] = {
val map = headers map (t => t._1 -> Seq(t._2))
optionalAuditFieldsSeq(map)
}
def optionalAuditFieldsSeq(headers : Map[String, Seq[String]]) : Map[String, String] = {
headers.foldLeft(Map[String, String]()) { (existingList : Map[String, String], tup: (String, Seq[String])) =>
tup match {
case (SurrogateHeader, _) => existingList + ("surrogate" -> tup._2.mkString(","))
// Add more optional here
case _ => existingList
}
}
}
}
| beyond-code-github/play-auditing | src/main/scala/uk/gov/hmrc/play/audit/http/HttpAuditEvent.scala | Scala | apache-2.0 | 2,563 |
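A quick sketch of `HeaderFieldsExtractor` in use, relying only on the object above: only the `Surrogate` header survives, under a lower-cased key, with multiple values joined by commas.
val details = HeaderFieldsExtractor.optionalAuditFields(
  Map("Surrogate" -> "true", "User-Agent" -> "curl/7.54.0"))
// details == Map("surrogate" -> "true")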
package async_http_client
import core.HttpBinResponse
import sttp.client._
import sttp.client.akkahttp._
import sttp.client.circe._
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
object HttpBin extends App {
val backend = AkkaHttpBackend()
val request = basicRequest
.body("Hello, world!")
.post(uri"https://httpbin.org/post?hello=world")
.response(asJson[HttpBinResponse])
val f = backend
.send(request)
Await.ready(f, Duration.Inf)
for {
r <- f
} {
println(r.body.fold(identity, identity))
}
backend.close()
}
| t-mochizuki/scala-study | sttp-example/slow-server-experience/async-http-client/src/main/scala/async-http-client/HttpBin.scala | Scala | mit | 654 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package controllers
import play.api._
import play.api.mvc._
import play.api.libs._
/**
* Default actions ready to use as is from your routes file.
*
* Example:
* {{{
* GET /google controllers.Default.redirect(to = "http://www.google.com")
* GET /favicon.ico controllers.Default.notFound
* GET /admin controllers.Default.todo
* GET /xxx controllers.Default.error
* }}}
*/
object Default extends Controller {
/**
* Returns a 501 NotImplemented response.
*
* Example:
* {{{
* GET /admin controllers.Default.todo
* }}}
*/
def todo: Action[AnyContent] = TODO
/**
* Returns a 404 NotFound response.
*
* Example:
* {{{
* GET /favicon.ico controllers.Default.notFound
* }}}
*/
def notFound: Action[AnyContent] = Action {
NotFound
}
/**
* Returns a 302 Redirect response.
*
* Example:
* {{{
* GET /google controllers.Default.redirect(to = "http://www.google.com")
* }}}
*/
def redirect(to: String): Action[AnyContent] = Action {
Redirect(to)
}
/**
* Returns a 500 InternalServerError response.
*
* Example:
* {{{
* GET /xxx controllers.Default.error
* }}}
*/
def error: Action[AnyContent] = Action {
InternalServerError
}
} | jyotikamboj/container | pf-framework/src/play/src/main/scala/play/api/controllers/Default.scala | Scala | mit | 1,409 |
package com.sksamuel.elastic4s
object ConnectingToLocal extends App {
import ElasticDsl._
import scala.concurrent.ExecutionContext.Implicits.global
val local = ElasticClient.data
local.execute {
create index "got"
} map { _ =>
local.execute {
index into "got" fields "name" -> "tyrion"
}
}
Thread.sleep(3000)
val remote = ElasticClient.remote("elasticsearch://127.0.0.1:9300")
remote.execute {
search in "got"
}.map { resp =>
println(resp.hits)
}
}
| alexander-svendsen/elastic4s | elastic4s-core/src/test/scala/com/sksamuel/elastic4s/ConnectingToLocal.scala | Scala | apache-2.0 | 503 |
/*
* Copyright 2012 杨博 (Yang Bo)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dongxiguo.commons.continuations
class FunctionQueue extends AutoConsumableQueue[()=>Unit] {
protected final def consume(task: () => Unit) {
task()
}
@throws(classOf[ShuttedDownException])
final def shutDown[U](task: => U) {
enqueueAndShutDown(task _: () => Unit)
}
@throws(classOf[ShuttedDownException])
final def post[U](task: => U) {
enqueue(task _: () => Unit)
}
@deprecated("่ฏทๆๅจไฝฟ็จshiftๅpost็็ปๅไปฅ็ฒพ็กฎๆงๅถ้็็ฒๅบฆ", "0.1.2")
@inline
final def send[A](): Unit @util.continuations.suspendable = {
util.continuations.shift { (continue: Unit => Unit) =>
post {
continue()
}
}
}
}
| Atry/commons-continuations | src/main/scala/com/dongxiguo/commons/continuations/FunctionQueue.scala | Scala | apache-2.0 | 1,290 |
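A small usage sketch, assuming the `AutoConsumableQueue` base class consumes enqueued tasks sequentially on a worker, as its name suggests:
val queue = new FunctionQueue
queue.post { println("first task") }     // runs asynchronously, in enqueue order
queue.post { println("second task") }
queue.shutDown { println("final task") } // later post calls throw ShuttedDownException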
package org.jetbrains.plugins.scala.lang.refactoring.rename
import java.util
import com.intellij.psi.PsiElement
import org.jetbrains.annotations.ApiStatus
import org.jetbrains.plugins.scala.ExtensionPointDeclaration
@ApiStatus.Internal
abstract class ScalaElementToRenameContributor {
def addElements(original: PsiElement, newName: String, allRenames: util.Map[PsiElement, String]): Unit
}
object ScalaElementToRenameContributor
extends ExtensionPointDeclaration[ScalaElementToRenameContributor](
"org.intellij.scala.scalaElementToRenameContributor"
) {
def addAllElements(original: PsiElement, newName: String, allRenames: util.Map[PsiElement, String]): Unit =
implementations.foreach(_.addElements(original, newName, allRenames))
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/refactoring/rename/ScalaElementToRenameContributor.scala | Scala | apache-2.0 | 754 |
import scala.xml._
object DictXmlToTxt {
def main(args: Array[String]) {
val dict:Node = XML.loadFile("dict.xml")
println(this(dict))
}
def apply(dict:Node) = {
def convertEntries = for {
entry <- dict \ "entry"
val en = entry \ "en" \ "word" text
val fr = entry \ "fr" \ "word" text
val str = entry.attribute("kind") match {
case Some(n) if n.text == "noun" =>
val frGender = entry \ "fr" \ "gender" text;
"[noun] " + en + " - " + fr + " (" + frGender + ")"
case Some(n) if n.text == "verb" =>
"[verb] " + en + " - " + fr
case _ => ""
}
} yield str
convertEntries.mkString("\n")
}
}
| grzegorzbalcerek/scala-book-examples | examples/DictXmlToTxt.scala | Scala | mit | 697 |
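The XML shape expected by the selectors above can be illustrated with a hand-built sample; this is illustrative only and not the project's real dict.xml:
val sample =
  <dict>
    <entry kind="noun"><en><word>dog</word></en><fr><word>chien</word><gender>m</gender></fr></entry>
    <entry kind="verb"><en><word>run</word></en><fr><word>courir</word></fr></entry>
  </dict>
println(DictXmlToTxt(sample))
// [noun] dog - chien (m)
// [verb] run - courir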
package org.atnos.eff
import cats._
import cats.syntax.traverse._
import cats.syntax.either._
import Eff._
import Interpret._
/**
* Effect for computation which can fail
*/
trait EitherEffect extends
EitherCreation with
EitherInterpretation
object EitherEffect extends EitherEffect
trait EitherCreation {
type ThrowableEither[A] = Throwable Either A
type _ThrowableEither[R] = ThrowableEither <= R
type _throwableEither[R] = ThrowableEither |= R
/** create an Either effect from a single Option value */
def optionEither[R, E, A](option: Option[A], e: => E)(implicit member: (E Either *) |= R): Eff[R, A] =
option.fold[Eff[R, A]](left[R, E, A](e))(right[R, E, A])
/** create an Either effect from a single Either value */
def fromEither[R, E, A](Either: E Either A)(implicit member: (E Either *) |= R): Eff[R, A] =
Either.fold[Eff[R, A]](left[R, E, A], right[R, E, A])
/** create a failed value */
def left[R, E, A](e: E)(implicit member: (E Either *) |= R): Eff[R, A] =
send[E Either *, R, A](Left(e))
/** create a correct value */
def right[R, E, A](a: A)(implicit member: (E Either *) |= R): Eff[R, A] =
send[E Either *, R, A](Right(a))
/** create an Either effect from a value possibly throwing an exception */
def fromCatchNonFatal[R, E, A](a: =>A)(onThrowable: Throwable => E)(implicit member: (E Either *) |= R): Eff[R, A] =
fromEither(Either.catchNonFatal(a).leftMap(onThrowable))
/** create an Either effect from a value possibly throwing a Throwable */
def catchNonFatalThrowable[R, A](a: =>A)(implicit member: (Throwable Either *) |= R): Eff[R, A] =
fromCatchNonFatal(a)(identity)
}
object EitherCreation extends EitherCreation
trait EitherInterpretation {
/** run the Either effect, yielding E Either A */
def runEither[R, U, E, A](effect: Eff[R, A])(implicit m: Member.Aux[(E Either *), R, U]): Eff[U, E Either A] =
interpretEither(effect)(cats.instances.either.catsStdInstancesForEither[E])
/** run the Either effect, yielding E Either A and combine all Es */
def runEitherCombine[R, U, E, A](effect: Eff[R, A])(implicit m: Member.Aux[(E Either *), R, U], s: Semigroup[E]): Eff[U, E Either A] =
interpretEither(effect)(EitherApplicative[E])
private def interpretEither[R, U, E, A](effect: Eff[R, A])(ap: Applicative[E Either *])(implicit m: Member.Aux[(E Either *), R, U]): Eff[U, E Either A] =
Interpret.recurse(effect)(eitherRecurser[U, E, A, E Either A](a => Right(a), e => EffMonad[U].pure(Left(e)))(ap))
/** catch possible left values */
def attemptEither[R, E, A](effect: Eff[R, A])(implicit member: (E Either *) /= R): Eff[R, E Either A] =
catchLeft[R, E, E Either A](effect.map(a => Either.right(a)))(e => pure(Either.left(e)))
/** catch and handle a possible left value */
def catchLeft[R, E, A](effect: Eff[R, A])(handle: E => Eff[R, A])(implicit member: (E Either *) /= R): Eff[R, A] =
catchLeftEither[R, E, A](effect)(handle)(cats.instances.either.catsStdInstancesForEither[E])
/** run the Either effect, handling E (with effects) and yielding A */
def runEitherCatchLeft[R, U, E, A](r: Eff[R, A])(handle: E => Eff[U, A])(implicit m: Member.Aux[(E Either *), R, U]): Eff[U, A] =
runEither(r).flatMap(_.fold(handle, pure))
/** catch and handle a possible left value. The value is the combination of all failures in case of an applicative */
def catchLeftCombine[R, E, A](effect: Eff[R, A])(handle: E => Eff[R, A])(implicit member: (E Either *) /= R, s: Semigroup[E]): Eff[R, A] =
catchLeftEither[R, E, A](effect)(handle)(EitherApplicative[E])
private def catchLeftEither[R, E, A](effect: Eff[R, A])(handle: E => Eff[R, A])(ap: Applicative[E Either *])(implicit member: (E Either *) /= R): Eff[R, A] =
Interpret.intercept(effect)(Interpreter.fromRecurser(eitherRecurser[R, E, A, A](a => a, handle)(ap)))
private def eitherRecurser[R, E, A, B](pureValue: A => B, handle: E => Eff[R, B])(ap: Applicative[E Either *]): Recurser[E Either *, R, A, B] =
new Recurser[E Either *, R, A, B] {
def onPure(a: A): B =
pureValue(a)
def onEffect[X](m: E Either X): X Either Eff[R, B] =
m match {
case Left(e) => Right(handle(e))
case Right(a) => Left(a)
}
def onApplicative[X, T[_]: Traverse](ms: T[E Either X]): T[X] Either (E Either T[X]) = {
implicit val eitherAp = ap
Right(ms.sequence)
}
}
/**
* Modify the type of the read value
*
* This changes the stack of the Eff computation
*/
def zoomEither[SR, BR, U1, U2, E1, E2, A](r: Eff[SR, A], getter: E1 => E2)(
implicit sr: Member.Aux[E1 Either *, SR, U1],
br: Member.Aux[E2 Either *, BR, U2],
into: IntoPoly[U1, U2]): Eff[BR, A] =
transform[SR, BR, U1, U2, E1 Either *, E2 Either *, A](r,
new ~>[E1 Either *, E2 Either *] {
def apply[X](r: E1 Either X): E2 Either X =
r.leftMap(getter)
})
/**
* Translate an error effect to another one in the same stack
* a computation over a "bigger" error (for the full application)
*/
def translateEither[R, U, E1, E2, A](r: Eff[R, A], getter: E1 => E2)
(implicit sr: Member.Aux[E1 Either *, R, U], br: (E2 Either *) |= U): Eff[U, A] =
translate(r) { new Translate[E1 Either *, U] {
def apply[X](ex: E1 Either X): Eff[U, X] =
ex match {
case Left(e1) => EitherEffect.left[U, E2, X](getter(e1))
case Right(x) => pure(x)
}
}}
/**
* Update the error value, the stack of the Eff computation stays the same
*/
def localEither[R, E, A](e: Eff[R, A])(modify: E => E)(implicit m: (E Either *) /= R): Eff[R, A] =
interceptNat(e)(new ~>[E Either *, E Either *] {
def apply[X](ex: E Either X): E Either X =
ex.leftMap(modify)
})
def EitherApplicative[E](implicit s: Semigroup[E]): Applicative[E Either *] = new Applicative[E Either *] {
def pure[A](a: A) = Right(a)
def ap[A, B](ff: E Either (A => B))(fa: E Either A): E Either B =
fa match {
case Right(a) => ff.map(_(a))
case Left(e1) => ff match {
case Right(_) => Left(e1)
case Left(e2) => Left(s.combine(e1, e2))
}
}
}
}
trait EitherImplicits {
implicit final def errorTranslate[R, E1, E2](implicit m: MemberIn[E1 Either *, R], map: E2 => E1): MemberIn[E2 Either *, R] =
m.transform(errorTranslateNat(map))
final def errorTranslateNat[E1, E2](map: E2 => E1): (E2 Either *) ~> (E1 Either *) = new ((E2 Either *) ~> (E1 Either *)) {
def apply[X](x2: E2 Either X): E1 Either X = x2.leftMap(map)
}
}
object EitherImplicits extends EitherImplicits
object EitherInterpretation extends EitherInterpretation
| atnos-org/eff-cats | shared/src/main/scala/org/atnos/eff/EitherEffect.scala | Scala | mit | 6,791 |
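An end-to-end sketch of the effect above; the `Fx.fx1` stack alias, the syntax import, and `runEither`/`run` are assumed to come from the surrounding eff library:
import org.atnos.eff._, org.atnos.eff.syntax.all._
import org.atnos.eff.EitherEffect._
type ES = Fx.fx1[String Either *]
val program: Eff[ES, Int] = for {
  a <- right[ES, String, Int](40)
  b <- fromEither[ES, String, Int](Right(2))
} yield a + b
program.runEither.run // Right(42)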
package org.modelfun
/**
*
*/
trait Parameters {
def apply(name: Symbol, default: Double = 0.0): Double
} | zzorn/modelfun | src/main/scala/org/modelfun/Parameters.scala | Scala | lgpl-3.0 | 115 |
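A minimal map-backed implementation sketch of the trait above; `MapParameters` is hypothetical:
class MapParameters(values: Map[Symbol, Double]) extends Parameters {
  // Fall back to the supplied default when a name is absent.
  def apply(name: Symbol, default: Double): Double = values.getOrElse(name, default)
}
val params = new MapParameters(Map(Symbol("radius") -> 2.5))
params(Symbol("radius"))      // 2.5
params(Symbol("height"), 1.0) // 1.0, the supplied default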
//
// Codex - a multi-language code indexer and grokker
// http://github.com/samskivert/codex
package codex.extract
import java.io.File
import scala.xml.{Node, NodeSeq, XML}
import codex._
import codex.data.Depend
/** Utilities for interacting with Visual Studio/MonoDevelop .csproj files. */
object CSProj {
/** Models a reference to a DLL. */
case class Reference (name :String, file :Option[File]) {
/** Converts this reference to a Codex dependency. */
def toDepend (forTest :Boolean) = {
// TODO: have the project give us hints as to where to look for system DLLs?
val dll = (file orElse Dll.find(name))
val version = dll map(Monodis.assemblyInfo) map(_.version) getOrElse("0.0.0.0")
Depend(name, name, version, "dll", forTest, dll map(_.getAbsolutePath))
}
}
/** Contains info extracted from a .csproj file. */
case class Info (rootNamespace :String, assemblyName :String, version :String,
refs :Seq[Reference], sources :Seq[File])
/** Extracts info from the supplied .csproj file. */
def parse (csproj :File) :Info = {
def toFile (node :Node) = file(csproj.getParentFile, node.text.split("""\\\\""") :_*)
val xml = XML.loadFile(csproj)
Info(text(xml \\ "PropertyGroup" \\ "RootNamespace"),
text(xml \\ "PropertyGroup" \\ "AssemblyName"),
"0.0.0.0", // TODO
xml \\ "ItemGroup" \\ "Reference" flatMap { ref =>
ref \\ "@Include" map(dll => Reference(dll.text.trim, ref \\ "HintPath" map(toFile) match {
case Seq() => None
case Seq(file, _*) => Some(file)
}))
},
xml \\ "ItemGroup" \\ "Compile" \\\\ "@Include" map(toFile))
}
private def text (nodes :NodeSeq) = nodes.headOption map(_.text.trim) getOrElse("missing")
}
| samskivert/codex | src/main/scala/codex/extract/CSProj.scala | Scala | bsd-3-clause | 1,794 |
package tuner.gui
import akka.actor.Actor
import akka.actor.Actor._
import scala.swing.BoxPanel
import scala.swing.Button
import scala.swing.CheckBox
import scala.swing.Dialog
import scala.swing.Label
import scala.swing.Orientation
import scala.swing.ProgressBar
import scala.swing.event.ButtonClicked
import scala.swing.event.UIElementResized
import tuner.ConsoleLine
import tuner.Progress
import tuner.ProgressComplete
import tuner.ProgressWarning
import tuner.project.InProgress
import tuner.project.Project
import tuner.project.Saved
class SamplingProgressBar(project:InProgress) extends Window(project) {
//modal = true
//width = 800
//height = 75
var errors = false
val progressBar = new ProgressBar {
min = 0
}
val alwaysBackgroundCheckbox = new CheckBox("Always Background") {
selected = project.buildInBackground
enabled = false
}
val backgroundButton = new Button("Background")
backgroundButton.enabled = false
val stopButton = new Button("Stop")
val progressLabel = new Label {
text = " "
}
val console = new HideableConsole
listenTo(project)
listenTo(backgroundButton)
listenTo(stopButton)
listenTo(console)
contents = new BoxPanel(Orientation.Vertical) {
contents += new BoxPanel(Orientation.Horizontal) {
contents += new BoxPanel(Orientation.Vertical) {
contents += progressBar
contents += progressLabel
}
contents += alwaysBackgroundCheckbox
contents += backgroundButton
contents += stopButton
}
contents += console
}
reactions += {
case ButtonClicked(`backgroundButton`) =>
project.buildInBackground = alwaysBackgroundCheckbox.selected
project match {
case s:Saved => s.save
case _ =>
}
dispose
case ButtonClicked(`stopButton`) =>
//project.stop
dispose
case UIElementResized(_) =>
this.pack
case Progress(currentTime, totalTime, msg, ok) =>
updateProgress(currentTime, totalTime, msg, ok)
case ProgressWarning(msg) =>
Dialog.showMessage(contents.head, msg, "Build warning",
Dialog.Message.Warning)
case ConsoleLine(line) =>
console.text += line
console.text += "\n"
case ProgressComplete =>
if(!errors) dispose
}
this.pack
def updateProgress(cur:Int, max:Int, msg:String, ok:Boolean) = {
if(ok) {
progressLabel.foreground = java.awt.Color.black
progressLabel.text = msg
if(max > 0) {
progressBar.indeterminate = false
progressBar.max = max
progressBar.value = cur
} else {
progressBar.indeterminate = true
}
} else {
errors = true
progressLabel.foreground = java.awt.Color.red
progressLabel.text = "Error: " + msg
console.text += msg + "\n"
progressBar.indeterminate = true
}
this.pack
}
}
| gabysbrain/tuner | src/main/scala/tuner/gui/SamplingProgressBar.scala | Scala | mit | 2,905 |
package kipsigman.domain.entity
import play.api.data.validation.ValidationError
import play.api.libs.json._
abstract class Status(val name: String) {
override def toString: String = name
}
object Status {
// Content
case object Deleted extends Status("deleted")
case object Draft extends Status("draft")
case object Public extends Status("public")
case object Unlisted extends Status("unlisted")
// Reviewed entity
case object Approved extends Status("approved")
case object Pending extends Status("pending")
case object Rejected extends Status("rejected")
val all: Set[Status] = Set(Deleted, Draft, Public, Unlisted, Approved, Pending, Rejected)
val activeValues: Set[Status] = Set(Draft, Public, Unlisted, Approved, Pending)
val publishValues: Set[Status] = Set(Public, Unlisted)
def apply(name: String): Status = {
all.find(s => s.name == name) match {
case Some(status) => status
case None => throw new IllegalArgumentException(s"Invalid Status: $name")
}
}
implicit val reads: Reads[Status] = new Reads[Status] {
def reads(json: JsValue) = json match {
case JsString(s) => JsSuccess(Status(s))
case _ => JsError(Seq(JsPath() -> Seq(ValidationError("error.expected.jsstring"))))
}
}
implicit val writes: Writes[Status] = new Writes[Status] {
def writes(status: Status) = JsString(status.name)
}
} | kipsigman/scala-domain-model | src/main/scala/kipsigman/domain/entity/Status.scala | Scala | apache-2.0 | 1,400 |
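A round-trip sketch with play-json using the Reads/Writes defined above:
import play.api.libs.json._
Json.toJson(Status.Public: Status)  // JsString("public")
Json.parse("\"draft\"").as[Status]  // Status.Draft
// Note: an unrecognised name throws IllegalArgumentException from Status.apply rather than
// producing a JsError, because the Reads instance calls Status(s) eagerly.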
/**
* Copyright (c) 2013, The National Archives <[email protected]>
* http://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.csv.validator.schema.v1_0
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import uk.gov.nationalarchives.csv.validator.metadata.{Cell, Row}
import uk.gov.nationalarchives.csv.validator.schema.{ColumnDefinition, NamedColumnIdentifier, Schema, TotalColumns}
import scalaz.{Failure, Success}
@RunWith(classOf[JUnitRunner])
class NotEmptyRuleSpec extends Specification {
val globalDirsOne = List(TotalColumns(1))
"NotEmptyRule" should {
"File if cell is empty" in {
val notEmptyRule = NotEmptyRule()
notEmptyRule.evaluate(0, Row(List(Cell("")), 1), Schema(globalDirsOne, List(ColumnDefinition(NamedColumnIdentifier("column1"))))) must beLike {
case Failure(messages) => messages.head mustEqual """notEmpty fails for line: 1, column: column1, value: """""
}
}
"Succeed if cell is NOT empty" in {
val notEmptyRule = NotEmptyRule()
notEmptyRule.evaluate(0, Row(List(Cell("something")), 1), Schema(globalDirsOne, List(ColumnDefinition(NamedColumnIdentifier("column1"))))) mustEqual Success(true)
}
}
}
| valydia/csv-validator | csv-validator-core/src/test/scala/uk/gov/nationalarchives/csv/validator/schema/v1_0/NotEmptyRuleSpec.scala | Scala | mpl-2.0 | 1,513 |
package scroll.tests.parameterized
import scroll.internal.dispatch.DispatchQuery
import scroll.internal.dispatch.DispatchQuery._
import scroll.tests.mocks.CompartmentUnderTest
import scroll.tests.mocks.CoreA
class RoleSortingTest extends AbstractParameterizedSCROLLTest {
test("Adding roles and sorting them") {
forAll(PARAMS) { (c: Boolean, cc: Boolean) =>
val someCore = new CoreA()
new CompartmentUnderTest(c, cc) {
case class SomeRoleA() {
def method(): String = "A"
}
case class SomeRoleB() {
def method(): String = "B"
}
case class SomeRoleC() {
def method(): String = "C"
}
val roleA = SomeRoleA()
val roleB = SomeRoleB()
val roleC = SomeRoleC()
someCore play roleA
someCore play roleB
someCore play roleC
{
given DispatchQuery = DispatchQuery()
val r1: String = (+someCore).method()
r1 shouldBe "C"
}
{
given DispatchQuery = DispatchQuery().sortedWith(reverse)
val r2: String = (+someCore).method()
r2 shouldBe "A"
}
{
given DispatchQuery =
DispatchQuery().sortedWith { case (_: SomeRoleB, _: SomeRoleC) =>
swap
}
val r3: String = (+someCore).method()
r3 shouldBe "B"
}
{
given DispatchQuery =
Bypassing(_.isInstanceOf[SomeRoleA]).sortedWith { case (_: SomeRoleB, _: SomeRoleC) =>
swap
}
val r4: String = (+someCore).method()
r4 shouldBe "B"
}
}
}
}
class SomeCore {
def method(): String = "Core"
}
test("Adding roles with cyclic calls and sorting them") {
forAll(PARAMS) { (c: Boolean, cc: Boolean) =>
val someCore = new SomeCore()
new CompartmentUnderTest(c, cc) {
case class SomeRoleA() {
def method(): String = {
given DispatchQuery = Bypassing(_.isInstanceOf[SomeRoleA])
(+this).method()
}
}
case class SomeRoleB() {
def method(): String = {
given DispatchQuery = DispatchQuery().sortedWith(reverse)
(+this).method()
}
}
val roleA = SomeRoleA()
val roleB = SomeRoleB()
someCore play roleA
someCore play roleB
val r1: String = (+someCore).method()
r1 shouldBe "Core"
}
}
}
}
| max-leuthaeuser/SCROLL | tests/src/test/scala/scroll/tests/parameterized/RoleSortingTest.scala | Scala | lgpl-3.0 | 2,537 |
import scalaz.{ \/, Monoid, StateT }
import shapeless._
import ops.hlist.{Prepend, RightFolder, Init, Last, Length, Split}
import scodec.bits._
/**
* Combinator library for working with binary data.
*
* The primary abstraction of this library is [[Codec]], which provides the ability to encode/decode values to/from binary.
*
* There are more general abstractions though, such as [[Encoder]] and [[Decoder]]. There's also [[GenCodec]] which extends
* both `Encoder` and `Decoder` but allows the types to vary. Given these more general abstractions, a `Codec[A]` can be
* represented as a `GenCodec[A, A]`.
*
* The more general abstractions are important because they allow operations on codecs that would not otherwise be possible.
* For example, given a `Codec[A]`, mapping a function `A => B` over the codec yields a `GenCodec[A, B]`. Without the
* more general abstractions, `map` is impossible to define (e.g., how would `codec.map(f).encode(b)` be implemented?).
* Given a `GenCodec[A, B]`, the encoding functionality can be ignored by treating it as a `Decoder[B]`, or the encoding
* type can be changed via `contramap`. If after further transformations, the two types to `GenCodec` are equal, we can
* reconstitute a `Codec` from the `GenCodec` by calling `fuse`.
*
* See the [[codecs]] package object for pre-defined codecs for many common data types and combinators for building larger
* codecs out of smaller ones.
*
* For the categorically minded, note the following:
* - `Decoder` is a monad
* - `Encoder` is a contravariant functor
* - `GenCodec` is a profunctor
* - `Codec` is an invariant functor
*
* Each type has the corresponding Scalaz typeclass defined in its companion object.
*/
package object scodec {
/** Alias for state/either transformer that simplifies calling decode on a series of codecs, wiring the remaining bit vector of each in to the next entry. */
type DecodingContext[A] = StateT[({type λ[a] = String \/ a})#λ, BitVector, A]
implicit val bitVectorMonoidInstance: Monoid[BitVector] = Monoid.instance(_ ++ _, BitVector.empty)
implicit val byteVectorMonoidInstance: Monoid[ByteVector] = Monoid.instance(_ ++ _, ByteVector.empty)
/** Provides common operations on a `Codec[HList]`. */
final implicit class HListCodecEnrichedWithHListSupport[L <: HList](val self: Codec[L]) extends AnyVal {
import codecs.HListCodec
/**
* When called on a `Codec[L]` for some `L <: HList`, returns a new codec representing `Codec[B :: L]`.
* That is, this operator is a codec-level `HList` prepend operation.
* @param codec codec to prepend
* @group hlist
*/
def ::[B](codec: Codec[B]): Codec[B :: L] = HListCodec.prepend(codec, self)
/**
* When called on a `Codec[L]` for some `L <: HList`, returns a new codec that encodes/decodes
* `B :: L` but only returns `L`. HList equivalent of `~>`.
* @group hlist
*/
def :~>:[B](codec: Codec[B])(implicit ev: Unit =:= B): Codec[L] = codec.dropLeft(self)
/**
* When called on a `Codec[L]` for some `L <: HList`, returns a new codec that encodes/decodes
* the `HList L` followed by a `B`.
* That is, this operator is a codec-level `HList` append operation.
* @group hlist
*/
def :+[B, LB <: HList](codec: Codec[B])(implicit
prepend: Prepend.Aux[L, B :: HNil, LB],
init: Init.Aux[LB, L],
last: Last.Aux[LB, B]
): Codec[LB] = HListCodec.append(self, codec)
/**
* When called on a `Codec[L]` for some `L <: HList`, returns a new codec that encodes/decodes
* the `HList K` followed by the `HList L`.
* @group hlist
*/
def :::[K <: HList, KL <: HList, KLen <: Nat](k: Codec[K])(implicit
prepend: Prepend.Aux[K, L, KL],
lengthK: Length.Aux[K, KLen],
split: Split.Aux[KL, KLen, (K, L)]
): Codec[KL] = HListCodec.concat(k, self)
}
/** Provides `HList` related syntax for codecs of any type. */
final implicit class ValueCodecEnrichedWithHListSupport[A](val self: Codec[A]) extends AnyVal {
import codecs.HListCodec
/**
* When called on a `Codec[A]` where `A` is not a subtype of `HList`, creates a new codec that encodes/decodes an `HList` of `B :: A :: HNil`.
* For example, {{{uint8 :: utf8}}} has type `Codec[Int :: String :: HNil]`.
* @group hlist
*/
def ::[B](codecB: Codec[B]): Codec[B :: A :: HNil] =
codecB :: self :: HListCodec.hnilCodec
/**
* Creates a new codec that encodes/decodes an `HList` type of `A :: L` given a function `A => Codec[L]`.
* This allows later parts of an `HList` codec to be dependent on earlier values.
* @group hlist
*/
def flatPrepend[L <: HList](f: A => Codec[L]): Codec[A :: L] = HListCodec.flatPrepend(self, f)
/**
* Creates a new codec that encodes/decodes an `HList` type of `A :: L` given a function `A => Codec[L]`.
* This allows later parts of an `HList` codec to be dependent on earlier values.
* Operator alias for `flatPrepend`.
* @group hlist
*/
def >>:~[L <: HList](f: A => Codec[L]): Codec[A :: L] = flatPrepend(f)
/**
* Creates a new codec that encodes/decodes an `HList` type of `A :: B :: HNil` given a function `A => Codec[B]`.
* If `B` is an `HList` type, consider using `flatPrepend` instead, which avoids nested `HLists`.
* This is the direct `HList` equivalent of `flatZip`.
* @group hlist
*/
def flatZipHList[B](f: A => Codec[B]): Codec[A :: B :: HNil] = flatPrepend(f andThen (_.hlist))
}
}
| ceedubs/scodec | src/main/scala/scodec/package.scala | Scala | bsd-3-clause | 5,565 |
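A short sketch of the HList combinators documented above, assuming the era-appropriate `uint8`/`utf8` codecs and an `encode` that returns `String \/ BitVector`:
import scodec._, scodec.codecs._
import shapeless._
// Codec for an unsigned byte followed by a UTF-8 string (the remainder of the vector).
val pair: Codec[Int :: String :: HNil] = uint8 :: utf8
pair.encode(7 :: "hi" :: HNil) // \/-(BitVector: 0x07 followed by the UTF-8 bytes of "hi")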
package uk.org.nbn.nbnv.importer.darwin
import uk.org.nbn.nbnv.importer.darwin.NbnFields._
import org.gbif.dwc.text.StarRecord
import scala.collection.JavaConversions._
class StarRecordExtensions(starRecord: StarRecord) {
//Maps NBN Field ot DWC Term
def getCoreField(fieldName: NbnFields) = {
FieldMaps.coreFieldMap.get(fieldName) match {
case Some(term) => starRecord.core.value(term)
case None => throw new Exception("Unmapped core term")
}
}
def getExtensionField(fieldName: NbnFields) = {
FieldMaps.extensionFieldMap.get(fieldName) match {
case Some(term) => {
val extension = starRecord.extension("http://rs.nbn.org.uk/dwc/nxf/0.1/terms/nxfOccurrence").head
extension.value(FieldMaps.extensionTermUri + term)
}
case None => throw new Exception("Unmapped extension term")
}
}
}
| JNCC-dev-team/nbn-importer | importer/src/main/scala/uk/org/nbn/nbnv/importer/darwin/StarRecordExtensions.scala | Scala | apache-2.0 | 864 |
package im.actor.server.presences
import akka.actor.PoisonPill
import akka.testkit.TestProbe
import akka.util.Timeout
import im.actor.server.ActorSuite
import im.actor.server.db.DbExtension
import org.scalatest.time.{ Seconds, Span }
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
class GroupPresenceManagerSpec extends ActorSuite {
behavior of "GroupPresenceManager"
it should "subscribe/unsubscribe to group presences" in e1
it should "not consider presence change on second device online when first is online" in e2
it should "not consider presence change on second device offline when first is online" in e3
implicit val ec: ExecutionContext = system.dispatcher
override implicit val patienceConfig = PatienceConfig(timeout = Span(5, Seconds))
implicit val timeout: Timeout = Timeout(5.seconds)
DbExtension(system).clean()
DbExtension(system).migrate()
implicit val presenceExt = PresenceExtension(system)
implicit val groupPresenceExt = GroupPresenceExtension(system)
def e1() = {
val userId = scala.util.Random.nextInt
val groupId = scala.util.Random.nextInt
val probe = TestProbe()
whenReady(groupPresenceExt.subscribe(groupId, probe.ref)) { _ ⇒ }
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
groupPresenceExt.notifyGroupUserAdded(groupId, userId)
presenceExt.presenceSetOnline(userId, 1L, 1000)
probe.expectMsgPF() {
case GroupPresenceState(g, 1) if g == groupId ⇒
}
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
whenReady(groupPresenceExt.unsubscribe(groupId, probe.ref)) { _ ⇒ }
probe.expectNoMsg()
probe.ref ! PoisonPill
}
def e2() = {
val userId = scala.util.Random.nextInt
val groupId = scala.util.Random.nextInt
val probe = TestProbe()
groupPresenceExt.notifyGroupUserAdded(groupId, userId)
whenReady(groupPresenceExt.subscribe(groupId, probe.ref)) { _ ⇒ }
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
presenceExt.presenceSetOnline(userId, 1L, 300)
probe.expectMsgPF() {
case GroupPresenceState(g, 1) if g == groupId ⇒
}
presenceExt.presenceSetOnline(userId, 2L, 600)
probe.expectNoMsg(400.millis)
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
}
def e3() = {
val userId = scala.util.Random.nextInt
val groupId = scala.util.Random.nextInt
val probe = TestProbe()
groupPresenceExt.notifyGroupUserAdded(groupId, userId)
whenReady(groupPresenceExt.subscribe(groupId, probe.ref)) { _ ⇒ }
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
presenceExt.presenceSetOnline(userId, 1L, 300)
probe.expectMsgPF() {
case GroupPresenceState(g, 1) if g == groupId ⇒
}
presenceExt.presenceSetOnline(userId, 2L, 300)
presenceExt.presenceSetOffline(userId, 2L, 300)
// should not consider user offline as the first device is still online
probe.expectNoMsg(200.millis)
// finally consider user offline as first device's online is timed out
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
}
}
| actorapp/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/presences/GroupPresenceManagerSpec.scala | Scala | agpl-3.0 | 3,301 |
package org.shapelogic.sc.imageprocessing
import org.shapelogic.sc.util.Constants
import org.shapelogic.sc.polygon.CPointInt
import org.shapelogic.sc.image.BufferImage
/**
* LineVectorizer is a vectorizer using short line of default length 5.
*
* You do a sequence of small lines, if 2 consecutive lines are close in direction you can merge them.
* But if they are far away in angle you create a new line.
* Also you can only have 2 direction within the same line, they are stored by cycle index.
*
* @author Sami Badawi
*
*/
class LineVectorizer(imageIn: BufferImage[Byte]) extends ShortLineBasedVectorizer(imageIn) {
/**
* Test that the current direction is close to the last direction.
*/
def multiLineHasGlobalFitness(): Boolean = {
//do big test
if (_pointsInCurrentShortLine >= _maxPointsInShortLine) {
_currentVectorDirection = _currentPoint.copy().minus(_startOfShortLinePoint).asInstanceOf[CPointInt]
val currentAngel: Double = _currentVectorDirection.angle()
if (!currentAngel.isNaN)
_currentCircleInterval.addClosestAngle(currentAngel)
newShortLine()
return currentAngel.isNaN || _currentCircleInterval.intervalLength() < _angleLimit
}
if (_currentDirection != _firstUsedDirection) {
if (_secondUsedDirection == Constants.DIRECTION_NOT_USED) {
_secondUsedDirection = _currentDirection
} else if (_currentDirection != _secondUsedDirection) {
return false
}
}
true
}
}
| sami-badawi/shapelogic-scala | src/main/scala/org/shapelogic/sc/imageprocessing/LineVectorizer.scala | Scala | mit | 1,499 |
package mr.merc.map.terrain
import mr.merc.image.MImage
import mr.merc.util.MercUtils
import TerrainKind._
import mr.merc.map.objects.view.{Keep, Simple, WallStructure}
object TerrainType {
def helperTypesList:List[TerrainType] = List(BankInside, BankOutside)
case object GreenGrass extends TerrainType("green", GrassKind)
case object DryGrass extends TerrainType("dry", GrassKind)
case object SemidryGrass extends TerrainType("semidry", GrassKind)
case object LeafLitter extends TerrainType(name="leafLitter", GrassKind)
case object Farm extends TerrainType("farm", GrassKind, belowTerrainType = Some(LeafLitter))
case object ShallowWater extends TerrainType("water", WaterKind)
case object OceanWater extends TerrainType("ocean", WaterKind)
case object BasicMountain extends TerrainType("mountain", MountainKind,belowTerrainType = Some(BasicHill))
case object BasicMountainSnow extends TerrainType("mountainSnow", MountainKind,belowTerrainType = Some(BasicHillSnow))
case object DesertSand extends TerrainType("sand", SandKind)
case object BeachSand extends TerrainType("beach", SandKind)
case object Mud extends TerrainType("mud", SandKind)
case object BasicHill extends TerrainType("hill", HillKind)
case object BasicHillSnow extends TerrainType("hillSnow", HillKind)
abstract class RoadTerrainType(name: String) extends TerrainType(name, RoadKind)
case object CleanRoad extends RoadTerrainType("cleanRoad")
case object OldRoad extends RoadTerrainType("oldRoad")
case object GrassyRoad extends RoadTerrainType("grassyRoad")
case object DecForestSpring extends TerrainType("decForest", ForestKind, belowTerrainType = Some(GreenGrass))
case object DecForestSummer extends TerrainType("decForest", ForestKind, belowTerrainType = Some(SemidryGrass))
case object DecForestFall extends TerrainType("decForestFall", ForestKind, belowTerrainType = Some(DryGrass))
case object DecForestWinter extends TerrainType("decForestWinter", ForestKind, belowTerrainType = Some(Snow))
case object PineForest extends TerrainType("pineForest", ForestKind, belowTerrainType = Some(GreenGrass))
case object MixedForest extends TerrainType("mixedForest", ForestKind, belowTerrainType = Some(GreenGrass))
case object Snow extends TerrainType("snow", SnowKind)
case object Ice extends TerrainType("ice", IceKind)
case object SwampWater extends TerrainType("swampWater", WaterKind)
case object Swamp extends TerrainType("swampReed", SwampKind, belowTerrainType = Some(SwampWater))
abstract class CityCastle(name: String, centerTile: String) extends TerrainType(name, WallsKind) {
override lazy val imagePaths: Vector[MImage] = {
Vector(s"/images/terrain/$name/$centerTile.png").map(MImage.apply)
}
def structureName(wallStructure:WallStructure): String = wallStructure match {
case Simple => "castle"
case Keep => "keep"
}
}
case object HumanCastle extends CityCastle("humanCastle", "cobbles-keep")
case object HumanCastleSnow extends CityCastle("humanCastleSnow", "cobbles")
case object ElvenCastle extends CityCastle("elvenCastle", "keepClean")
case object ElvenCastleRuin extends CityCastle("elvenCastleRuin", "keepClean")
case object OrcishCastle extends CityCastle("orcishCastle", "keep") {
override def structureName(wallStructure: WallStructure): String = wallStructure match {
case Simple => "fort"
case Keep => "keep"
}
}
case object OrcishCastleSnow extends CityCastle("orcishCastleSnow", "keep") {
override def structureName(wallStructure: WallStructure): String = wallStructure match {
case Simple => "fort"
case Keep => "keep"
}
}
case object SandCastle extends CityCastle("sandCastle", "keepClean")
case object SandCastleRuin extends CityCastle("sandCastleRuin", "keepClean") {
override def structureName(wallStructure:WallStructure): String = wallStructure match {
case Simple => "ruin-castle"
case Keep => "ruin-keep"
}
}
case object TrollCastle extends CityCastle("trollCastle", "mud") {
override def structureName(wallStructure:WallStructure): String = wallStructure match {
case Simple => "regular"
case Keep => "keep"
}
}
case object DwarfCastle extends CityCastle("dwarfCastle", "dwarven-castle-floor") {
override def structureName(wallStructure:WallStructure): String = wallStructure match {
case Simple => "dwarven-castle"
case Keep => "dwarven-keep"
}
}
case object CampCastle extends CityCastle("campCastle", "keep") {
override def structureName(wallStructure: WallStructure): String = wallStructure match {
case Simple => "regular"
case Keep => "tall-keep"
}
}
case object CampCastleSnow extends CityCastle("campCastleSnow", "keep") {
override def structureName(wallStructure: WallStructure): String = wallStructure match {
case Simple => "regular"
case Keep => "tall-keep"
}
}
// helper types
case object BankInside extends TerrainType("bankInside", WaterKind)
case object BankOutside extends TerrainType("bankOutside", WaterKind)
// THIS TYPES ARE FORBIDDEN TO USE ON MAP
case object Empty extends TerrainType("void", EmptyKind) {
}
}
abstract sealed class TerrainType(val name: String, val kind:TerrainKind, val belowTerrainType:Option[TerrainType] = None) {
lazy val imagePaths:Vector[MImage] = {
val result = LazyList.from(1).map { i =>
val path = s"/images/terrain/$name/$i.png"
Option(getClass.getResource(path)).map(_ => path)
}.takeWhile(_.nonEmpty).flatten.toVector
require(result.nonEmpty, s"Failed to load images for map object $name")
result.map(MImage.apply)
}
def image(x: Int, y: Int):MImage = {
val i = MercUtils.stablePseudoRandomIndex(x, y, imagePaths.size)
imagePaths(i)
}
def is(kind:TerrainKind):Boolean = kind == this.kind
def isNot(kind:TerrainKind):Boolean = !is(kind)
def isOneOf(kinds:TerrainKind*):Boolean = kinds.exists(is)
def isNotOneOf(kinds:TerrainKind*):Boolean = !isOneOf(kinds:_*)
}
object TerrainKind {
case object GrassKind extends TerrainKind
case object WaterKind extends TerrainKind
case object MountainKind extends TerrainKind
case object SandKind extends TerrainKind
case object HillKind extends TerrainKind
case object RoadKind extends TerrainKind
case object ForestKind extends TerrainKind
case object WallsKind extends TerrainKind
case object SwampKind extends TerrainKind
case object SnowKind extends TerrainKind
case object IceKind extends TerrainKind
case object EmptyKind extends TerrainKind
}
sealed abstract class TerrainKind
| RenualdMarch/merc | src/main/scala/mr/merc/map/terrain/TerrainType.scala | Scala | gpl-3.0 | 6,682 |
package org.nlogo.extensions.webview
import java.io.File
import java.net.URL
import javafx.embed.swing.JFXPanel
class WebViewStateManager(bridge: JavascriptBridge, browserFactory: WebViewFactory = JavaFXWebView) {
val _ = new JFXPanel()
var container: Option[Container] = None
val webView = browserFactory.browser(bridge)
def close(): Unit = {
container.foreach(_.close())
container = None
}
def executeJS(s: String) = {
if (container.isEmpty)
throw new IllegalStateException("cannot eval js before opening webview")
webView.executeJS(s)
}
def executeJSForResult(js: String): AnyRef = {
if (container.isEmpty)
throw new IllegalStateException("cannot eval js before opening webview")
webView.executeJSForResult(js)
}
def addModule(name: String, obj: AnyRef): Unit = {
webView.bind(name, obj, true)
}
def load(url: URL): Unit = webView.load(url)
def reload(): Unit = webView.reload()
def frame(frameContainer: Container, doc: URL) =
openContainer(frameContainer, doc, "frame")
def tab(tabContainer: Container, doc: URL) =
openContainer(tabContainer, doc, "tab")
protected def openContainer(newContainer: Container, doc: URL, containerName: String) = {
if (container.nonEmpty)
throw new IllegalStateException("webview window already open")
else {
webView.load(doc)
newContainer.show(webView)
container = Some(newContainer)
}
}
}
| NetLogo/webview | src/main/scala/WebViewStateManager.scala | Scala | cc0-1.0 | 1,484 |
/*
* Copyright 2017 Workday, Inc.
*
* This software is available under the MIT license.
* Please see the LICENSE.txt file in this project.
*/
package com.workday.esclient.actions
import io.searchbox.action.{AbstractMultiTypeActionBuilder, GenericResultAbstractAction}
/**
* Builder for [[com.workday.esclient.actions.SnapshotDeleteAction]].
* @param repository String repository name.
* @param name String snapshot name.
*/
class SnapshotDeleteBuilder(repository: String, name: String)
extends AbstractMultiTypeActionBuilder[SnapshotDeleteAction, SnapshotDeleteBuilder] {
var indexList : Seq[String] = Nil
val snapshotRepository = repository
val snapName = name
/**
* Builds [[com.workday.esclient.actions.SnapshotDeleteAction]].
* @return [[com.workday.esclient.actions.SnapshotDeleteAction]].
*/
override def build: SnapshotDeleteAction = new SnapshotDeleteAction(this)
}
/**
* Action class for deleting snapshots using the Elasticsearch Snapshot API.
* @param builder [[com.workday.esclient.actions.SnapshotDeleteBuilder]].
*/
class SnapshotDeleteAction(builder: SnapshotDeleteBuilder) extends GenericResultAbstractAction(builder) {
val repository = builder.snapshotRepository
val snapName = builder.snapName
setURI(buildURI)
/**
* Gets REST method name.
* @return String "DELETE".
*/
override def getRestMethodName: String = "DELETE"
/**
* Builds the URI for hitting the Delete Snapshot API.
* @return String URI.
*/
protected override def buildURI: String = s"_snapshot/$repository/$snapName"
}
| Workday/escalar | src/main/scala/com/workday/esclient/actions/SnapshotDeleteAction.scala | Scala | mit | 1,586 |
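A usage sketch: build the action and hand it to a Jest client; the `execute` call is the standard Jest pattern and is shown only as an assumption:
val deleteSnapshot = new SnapshotDeleteBuilder("my_backup_repo", "snapshot_2017_01_01").build
deleteSnapshot.getRestMethodName       // "DELETE"
// The URI resolves to _snapshot/my_backup_repo/snapshot_2017_01_01
// jestClient.execute(deleteSnapshot)  // assumed: io.searchbox.client.JestClient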
package co.blocke.scalajack
package typeadapter
import model._
import scala.collection.mutable
import co.blocke.scala_reflection._
import co.blocke.scala_reflection.impl.Clazzes._
import co.blocke.scala_reflection.info._
object SealedTraitTypeAdapterFactory extends TypeAdapterFactory:
def matches(concrete: RType): Boolean =
concrete match {
case _: SealedTraitInfo => true
case _ => false
}
def makeTypeAdapter(concrete: RType)(implicit taCache: TypeAdapterCache): TypeAdapter[_] =
if concrete.asInstanceOf[SealedTraitInfo].children.head.isInstanceOf[ObjectInfo] then
CaseObjectTypeAdapter(
concrete,
concrete.asInstanceOf[SealedTraitInfo].children.map(_.asInstanceOf[ObjectInfo].infoClass.getSimpleName).toList)
else
val typeAdapters = concrete.asInstanceOf[SealedTraitInfo].children.map(c => c -> taCache.typeAdapterOf(c)).toMap
SealedTraitTypeAdapter(taCache.jackFlavor, concrete, typeAdapters)
case class SealedTraitTypeAdapter[T](
jackFlavor: JackFlavor[_],
info: RType,
typeAdapters: Map[RType, TypeAdapter[_]]
) extends TypeAdapter[T]:
val sealedInfo = info.asInstanceOf[SealedTraitInfo]
def read(parser: Parser): T =
val savedReader = parser.mark()
if (parser.peekForNull)
null.asInstanceOf[T]
else {
val readFieldNames = parser.expectMap[String, Any, Map[String, Any]](
jackFlavor.stringTypeAdapter,
jackFlavor.anyTypeAdapter,
Map.newBuilder[String,Any]
).keySet
sealedInfo.children.filter(
_.asInstanceOf[ScalaCaseClassInfo].fields.map(_.name).toSet.intersect(readFieldNames).size == readFieldNames.size
) match {
case setOfOne if setOfOne.size == 1 =>
parser.revertToMark(savedReader)
typeAdapters(setOfOne.head).read(parser).asInstanceOf[T]
case emptySet if emptySet.isEmpty =>
parser.backspace()
throw new ScalaJackError(
parser.showError(
s"No sub-classes of ${info.name} match field names $readFieldNames"
)
)
case _ =>
// $COVERAGE-OFF$Should be impossible--here for safety. Something to trigger this would be ambiguous and would then be detected as a WrappedSealedTraitTypeAdapter, not here.
parser.backspace()
throw new ScalaJackError(
parser.showError(
s"Multiple sub-classes of ${info.name} match field names $readFieldNames"
)
)
// $COVERAGE-ON$
}
}
def write[WIRE](
t: T,
writer: Writer[WIRE],
out: mutable.Builder[WIRE, WIRE]): Unit =
t match {
case null => writer.writeString(null, out)
case _ =>
sealedInfo.children.find(f => t.getClass <:< f.infoClass) match {
case Some(implementation) => typeAdapters(implementation).asInstanceOf[TypeAdapter[T]].write(t, writer, out)
// $COVERAGE-OFF$Should be impossible, but including here for safety. Can't think of how to actually trigger this for testing.
case None =>
throw new IllegalStateException(
s"Given object ($t) doesn't seem to be a sealed trait."
)
// $COVERAGE-ON$
}
}
case class CaseObjectTypeAdapter[T](
info: RType,
values: List[String]
) extends TypeAdapter[T]:
val sealedInfo = info.asInstanceOf[SealedTraitInfo]
def read(parser: Parser): T =
parser.expectString() match {
case null => null.asInstanceOf[T]
case s: String if values.contains(s) =>
val simpleNameLen = info.infoClass.getSimpleName.length+1
val clazz = Class.forName(info.infoClass.getName.dropRight(simpleNameLen) + "." + s + "$")
val objInstance = clazz.getField("MODULE$").get(null).asInstanceOf[T]
objInstance
case x =>
parser.backspace()
throw new ScalaJackError(parser.showError(s"Expected a valid subclass of ${info.name} but got $x")
)
}
def write[WIRE](
t: T,
writer: Writer[WIRE],
out: mutable.Builder[WIRE, WIRE]): Unit =
t match {
case null => writer.writeString(null, out)
case _ => writer.writeString(t.toString, out)
} | gzoller/ScalaJack | core/src/main/scala/co.blocke.scalajack/typeadapter/SealedTraitTypeAdapter.scala | Scala | mit | 4,396 |
package com.neilconcepts.battlespace.domain
import java.util.UUID
import com.neilconcepts.battlespace.domain.bst.Player
import org.scalatest.{ Matchers, WordSpec }
class BSTSpec extends WordSpec with Matchers {
"BattleSpaceTypes" when {
"generating a player" should {
"create a unique player" in {
val idStr = "7a68a829-af66-4f7e-9b34-03d9c9f79a3f"
val id = UUID.fromString(idStr)
val player = Player(id)
player.id should be(id)
}
}
}
}
| bneil/battlespace | src/test/scala/com/neilconcepts/battlespace/domain/BSTSpec.scala | Scala | mit | 497 |
package temportalist.esotericraft.emulation.common.ability
import net.minecraft.entity.EntityLivingBase
import net.minecraft.nbt.NBTTagByte
import temportalist.esotericraft.api.emulation.IAbility
import temportalist.esotericraft.api.emulation.IAbility.Ability
import temportalist.esotericraft.api.emulation.ability.IAbilityFloat
import temportalist.esotericraft.galvanization.common.Galvanize
/**
*
* Created by TheTemportalist on 5/18/2016.
*
* @author TheTemportalist
*/
@Ability(id = "float")
class AbilityFloat extends AbilityBase[NBTTagByte] with IAbilityFloat {
private var terminalVelocity = -1000D
private var negateFallDistance = false
// ~~~~~ Naming
override def getName: String = "Float"
// ~~~~~ Map Parsing
override def parseMappingArguments(args: Array[AnyRef], entry: String): Unit = {
try {
this.terminalVelocity = args(0).toString.toLowerCase.toDouble
this.negateFallDistance = args(1).toString.toLowerCase.toBoolean
}
catch {
case e: Exception =>
Galvanize.log("[AbilityFloat] Error parsing mapping arguments.")
e.printStackTrace()
}
}
override def encodeMappingArguments(): Array[String] = {
Array[String](
this.terminalVelocity + "D",
if (this.negateFallDistance) "true" else "false"
)
}
// ~~~~~ Entity Handling
override def onUpdate(entity: EntityLivingBase): Unit = {
if (!this.isFlying(entity)) {
if (entity.motionY < this.terminalVelocity)
entity.motionY = this.terminalVelocity
if (this.negateFallDistance)
entity.fallDistance = 0F
}
}
}
| TheTemportalist/EsoTeriCraft | src/main/scala/temportalist/esotericraft/emulation/common/ability/AbilityFloat.scala | Scala | apache-2.0 | 1,553 |
package uk.ac.surrey.xw.extension.prim
import org.nlogo.api.Argument
import org.nlogo.api.Context
import org.nlogo.api.Reporter
import org.nlogo.core.Syntax.StringType
import org.nlogo.core.Syntax.reporterSyntax
trait SimpleStringReporter extends Reporter {
override def getSyntax = reporterSyntax(ret = StringType)
val string: String
def report(args: Array[Argument], context: Context): AnyRef = string
} | CRESS-Surrey/eXtraWidgets | xw/src/main/scala/uk/ac/surrey/xw/extension/prim/SimpleStringReporter.scala | Scala | mit | 413 |
package views.vrm_retention
import composition.TestHarness
import helpers.vrm_retention.CookieFactoryForUISpecs
import org.openqa.selenium.By
import org.openqa.selenium.WebDriver
import org.openqa.selenium.WebElement
import org.scalatest.concurrent.Eventually
import org.scalatest.concurrent.IntegrationPatience
import org.scalatest.selenium.WebBrowser.{click, currentUrl, go}
import pages.common.MainPanel.back
import pages.vrm_retention.{PaymentPage, BeforeYouStartPage, ConfirmPage, LeaveFeedbackPage, VehicleLookupPage}
import uk.gov.dvla.vehicles.presentation.common.testhelpers.{UiSpec, UiTag}
import views.vrm_retention.Confirm.ConfirmCacheKey
class ConfirmIntegrationSpec extends UiSpec with TestHarness with Eventually with IntegrationPatience {
"go to page" should {
"display the page" taggedAs UiTag in new WebBrowserForSelenium {
go to BeforeYouStartPage
cacheSetup()
go to ConfirmPage
currentUrl should equal(ConfirmPage.url)
}
"contain the hidden csrfToken field" taggedAs UiTag in new WebBrowserForSelenium {
go to VehicleLookupPage
val csrf: WebElement = webDriver.findElement(
By.name(uk.gov.dvla.vehicles.presentation.common.filters.CsrfPreventionAction.TokenName)
)
csrf.getAttribute("type") should equal("hidden")
csrf.getAttribute("name") should
equal(uk.gov.dvla.vehicles.presentation.common.filters.CsrfPreventionAction.TokenName)
csrf.getAttribute("value").length > 0 should equal(true)
}
"display the page with blank keeper title" taggedAs UiTag in new WebBrowserForSelenium {
go to BeforeYouStartPage
CookieFactoryForUISpecs
.vehicleAndKeeperLookupFormModel()
.vehicleAndKeeperDetailsModel(title = None)
.businessDetails()
.transactionId()
.eligibilityModel()
go to ConfirmPage
currentUrl should equal(ConfirmPage.url)
}
"display the page with blank keeper surname" taggedAs UiTag in new WebBrowserForSelenium {
go to BeforeYouStartPage
CookieFactoryForUISpecs
.vehicleAndKeeperLookupFormModel()
.vehicleAndKeeperDetailsModel(lastName = None)
.businessDetails()
.transactionId()
.eligibilityModel()
go to ConfirmPage
currentUrl should equal(ConfirmPage.url)
}
"display the page with blank address" taggedAs UiTag in new WebBrowserForSelenium {
go to BeforeYouStartPage
CookieFactoryForUISpecs
.vehicleAndKeeperLookupFormModel()
.vehicleAndKeeperDetailsModel(emptyAddress = true)
.businessDetails()
.transactionId()
.eligibilityModel()
go to ConfirmPage
currentUrl should equal(ConfirmPage.url)
}
}
"confirm button" should {
"redirect to paymentPage when confirm link is clicked" taggedAs UiTag in new WebBrowserForSelenium {
go to BeforeYouStartPage
cacheSetup()
CookieFactoryForUISpecs.paymentTransNo()
ConfirmPage.happyPath
currentUrl should equal(PaymentPage.url)
}
}
"exit" should {
"display feedback page when exit link is clicked" taggedAs UiTag in new WebBrowserForSelenium {
go to BeforeYouStartPage
cacheSetup()
go to ConfirmPage
click on ConfirmPage.exit
currentUrl should equal(LeaveFeedbackPage.url)
}
"delete the Confirm cookie" taggedAs UiTag in new WebBrowserForSeleniumWithPhantomJsLocal {
go to BeforeYouStartPage
cacheSetup().confirmFormModel()
go to ConfirmPage
click on ConfirmPage.exit
webDriver.manage().getCookieNamed(ConfirmCacheKey) should equal(null)
}
}
"back button" should {
"redirect to VehicleLookupPage page" taggedAs UiTag in new WebBrowserForSelenium {
go to BeforeYouStartPage
cacheSetup()
go to ConfirmPage
click on back
currentUrl should equal(VehicleLookupPage.url)
}
"redirect to VehicleLookupPage page with ceg identifier" taggedAs UiTag in new WebBrowserForSelenium {
go to BeforeYouStartPage
cacheSetup().withIdentifier("CEG")
go to ConfirmPage
click on back
currentUrl should equal(VehicleLookupPage.cegUrl)
}
}
private def cacheSetup()(implicit webDriver: WebDriver) =
CookieFactoryForUISpecs
.vehicleAndKeeperLookupFormModel()
.vehicleAndKeeperDetailsModel()
.businessDetails()
.transactionId()
.eligibilityModel()
}
| dvla/vrm-retention-online | test/views/vrm_retention/ConfirmIntegrationSpec.scala | Scala | mit | 4,452 |
/*
* Copyright (C) 2011-2016 Mathias Doenitz
* Adapted and extended in 2016 by Eugene Yokota
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sjsonnew
package support.spray
import spray.json.{ JsValue, JsNumber, JsString, JsNull, JsTrue, JsFalse }
object PrimitiveFormatsSpec extends verify.BasicTestSuite with BasicJsonProtocol {
test("The IntJsonFormat") {
// "convert an Int to a JsNumber"
Predef.assert(Converter.toJsonUnsafe[Int](42) == JsNumber(42))
// "convert a JsNumber to an Int"
Predef.assert(Converter.fromJsonUnsafe[Int](JsNumber(42)) == 42)
}
test("The LongJsonFormat") {
// "convert a Long to a JsNumber"
Predef.assert(Converter.toJsonUnsafe[Long](7563661897011259335L) == JsNumber(7563661897011259335L))
// "convert a JsNumber to a Long"
Predef.assert(Converter.fromJsonUnsafe[Long](JsNumber(7563661897011259335L)) == 7563661897011259335L)
}
test("The FloatJsonFormat") {
// "convert a Float to a JsNumber"
// Predef.assert(Converter.toJsonUnsafe(4.2f) == JsNumber(4.2f))
// "convert a Float.NaN to a JsNull"
Predef.assert(Converter.toJsonUnsafe(Float.NaN) == JsNull)
// "convert a Float.PositiveInfinity to a JsNull"
Predef.assert(Converter.toJsonUnsafe(Float.PositiveInfinity) == JsNull)
// "convert a Float.NegativeInfinity to a JsNull"
Predef.assert(Converter.toJsonUnsafe(Float.NegativeInfinity) == JsNull)
// "convert a JsNumber to a Float"
Predef.assert(Converter.fromJsonUnsafe[Float](JsNumber(4.2f)) == 4.2f)
// "convert a JsNull to a Float"
Predef.assert(Converter.fromJsonUnsafe[Float](JsNull).isNaN == Float.NaN.isNaN)
}
test("The DoubleJsonFormat") {
// "convert a Double to a JsNumber"
Predef.assert(Converter.toJsonUnsafe(4.2) == JsNumber(4.2))
// "convert a Double.NaN to a JsNull"
Predef.assert(Converter.toJsonUnsafe(Double.NaN) == JsNull)
// "convert a Double.PositiveInfinity to a JsNull"
Predef.assert(Converter.toJsonUnsafe(Double.PositiveInfinity) == JsNull)
// "convert a Double.NegativeInfinity to a JsNull"
Predef.assert(Converter.toJsonUnsafe(Double.NegativeInfinity) == JsNull)
// "convert a JsNumber to a Double"
Predef.assert(Converter.fromJsonUnsafe[Double](JsNumber(4.2)) == 4.2)
// "convert a JsNull to a Double"
Predef.assert(Converter.fromJsonUnsafe[Double](JsNull).isNaN == Double.NaN.isNaN)
}
test("The ByteJsonFormat") {
// "convert a Byte to a JsNumber"
Predef.assert(Converter.toJsonUnsafe(42.asInstanceOf[Byte]) == JsNumber(42))
// "convert a JsNumber to a Byte"
Predef.assert(Converter.fromJsonUnsafe[Byte](JsNumber(42)) == 42)
}
test("The ShortJsonFormat") {
// "convert a Short to a JsNumber"
Predef.assert(Converter.toJsonUnsafe(42.asInstanceOf[Short]) == JsNumber(42))
// "convert a JsNumber to a Short"
Predef.assert(Converter.fromJsonUnsafe[Short](JsNumber(42)) == 42)
}
test("The BigDecimalJsonFormat") {
// "convert a BigDecimal to a JsNumber"
Predef.assert(Converter.toJsonUnsafe(BigDecimal(42)) == JsNumber(42))
// "convert a JsNumber to a BigDecimal"
Predef.assert(Converter.fromJsonUnsafe[BigDecimal](JsNumber(42)) == BigDecimal(42))
}
test("The BigIntJsonFormat") {
// "convert a BigInt to a JsNumber"
Predef.assert(Converter.toJsonUnsafe(BigInt(42)) == JsNumber(42))
// "convert a JsNumber to a BigInt"
    Predef.assert(Converter.fromJsonUnsafe[BigInt](JsNumber(42)) == BigInt(42))
}
test("The UnitJsonFormat") {
// "convert Unit to a JsNumber(1)"
Predef.assert(Converter.toJsonUnsafe(()) == JsNumber(1))
// "convert a JsNumber to Unit"
Predef.assert(Converter.fromJsonUnsafe[Unit](JsNumber(1)) == (()))
}
test("The BooleanJsonFormat") {
// "convert true to a JsTrue"
Predef.assert(Converter.toJsonUnsafe(true) == JsTrue)
// "convert false to a JsFalse"
Predef.assert(Converter.toJsonUnsafe(false) == JsFalse)
// "convert a JsTrue to true"
Predef.assert(Converter.fromJsonUnsafe[Boolean](JsTrue) == true)
// "convert a JsFalse to false"
Predef.assert(Converter.fromJsonUnsafe[Boolean](JsFalse) == false)
}
test("The CharJsonFormat") {
// "convert a Char to a JsString"
Predef.assert(Converter.toJsonUnsafe('c') == JsString("c"))
// "convert a JsString to a Char"
Predef.assert(Converter.fromJsonUnsafe[Char](JsString("c")) == 'c')
}
test("The StringJsonFormat") {
// "convert a String to a JsString"
Predef.assert(Converter.toJsonUnsafe("Hello") == JsString("Hello"))
// "convert a JsString to a String"
Predef.assert(Converter.fromJsonUnsafe[String](JsString("Hello")) == "Hello")
}
test("The SymbolJsonFormat") {
// "convert a Symbol to a JsString"
Predef.assert(Converter.toJsonUnsafe(Symbol("Hello")) == JsString("Hello"))
// "convert a JsString to a Symbol"
Predef.assert(Converter.fromJsonUnsafe[Symbol](JsString("Hello")) == Symbol("Hello"))
}
}
| eed3si9n/sjson-new | support/spray/src/test/scala/sjsonnew/support/spray/PrimitiveFormatsSpec.scala | Scala | apache-2.0 | 5,524 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.oap.expression
import com.google.common.collect.Lists
import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.DateUnit
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.{InternalRow, expressions}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.execution.BaseSubqueryExec
import org.apache.spark.sql.execution.ExecSubqueryExpression
import org.apache.spark.sql.execution.ScalarSubquery
import org.apache.spark.sql.types._
import scala.collection.mutable.ListBuffer
class ColumnarScalarSubquery(
query: ScalarSubquery)
extends Expression with ColumnarExpression {
override def dataType: DataType = query.dataType
override def children: Seq[Expression] = Nil
override def nullable: Boolean = true
override def toString: String = query.toString
override def eval(input: InternalRow): Any = query.eval(input)
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = query.doGenCode(ctx, ev)
override def canEqual(that: Any): Boolean = query.canEqual(that)
override def productArity: Int = query.productArity
override def productElement(n: Int): Any = query.productElement(n)
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
val value = query.eval(null)
val resultType = CodeGeneration.getResultType(query.dataType)
query.dataType match {
case t: StringType =>
value match {
case null =>
(TreeBuilder.makeNull(resultType), resultType)
case _ =>
(TreeBuilder.makeStringLiteral(value.toString().asInstanceOf[String]), resultType)
}
case t: IntegerType =>
value match {
case null =>
(TreeBuilder.makeNull(resultType), resultType)
case _ =>
(TreeBuilder.makeLiteral(value.asInstanceOf[Integer]), resultType)
}
case t: LongType =>
value match {
case null =>
(TreeBuilder.makeNull(resultType), resultType)
case _ =>
(TreeBuilder.makeLiteral(value.asInstanceOf[java.lang.Long]), resultType)
}
case t: DoubleType =>
value match {
case null =>
(TreeBuilder.makeNull(resultType), resultType)
case _ =>
(TreeBuilder.makeLiteral(value.asInstanceOf[java.lang.Double]), resultType)
}
case d: DecimalType =>
value match {
case null =>
(TreeBuilder.makeNull(resultType), resultType)
case _ =>
val v = value.asInstanceOf[Decimal]
(TreeBuilder.makeDecimalLiteral(v.toString, v.precision, v.scale), resultType)
}
case d: DateType =>
value match {
case null =>
(TreeBuilder.makeNull(resultType), resultType)
case _ =>
val origIntNode = TreeBuilder.makeLiteral(value.asInstanceOf[Integer])
val dateNode = TreeBuilder.makeFunction("castDATE", Lists.newArrayList(origIntNode), new ArrowType.Date(DateUnit.DAY))
(dateNode, new ArrowType.Date(DateUnit.DAY))
}
case b: BooleanType =>
value match {
case null =>
(TreeBuilder.makeNull(resultType), resultType)
case _ =>
(TreeBuilder.makeLiteral(value.asInstanceOf[java.lang.Boolean]), resultType)
}
}
}
}
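// Illustrative note (an assumption, not taken from the original sources): for a scalar subquery
// of IntegerType that evaluates to 7, the match above returns TreeBuilder.makeLiteral(7: Integer)
// paired with the Arrow result type, while a null result becomes TreeBuilder.makeNull(resultType)
// for every supported type.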
| Intel-bigdata/OAP | oap-native-sql/core/src/main/scala/com/intel/oap/expression/ColumnarSubquery.scala | Scala | apache-2.0 | 4,504 |
package com.twitter.finagle.netty3.param
import com.twitter.finagle.{util, Stack}
import org.jboss.netty.util.Timer
/**
* A class eligible for configuring a netty3 timer.
*/
private[finagle] case class Netty3Timer(timer: Timer) {
def mk(): (Netty3Timer, Stack.Param[Netty3Timer]) =
(this, Netty3Timer.param)
}
private[finagle] object Netty3Timer {
implicit val param = Stack.Param(Netty3Timer(util.DefaultTimer.netty))
}
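/**
 * Illustrative sketch only (not part of the original file): shows how a Stack-based module
 * could read the configured timer from its params, falling back to the shared default above.
 */
private object Netty3TimerExample {
  // resolves the timer through the implicit Stack.Param defined on the companion object
  def timerFrom(params: Stack.Params): Timer = params[Netty3Timer].timer
}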
| sveinnfannar/finagle | finagle-core/src/main/scala/com/twitter/finagle/netty3/param/Params.scala | Scala | apache-2.0 | 433 |
package insult
import _root_.vocab._
abstract class SingleItem extends ItemSet {
override def get = Seq(this)
def getWord: String
}
abstract class SingleVocabItem(vocab: Vocab) extends SingleItem {
override def getWord = vocab.get
}
object NounThing extends SingleVocabItem(NounGenericableVocab)
object NounActivity extends SingleVocabItem(NounVocab)
object NounActivityGeneric extends SingleVocabItem(NounGenericVocab)
object Adjective extends SingleVocabItem(AdjectiveVocab)
object AdjectiveGeneric extends SingleVocabItem(AdjectiveGenericVocab)
object Adverb extends SingleVocabItem(AdverbVocab)
object AdverbGeneric extends SingleVocabItem(AdverbGenericVocab)
object Preposition extends SingleVocabItem(PrepositionVocab)
object Participle extends SingleVocabItem(ParticipleVocab)
object Special extends SingleVocabItem(SpecialVocab)
case class Conjonction(str: String) extends SingleItem {
override def getWord = str
} | HiinoFW/InsultGenerator | insult/insult/SingleItem.scala | Scala | mit | 938 |
package org.openstack.api.restful
/**
* Exception that's thrown when the json syntax is malformed
* @author Antonio Murgia
* @version 09/11/14
*/
class MalformedJsonException(s: String = "Malformed Json") extends java.text.ParseException(s, 0)
| tmnd1991/ceilometerAPI4s | src/main/scala/org/openstack/api/restful/MalformedJsonException.scala | Scala | apache-2.0 | 250 |
package mesosphere.marathon
package raml
import mesosphere.UnitTest
import mesosphere.marathon.test.SettableClock
import mesosphere.marathon.core.health.{MesosCommandHealthCheck, MesosHttpHealthCheck, PortReference}
import mesosphere.marathon.core.instance.{Goal, Reservation}
import mesosphere.marathon.core.pod.{ContainerNetwork, MesosContainer, PodDefinition}
import mesosphere.marathon.core.task.state.NetworkInfoPlaceholder
import mesosphere.marathon.state.{PathId, Timestamp}
import mesosphere.marathon.stream.Implicits._
import org.apache.mesos.Protos
import scala.concurrent.duration._
class PodStatusConversionTest extends UnitTest {
import PodStatusConversionTest._
"PodStatusConversion" should {
"multiple tasks with multiple container networks convert to proper network status" in {
def fakeContainerNetworks(netmap: Map[String, String]): Seq[Protos.NetworkInfo] = netmap.map { entry =>
val (name, ip) = entry
Protos.NetworkInfo.newBuilder()
.setName(name)
.addIpAddresses(Protos.NetworkInfo.IPAddress.newBuilder().setIpAddress(ip))
.build()
}(collection.breakOut)
val tasksWithNetworks: Seq[core.task.Task] = Seq(
fakeTask(fakeContainerNetworks(Map("abc" -> "1.2.3.4", "def" -> "5.6.7.8"))),
fakeTask(fakeContainerNetworks(Map("abc" -> "1.2.3.4", "def" -> "5.6.7.8")))
)
val result: Seq[NetworkStatus] = networkStatuses(tasksWithNetworks)
val expected: Seq[NetworkStatus] = Seq(
NetworkStatus(name = Some("abc"), addresses = Seq("1.2.3.4")),
NetworkStatus(name = Some("def"), addresses = Seq("5.6.7.8"))
)
result.size should be(expected.size)
result.toSet should be(expected.toSet)
}
"multiple tasks with multiple host networks convert to proper network status" in {
def fakeHostNetworks(ips: Seq[String]): Seq[Protos.NetworkInfo] = ips.map { ip =>
Protos.NetworkInfo.newBuilder()
.addIpAddresses(Protos.NetworkInfo.IPAddress.newBuilder().setIpAddress(ip))
.build()
}(collection.breakOut)
val tasksWithNetworks: Seq[core.task.Task] = Seq(
fakeTask(fakeHostNetworks(Seq("1.2.3.4", "5.6.7.8"))),
fakeTask(fakeHostNetworks(Seq("1.2.3.4", "5.6.7.8")))
)
val result: Seq[NetworkStatus] = networkStatuses(tasksWithNetworks)
val expected: Seq[NetworkStatus] = Seq(
// host network IPs are consolidated since they are nameless
NetworkStatus(addresses = Seq("1.2.3.4", "5.6.7.8"))
)
result.size should be(expected.size)
result should be(expected)
}
"ephemeral pod launched, no official Mesos status yet" in {
implicit val clock = new SettableClock()
val pod = basicOneContainerPod.copy(versionInfo = state.VersionInfo.OnlyVersion(clock.now()))
clock += 1.seconds
val fixture = provisionedInstance(pod)
val status = PodStatusConversion.podInstanceStatusRamlWriter((pod, fixture.instance))
status.id should be(fixture.instance.instanceId.idString)
status.specReference should be(Option(s"/v2/pods/foo::versions/${pod.version.toOffsetDateTime}"))
status.agentHostname should be(Some("agent1"))
status.agentId should be (Some("agentId1"))
status.status should be(PodInstanceState.Pending)
status.resources should be(Some(PodDefinition.DefaultExecutorResources))
status.containers should be(Seq(
ContainerStatus(
name = "ct1",
status = "TASK_STAGING",
statusSince = fixture.since.toOffsetDateTime,
containerId = Some(fixture.taskIds.head.idString),
endpoints = Seq(
ContainerEndpointStatus(name = "admin", allocatedHostPort = Some(1001)),
ContainerEndpointStatus(name = "web")
),
lastUpdated = fixture.since.toOffsetDateTime,
lastChanged = fixture.since.toOffsetDateTime
)
))
}
"ephemeral pod launched, received STAGING status from Mesos" in {
implicit val clock = new SettableClock()
val pod = basicOneContainerPod.copy(versionInfo = state.VersionInfo.OnlyVersion(clock.now()))
clock += 1.seconds
val fixture = stagingInstance(pod)
val status = PodStatusConversion.podInstanceStatusRamlWriter((pod, fixture.instance))
status.id should be(fixture.instance.instanceId.idString)
status.agentHostname should be(Some("agent1"))
status.agentId should be (Some("agentId1"))
status.status should be(PodInstanceState.Staging)
status.resources should be(Some(pod.aggregateResources()))
status.containers should be(Seq(
ContainerStatus(
name = "ct1",
status = "TASK_STAGING",
statusSince = fixture.since.toOffsetDateTime,
containerId = Some(fixture.taskIds.head.idString),
endpoints = Seq(
ContainerEndpointStatus(name = "admin", allocatedHostPort = Some(1001)),
ContainerEndpointStatus(name = "web")
),
resources = pod.container("ct1").map(_.resources),
lastUpdated = fixture.since.toOffsetDateTime,
lastChanged = fixture.since.toOffsetDateTime
)
))
status.networks should be('empty)
}
"ephemeral pod launched, received STARTING status from Mesos" in {
implicit val clock = new SettableClock()
val pod = basicOneContainerPod.copy(versionInfo = state.VersionInfo.OnlyVersion(clock.now()))
clock += 1.seconds
val fixture = startingInstance(pod)
val status = PodStatusConversion.podInstanceStatusRamlWriter((pod, fixture.instance))
status.id should be(fixture.instance.instanceId.idString)
status.agentHostname should be(Some("agent1"))
status.agentId should be (Some("agentId1"))
status.status should be(PodInstanceState.Staging)
status.resources should be(Some(pod.aggregateResources()))
status.containers should be(Seq(
ContainerStatus(
name = "ct1",
status = "TASK_STARTING",
statusSince = fixture.since.toOffsetDateTime,
containerId = Some(fixture.taskIds.head.idString),
endpoints = Seq(
ContainerEndpointStatus(name = "admin", allocatedHostPort = Some(1001)),
ContainerEndpointStatus(name = "web")
),
resources = pod.container("ct1").map(_.resources),
lastUpdated = fixture.since.toOffsetDateTime,
lastChanged = fixture.since.toOffsetDateTime
)
))
status.networks.toSet should be(Set(
NetworkStatus(Some("dcos"), Seq("1.2.3.4")),
NetworkStatus(Some("bigdog"), Seq("2.3.4.5"))
))
}
"ephemeral pod launched, received RUNNING status from Mesos, no task endpoint health info" in {
implicit val clock = new SettableClock()
val pod = basicOneContainerPod.copy(versionInfo = state.VersionInfo.OnlyVersion(clock.now()))
clock += 1.seconds
val fixture = runningInstance(pod)
val status = PodStatusConversion.podInstanceStatusRamlWriter((pod, fixture.instance))
status.id should be(fixture.instance.instanceId.idString)
status.agentHostname should be(Some("agent1"))
status.agentId should be (Some("agentId1"))
status.status should be(PodInstanceState.Degraded)
status.resources should be(Some(pod.aggregateResources()))
status.containers should be(Seq(
ContainerStatus(
name = "ct1",
status = "TASK_RUNNING",
statusSince = fixture.since.toOffsetDateTime,
containerId = Some(fixture.taskIds.head.idString),
conditions = Seq(
StatusCondition("healthy", fixture.since.toOffsetDateTime, fixture.since.toOffsetDateTime, "false",
Some(PodStatusConversion.HEALTH_UNREPORTED))
),
endpoints = Seq(
ContainerEndpointStatus(name = "admin", allocatedHostPort = Some(1001)),
ContainerEndpointStatus(name = "web")
),
resources = pod.container("ct1").map(_.resources),
lastUpdated = fixture.since.toOffsetDateTime,
lastChanged = fixture.since.toOffsetDateTime
)
))
status.networks.toSet should be(Set(
NetworkStatus(Some("dcos"), Seq("1.2.3.4")),
NetworkStatus(Some("bigdog"), Seq("2.3.4.5"))
))
}
"ephemeral pod launched, received RUNNING status from Mesos, task endpoint health is failing" in {
implicit val clock = new SettableClock()
val pod = basicOneContainerPod.copy(versionInfo = state.VersionInfo.OnlyVersion(clock.now()))
clock += 1.seconds
val fixture = runningInstance(pod = pod, maybeHealthy = Some(false)) // task status will say unhealthy
val status = PodStatusConversion.podInstanceStatusRamlWriter((pod, fixture.instance))
status.id should be(fixture.instance.instanceId.idString)
status.agentHostname should be(Some("agent1"))
status.agentId should be (Some("agentId1"))
status.status should be(PodInstanceState.Degraded)
status.resources should be(Some(pod.aggregateResources()))
status.containers should be(Seq(
ContainerStatus(
name = "ct1",
status = "TASK_RUNNING",
statusSince = fixture.since.toOffsetDateTime,
containerId = Some(fixture.taskIds.head.idString),
conditions = Seq(
StatusCondition("healthy", fixture.since.toOffsetDateTime, fixture.since.toOffsetDateTime, "false",
Some(PodStatusConversion.HEALTH_REPORTED))
),
endpoints = Seq(
ContainerEndpointStatus(name = "admin", allocatedHostPort = Some(1001)),
ContainerEndpointStatus(name = "web", healthy = Some(false))
),
resources = pod.container("ct1").map(_.resources),
lastUpdated = fixture.since.toOffsetDateTime,
lastChanged = fixture.since.toOffsetDateTime
)
))
status.networks.toSet should be(Set(
NetworkStatus(Some("dcos"), Seq("1.2.3.4")),
NetworkStatus(Some("bigdog"), Seq("2.3.4.5"))
))
}
"ephemeral pod launched, received RUNNING status from Mesos, task endpoint health looks great" in {
implicit val clock = new SettableClock()
val pod = basicOneContainerPod.copy(versionInfo = state.VersionInfo.OnlyVersion(clock.now()))
clock += 1.seconds
val fixture = runningInstance(pod = pod, maybeHealthy = Some(true)) // task status will say healthy
val status = PodStatusConversion.podInstanceStatusRamlWriter((pod, fixture.instance))
status.id should be(fixture.instance.instanceId.idString)
status.agentHostname should be(Some("agent1"))
status.agentId should be (Some("agentId1"))
status.status should be(PodInstanceState.Stable)
status.resources should be(Some(pod.aggregateResources()))
status.containers should be(Seq(
ContainerStatus(
name = "ct1",
status = "TASK_RUNNING",
statusSince = fixture.since.toOffsetDateTime,
containerId = Some(fixture.taskIds.head.idString),
conditions = Seq(
StatusCondition("healthy", fixture.since.toOffsetDateTime, fixture.since.toOffsetDateTime, "true",
Some(PodStatusConversion.HEALTH_REPORTED))
),
endpoints = Seq(
ContainerEndpointStatus(name = "admin", allocatedHostPort = Some(1001)),
ContainerEndpointStatus(name = "web", healthy = Some(true))
),
resources = pod.container("ct1").map(_.resources),
lastUpdated = fixture.since.toOffsetDateTime,
lastChanged = fixture.since.toOffsetDateTime
)
))
status.networks.toSet should be(Set(
NetworkStatus(Some("dcos"), Seq("1.2.3.4")),
NetworkStatus(Some("bigdog"), Seq("2.3.4.5"))
))
}
"ephemeral pod launched, received RUNNING status from Mesos, task command-line health is missing" in {
implicit val clock = new SettableClock()
val pod = withCommandLineHealthChecks(basicOneContainerPod.copy(versionInfo = state.VersionInfo.OnlyVersion(clock.now())))
clock += 1.seconds
val fixture = runningInstance(pod = pod) // mesos task status health is missing
val status = PodStatusConversion.podInstanceStatusRamlWriter((pod, fixture.instance))
status.id should be(fixture.instance.instanceId.idString)
status.agentHostname should be(Some("agent1"))
status.agentId should be (Some("agentId1"))
status.status should be(PodInstanceState.Degraded)
status.resources should be(Some(pod.aggregateResources()))
status.containers should be(Seq(
ContainerStatus(
name = "ct1",
status = "TASK_RUNNING",
statusSince = fixture.since.toOffsetDateTime,
containerId = Some(fixture.taskIds.head.idString),
conditions = Seq(
StatusCondition("healthy", fixture.since.toOffsetDateTime, fixture.since.toOffsetDateTime, "false",
Some(PodStatusConversion.HEALTH_UNREPORTED))
),
endpoints = Seq(
ContainerEndpointStatus(name = "admin", allocatedHostPort = Some(1001)),
ContainerEndpointStatus(name = "web")
),
resources = pod.container("ct1").map(_.resources),
lastUpdated = fixture.since.toOffsetDateTime,
lastChanged = fixture.since.toOffsetDateTime
)
))
status.networks.toSet should be(Set(
NetworkStatus(Some("dcos"), Seq("1.2.3.4")),
NetworkStatus(Some("bigdog"), Seq("2.3.4.5"))
))
}
"ephemeral pod launched, received RUNNING status from Mesos, task command-line health is failing" in {
implicit val clock = new SettableClock()
val pod = withCommandLineHealthChecks(basicOneContainerPod.copy(versionInfo = state.VersionInfo.OnlyVersion(clock.now())))
clock += 1.seconds
val fixture = runningInstance(pod = pod, maybeHealthy = Some(false)) // task status will say unhealthy
val status = PodStatusConversion.podInstanceStatusRamlWriter((pod, fixture.instance))
status.id should be(fixture.instance.instanceId.idString)
status.agentHostname should be(Some("agent1"))
status.agentId should be (Some("agentId1"))
status.status should be(PodInstanceState.Degraded)
status.resources should be(Some(pod.aggregateResources()))
status.containers should be(Seq(
ContainerStatus(
name = "ct1",
status = "TASK_RUNNING",
statusSince = fixture.since.toOffsetDateTime,
containerId = Some(fixture.taskIds.head.idString),
conditions = Seq(
StatusCondition("healthy", fixture.since.toOffsetDateTime, fixture.since.toOffsetDateTime, "false",
Some(PodStatusConversion.HEALTH_REPORTED))
),
endpoints = Seq(
ContainerEndpointStatus(name = "admin", allocatedHostPort = Some(1001)),
ContainerEndpointStatus(name = "web")
),
resources = pod.container("ct1").map(_.resources),
lastUpdated = fixture.since.toOffsetDateTime,
lastChanged = fixture.since.toOffsetDateTime
)
))
status.networks.toSet should be(Set(
NetworkStatus(Some("dcos"), Seq("1.2.3.4")),
NetworkStatus(Some("bigdog"), Seq("2.3.4.5"))
))
}
"ephemeral pod launched, received RUNNING status from Mesos, task command-line health is passing" in {
implicit val clock = new SettableClock()
val pod = withCommandLineHealthChecks(basicOneContainerPod.copy(versionInfo = state.VersionInfo.OnlyVersion(clock.now())))
clock += 1.seconds
val fixture = runningInstance(pod = pod, maybeHealthy = Some(true)) // task status will say healthy
val status = PodStatusConversion.podInstanceStatusRamlWriter((pod, fixture.instance))
status.id should be(fixture.instance.instanceId.idString)
status.agentHostname should be(Some("agent1"))
status.agentId should be (Some("agentId1"))
status.status should be(PodInstanceState.Stable)
status.resources should be(Some(pod.aggregateResources()))
status.containers should be(Seq(
ContainerStatus(
name = "ct1",
status = "TASK_RUNNING",
statusSince = fixture.since.toOffsetDateTime,
containerId = Some(fixture.taskIds.head.idString),
conditions = Seq(
StatusCondition("healthy", fixture.since.toOffsetDateTime, fixture.since.toOffsetDateTime, "true",
Some(PodStatusConversion.HEALTH_REPORTED))
),
endpoints = Seq(
ContainerEndpointStatus(name = "admin", allocatedHostPort = Some(1001)),
ContainerEndpointStatus(name = "web")
),
resources = pod.container("ct1").map(_.resources),
lastUpdated = fixture.since.toOffsetDateTime,
lastChanged = fixture.since.toOffsetDateTime
)
))
status.networks.toSet should be(Set(
NetworkStatus(Some("dcos"), Seq("1.2.3.4")),
NetworkStatus(Some("bigdog"), Seq("2.3.4.5"))
))
}
"a stateful pod with one container and one persistent volume" in {
val localVolumeId = core.instance.LocalVolumeId(
PathId("/persistent"), "volume", "5425cbaa-8fd3-45f0-afa4-74ef4fcc594b")
val reservation = core.instance.Reservation(
volumeIds = Seq(localVolumeId),
state = core.instance.Reservation.State.New(timeout = None),
Reservation.SimplifiedId(core.instance.Instance.Id.forRunSpec(PathId("/persistent")))
)
implicit val clock = new SettableClock()
val fixture = fakeInstance(
podWithPersistentVolume, core.condition.Condition.Running, core.condition.Condition.Running,
maybeReservation = Some(reservation))
val status = PodStatusConversion.podInstanceStatusRamlWriter((podWithPersistentVolume, fixture.instance))
status.localVolumes should be(Seq(
LocalVolumeId(
"/persistent", "volume", "5425cbaa-8fd3-45f0-afa4-74ef4fcc594b",
"persistent#volume#5425cbaa-8fd3-45f0-afa4-74ef4fcc594b")))
}
}
}
object PodStatusConversionTest {
val containerResources = Resources(cpus = 0.01, mem = 100)
val basicOneContainerPod = PodDefinition(
id = PathId("/foo"),
containers = Seq(
MesosContainer(
name = "ct1",
resources = containerResources,
image = Some(Image(kind = ImageType.Docker, id = "busybox")),
endpoints = Seq(
Endpoint(name = "web", containerPort = Some(80)),
Endpoint(name = "admin", containerPort = Some(90), hostPort = Some(0))
),
healthCheck = Some(MesosHttpHealthCheck(portIndex = Some(PortReference("web")), path = Some("/ping")))
)
),
networks = Seq(ContainerNetwork(name = "dcos"), ContainerNetwork("bigdog"))
)
val podWithPersistentVolume = PodDefinition(
id = PathId("/persistent"),
containers = Seq(
MesosContainer(
name = "ct1",
resources = containerResources,
image = Some(Image(kind = ImageType.Docker, id = "busybox")),
volumeMounts = Seq(state.VolumeMount(Some("data"), "mountPath")))),
volumes = Seq(
state.PersistentVolume(
name = Some("volume"),
persistent = state.PersistentVolumeInfo(10))))
case class InstanceFixture(
since: Timestamp,
agentInfo: core.instance.Instance.AgentInfo,
taskIds: Seq[core.task.Task.Id],
instance: core.instance.Instance)
def provisionedInstance(pod: PodDefinition)(implicit clock: SettableClock): InstanceFixture =
fakeInstance(pod, core.condition.Condition.Provisioned, core.condition.Condition.Provisioned)
def stagingInstance(pod: PodDefinition)(implicit clock: SettableClock): InstanceFixture =
fakeInstance(pod, core.condition.Condition.Staging, core.condition.Condition.Staging, Some(Protos.TaskState.TASK_STAGING))
def startingInstance(pod: PodDefinition)(implicit clock: SettableClock): InstanceFixture =
fakeInstance(pod, core.condition.Condition.Starting, core.condition.Condition.Starting, Some(Protos.TaskState.TASK_STARTING),
Some(Map("dcos" -> "1.2.3.4", "bigdog" -> "2.3.4.5")))
def runningInstance(
pod: PodDefinition,
maybeHealthy: Option[Boolean] = None)(implicit clock: SettableClock): InstanceFixture =
fakeInstance(pod, core.condition.Condition.Running, core.condition.Condition.Running, Some(Protos.TaskState.TASK_RUNNING),
Some(Map("dcos" -> "1.2.3.4", "bigdog" -> "2.3.4.5")), maybeHealthy)
def fakeInstance(
pod: PodDefinition,
condition: core.condition.Condition,
taskStatus: core.condition.Condition,
maybeTaskState: Option[Protos.TaskState] = None,
maybeNetworks: Option[Map[String, String]] = None,
maybeHealthy: Option[Boolean] = None,
maybeReservation: Option[Reservation] = None)(implicit clock: SettableClock): InstanceFixture = {
val since = clock.now()
val agentInfo = core.instance.Instance.AgentInfo("agent1", Some("agentId1"), None, None, Seq.empty)
val instanceId = core.instance.Instance.Id.forRunSpec(pod.id)
val taskIds = pod.containers.map { container =>
core.task.Task.Id(instanceId, Some(container))
}
val mesosStatus = maybeTaskState.map { taskState =>
val statusProto = Protos.TaskStatus.newBuilder()
.setState(taskState)
.setTaskId(taskIds.head.mesosTaskId)
maybeNetworks.foreach { networks =>
statusProto.setContainerStatus(Protos.ContainerStatus.newBuilder()
.addAllNetworkInfos(networks.map { entry =>
val (networkName, ipAddress) = entry
Protos.NetworkInfo.newBuilder().addIpAddresses(
Protos.NetworkInfo.IPAddress.newBuilder().setIpAddress(ipAddress)
).setName(networkName).build()
}.asJava).build()
).build()
}
maybeHealthy.foreach(statusProto.setHealthy)
statusProto.build()
}
val instance: core.instance.Instance = core.instance.Instance(
instanceId = instanceId,
agentInfo = Some(agentInfo),
state = core.instance.Instance.InstanceState(
condition = condition,
since = since,
activeSince = if (condition == core.condition.Condition.Provisioned) None else Some(since),
healthy = None,
goal = core.instance.Goal.Running),
tasksMap = Seq[core.task.Task](
core.task.Task(
taskIds.head,
since,
core.task.Task.Status(
stagedAt = since,
startedAt = if (taskStatus == core.condition.Condition.Provisioned) None else Some(since),
mesosStatus = mesosStatus,
condition = taskStatus,
networkInfo = NetworkInfoPlaceholder(hostPorts = Seq(1001))
)
)
).map(t => t.taskId -> t)(collection.breakOut),
runSpec = pod,
reservation = maybeReservation
)
InstanceFixture(since, agentInfo, taskIds, instance)
} // fakeInstance
def fakeTask(networks: Seq[Protos.NetworkInfo]) = {
val instanceId = core.instance.Instance.Id.forRunSpec(PathId.empty)
val taskId = core.task.Task.Id(instanceId)
core.task.Task(
taskId = taskId,
status = core.task.Task.Status(
stagedAt = Timestamp.zero,
mesosStatus = Some(Protos.TaskStatus.newBuilder()
.setTaskId(taskId.mesosTaskId)
.setState(Protos.TaskState.TASK_UNKNOWN)
.setContainerStatus(Protos.ContainerStatus.newBuilder()
.addAllNetworkInfos(networks.asJava).build())
.build()),
condition = core.condition.Condition.Finished,
networkInfo = NetworkInfoPlaceholder()
),
runSpecVersion = Timestamp.zero)
}
def withCommandLineHealthChecks(pod: PodDefinition): PodDefinition = pod.copy(
// swap any endpoint health checks for a command-line health check
containers = basicOneContainerPod.containers.map { ct =>
ct.copy(
healthCheck = Some(MesosCommandHealthCheck(command = state.Command("echo this is a health check command"))))
})
}
| gsantovena/marathon | src/test/scala/mesosphere/marathon/raml/PodStatusConversionTest.scala | Scala | apache-2.0 | 24,272 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.akkasse
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.scalatest.{ AsyncWordSpec, BeforeAndAfterAll, Matchers }
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
abstract class BaseSpec
extends AsyncWordSpec
with Matchers
with BeforeAndAfterAll {
protected implicit val system = ActorSystem()
protected implicit val ec = system.dispatcher
protected implicit val mat = ActorMaterializer()
override protected def afterAll() = {
Await.ready(system.terminate(), 42.seconds)
super.afterAll()
}
}
| viktorklang/akka-sse | akka-sse/src/test/scala/de/heikoseeberger/akkasse/BaseSpec.scala | Scala | apache-2.0 | 1,214 |
package controllers
import com.bryzek.apidoc.api.v0.models.{Original, OriginalType, Validation}
import com.bryzek.apidoc.api.v0.models.json._
import core.ServiceFetcher
import lib.{DatabaseServiceFetcher, OriginalUtil, ServiceConfiguration}
import javax.inject.{Inject, Singleton}
import builder.OriginalValidator
import play.api.mvc._
import play.api.libs.json._
@Singleton
class Validations @Inject() () extends Controller {
private[this] val config = ServiceConfiguration(
orgKey = "tmp",
orgNamespace = "tmp.validations",
version = "0.0.1-dev"
)
def post() = AnonymousRequest(parse.temporaryFile) { request =>
    val contents = scala.io.Source.fromFile(request.body.file, "UTF-8").getLines.mkString("\n")
OriginalUtil.guessType(contents) match {
case None => {
BadRequest(Json.toJson(Validation(false, Seq("Could not determine the type of file from the content."))))
}
case Some(fileType) => {
OriginalValidator(
config = config,
original = Original(fileType, contents),
fetcher = DatabaseServiceFetcher(request.authorization)
).validate match {
case Left(errors) => {
BadRequest(Json.toJson(Validation(false, errors)))
}
case Right(service) => {
Ok(Json.toJson(Validation(true, Nil)))
}
}
}
}
}
}
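// Illustrative request sketch (the mount path is an assumption; the real one comes from the
// routes file):
//   curl -X POST --data-binary @service.json http://localhost:9000/validations
// An unrecognisable payload yields a 400 with "Could not determine the type of file from the
// content."; otherwise the body is validated against the detected original type.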
| Seanstoppable/apidoc | api/app/controllers/Validations.scala | Scala | mit | 1,387 |
package gapt.examples.tip.prod
import gapt.expr._
import gapt.expr.ty.TBase
import gapt.proofs.context.update.InductiveType
import gapt.proofs.Sequent
import gapt.proofs.gaptic._
import gapt.provers.viper.aip.AnalyticInductionProver
object prop_07 extends TacticsProof {
// Sorts
ctx += TBase( "sk" )
// Inductive types
ctx += InductiveType( ty"list", hoc"'nil' :list", hoc"'cons' :sk>list>list" )
ctx += InductiveType( ty"Nat", hoc"'Z' :Nat", hoc"'S' :Nat>Nat" )
//Function constants
ctx += hoc"'qrev' :list>list>list"
ctx += hoc"'plus' :Nat>Nat>Nat"
ctx += hoc"'length' :list>Nat"
val sequent =
hols"""
      def_head: ∀x0 ∀x1 head(cons(x0, x1)) = x0,
      def_tail: ∀x0 ∀x1 tail(cons(x0, x1)) = x1,
      def_p: ∀x0 p(S(x0)) = x0,
      def_qrev_0: ∀y qrev(nil, y) = y,
      def_qrev_1: ∀z ∀xs ∀y qrev(cons(z, xs), y) = qrev(xs, cons(z, y)),
      def_plus_0: ∀y plus(Z, y) = y,
      def_plus_1: ∀z ∀y plus(S(z), y) = S(plus(z, y)),
      def_length_0: length(nil) = Z,
      def_length_1: ∀y ∀xs length(cons(y, xs)) = S(length(xs)),
      constr_inj_0: ∀y0 ∀y1 ¬nil = cons(y0, y1),
      constr_inj_1: ∀y0 ¬Z = S(y0)
      :-
      goal: ∀x ∀y length(qrev(x, y)) = plus(length(x), length(y))
"""
  val lem_1 = ( "ap1" -> hof"∀y plus(Z, y) = y" ) +:
    ( "ap2" -> hof"∀z ∀y plus(S(z), y) = S(plus(z, y))" ) +:
    Sequent() :+ ( "lem_1" -> hof"∀x ∀y plus(x,S(y)) = S(plus(x,y))" )
val lem_1_proof = AnalyticInductionProver.singleInduction( lem_1, hov"x:Nat" )
  val cut_lem = ( "lem_1" -> hof"∀x ∀y plus(x,S(y)) = S(plus(x,y))" ) +: sequent
val cut_lem_proof = AnalyticInductionProver.singleInduction( cut_lem, hov"x:list" )
val proof = Lemma( sequent ) {
    cut( "lem_1", hof"∀x ∀y plus(x,S(y)) = S(plus(x,y))" )
insert( lem_1_proof )
insert( cut_lem_proof )
}
val proof2 = Lemma( sequent ) {
cut( "l", hof"""
!x!y (length(qrev x y) = plus(length x, length y) &
plus(length x, S(length y)) = S(plus(length x, length y)))
""" ) right escrgt
forget( "goal" ); allR( hov"x:list" )
induction( hov"x:list" ) onAll escrgt
}
}
| gapt/gapt | examples/tip/prod/prop_07.scala | Scala | gpl-3.0 | 2,211 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v3
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}
case class E80(value: Option[Int]) extends CtBoxIdentifier("Income Gifts of real property received") with CtOptionalInteger with Input
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600e/v3/E80.scala | Scala | apache-2.0 | 845 |
package scaps.evaluation
import scaps.settings.Settings
import scaps.searchEngine.SearchEngine
import java.io.File
import scaps.scala.featureExtraction.JarExtractor
import scaps.scala.featureExtraction.CompilerUtils
import scaps.scala.featureExtraction.ExtractionError
import scaps.utils.Logging
import scalaz.std.stream._
import scaps.searchEngine.queries.QueryAnalyzer
import scalaz.Memo
import java.util.regex.Pattern
object Interactive extends Logging {
def extract(settings: Settings = Settings.fromApplicationConf, evaluationSettings: EvaluationSettings = EvaluationSettings.fromApplicationConf) = {
val engine = SearchEngine(settings).get
evaluationSettings.downloadDir.mkdirs()
val classPaths = for {
project <- evaluationSettings.projects
dependency <- project.dependencies
} yield {
val file = new File(evaluationSettings.downloadDir, dependency.name)
if (!file.exists()) {
import sys.process._
(dependency.url #> file).!!
}
file.getAbsolutePath()
}
val compiler = CompilerUtils.createCompiler(classPaths)
val extractor = new JarExtractor(compiler)
engine.resetIndexes().get
evaluationSettings.projects.foreach { project =>
val jar = new File(evaluationSettings.downloadDir, project.name)
if (!jar.exists()) {
import sys.process._
(project.url #> jar).!!
}
def defs = ExtractionError.logErrors(extractor(jar), logger.info(_))
engine.index(defs).get
}
}
def updateStats(settings: Settings = Settings.fromApplicationConf) = {
val engine = SearchEngine(settings).get
engine.finalizeIndex().get
}
def analyze(query: String, settings: Settings = Settings.fromApplicationConf) = {
val engine = SearchEngine(settings).get
def findClassBySuffix(suffix: String) =
engine.typeIndex.findTypeDefsBySuffix(suffix, Set()).get
val analyzer = new QueryAnalyzer(
settings,
Memo.mutableHashMapMemo((findClassBySuffix _)),
Memo.mutableHashMapMemo(engine.typeIndex.termFrequency(_, Set()).get),
Memo.mutableHashMapMemo(engine.viewIndex.findAlternativesWithRetainedInfo(_, 0, Set()).get))
      .favorTypesMatching(Pattern.compile("""scala\..*"""))
      .favorTypesMatching(Pattern.compile("""(scala\.([^\.#]+))|java\.lang\.String"""))
analyzer.apply(query)
}
}
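// Illustrative REPL-style usage (assumes application.conf provides valid index and evaluation
// settings; the query string is only an example):
//   Interactive.extract()                    // download the configured jars and index them
//   Interactive.updateStats()                // finalize the indexes
//   Interactive.analyze("(Int, Int) => Int") // inspect how the analyzer resolves a query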
| scala-search/scaps | evaluation/src/main/scala/scaps/evaluation/Interactive.scala | Scala | mpl-2.0 | 2,372 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.UUID
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.{StreamingQuery, StreamingQueryException, StreamingQueryProgress, StreamingQueryStatus}
/**
* Wrap non-serializable StreamExecution to make the query serializable as it's easy for it to
* get captured with normal usage. It's safe to capture the query but not use it in executors.
 * However, if the user tries to call its methods on an executor, it will throw `IllegalStateException`.
*/
class StreamingQueryWrapper(@transient private val _streamingQuery: StreamExecution)
extends StreamingQuery with Serializable {
def streamingQuery: StreamExecution = {
    /** Assert that the code runs in the driver. */
if (_streamingQuery == null) {
throw new IllegalStateException("StreamingQuery cannot be used in executors")
}
_streamingQuery
}
override def name: String = {
streamingQuery.name
}
override def id: UUID = {
streamingQuery.id
}
override def runId: UUID = {
streamingQuery.runId
}
override def awaitTermination(): Unit = {
streamingQuery.awaitTermination()
}
override def awaitTermination(timeoutMs: Long): Boolean = {
streamingQuery.awaitTermination(timeoutMs)
}
override def stop(): Unit = {
streamingQuery.stop()
}
override def processAllAvailable(): Unit = {
streamingQuery.processAllAvailable()
}
override def isActive: Boolean = {
streamingQuery.isActive
}
override def lastProgress: StreamingQueryProgress = {
streamingQuery.lastProgress
}
override def explain(): Unit = {
streamingQuery.explain()
}
override def explain(extended: Boolean): Unit = {
streamingQuery.explain(extended)
}
/**
   * This method is called from Python. Python cannot call "explain" directly because the output
   * is written in the JVM process, which may not be visible to the Python process.
*/
def explainInternal(extended: Boolean): String = {
streamingQuery.explainInternal(extended)
}
override def sparkSession: SparkSession = {
streamingQuery.sparkSession
}
override def recentProgress: Array[StreamingQueryProgress] = {
streamingQuery.recentProgress
}
override def status: StreamingQueryStatus = {
streamingQuery.status
}
override def exception: Option[StreamingQueryException] = {
streamingQuery.exception
}
}
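// Illustrative note (a sketch, not from the Spark sources): because `_streamingQuery` is
// @transient, a wrapper captured in a closure deserializes with it set to null, so a
// hypothetical executor-side call such as
//   df.rdd.foreach { _ => wrapper.lastProgress }
// fails fast with IllegalStateException("StreamingQuery cannot be used in executors") rather
// than silently using a stale reference.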
| pgandhi999/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamingQueryWrapper.scala | Scala | apache-2.0 | 3,197 |
/*
* Copyright 2018 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model
import config.MerchantKeys
import org.joda.time.LocalDate
object Merchants {
def apply(config: Map[String, String]) = new Merchants(config)
}
class Merchants (config: Map[String, String]) extends Enumeration {
import MerchantKeys._
lazy val SelfAssessmentForDebitCard = MerchantDetails( config(SaForDebitCardId), config(SaForDebitCardAccount), "SA", false )
lazy val SelfAssessmentForCreditCard = MerchantDetails( config(SaForCreditCardId), config(SaForCreditCardAccount), "SA", true )
lazy val VatForDebitCard = MerchantDetails( config(VatForDebitCardId), config(VatForDebitCardAccount), "VAT", false )
lazy val VatForCreditCard = MerchantDetails( config(VatForCreditCardId), config(VatForCreditCardAccount), "VAT", true )
lazy val CorporationTaxForDebitCard = MerchantDetails( config(CtForDebitCardId), config(CtForDebitCardAccount), "CT", false )
lazy val CorporationTaxForCreditCard = MerchantDetails( config(CtForCreditCardId), config(CtForCreditCardAccount), "CT", true )
lazy val EPayeForDebitCard = MerchantDetails( config(EPayeForDebitCardId), config(EPayeForDebitCardAccount), "EPAYE", false )
lazy val EPayeForCreditCard = MerchantDetails( config(EPayeForCreditCardId), config(EPayeForCreditCardAccount), "EPAYE", true )
lazy val OtherTaxesForDebitCard = MerchantDetails( config(OtherTaxesForDebitCardId), config(OtherTaxesForDebitCardAccount), "OTHER", false )
lazy val OtherTaxesForCreditCard = MerchantDetails( config(OtherTaxesForCreditCardId), config(OtherTaxesForCreditCardAccount), "OTHER", true )
lazy val StampDutyLandTaxForDebitCard = MerchantDetails( config(SdltForDebitCardId), config(SdltForDebitCardAccount), "SDLT", false )
lazy val StampDutyLandTaxForCreditCard = MerchantDetails( config(SdltForCreditCardId), config(SdltForCreditCardAccount), "SDLT", true )
lazy val allMerchants = Seq( SelfAssessmentForDebitCard,
SelfAssessmentForCreditCard,
VatForDebitCard, VatForCreditCard,
CorporationTaxForDebitCard, CorporationTaxForCreditCard,
EPayeForDebitCard, EPayeForCreditCard,
OtherTaxesForDebitCard, OtherTaxesForCreditCard,
StampDutyLandTaxForDebitCard, StampDutyLandTaxForCreditCard)
def merchantIdSupported(merchantId: String): Boolean = withMerchantId(merchantId).isDefined
def withMerchantId(targetMerchantId: String): Option[MerchantDetails] = allMerchants.find(_.merchantId == targetMerchantId)
case class MerchantDetails(merchantId: String, accountNumber: String, taxType: String, isCreditCard: Boolean) extends Val(nextId, merchantId) {
val isDebitCard = !isCreditCard
}
}
case class EmisReport(header: Header, merchants: Seq[Merchant])
case class Header(transactionCount:Int)
case class Merchant(header: Option[MerchantHeader], outlet: MerchantOutlet, transactions: Seq[Transaction])
case class MerchantHeader()
case class MerchantOutlet(merchantId: String, acceptedSalesValue: Long, acceptedSalesCount:Long, tradingDate: LocalDate)
case class Transaction(data: TransactionData, supplementaryData: TransactionSuppData)
case class TransactionData(amountInPence: Int, date: LocalDate, status: PaymentStatus.Value, transactionType:TransactionType.Value)
case class TransactionSuppData(originatorsReference: String)
object PaymentStatus extends Enumeration {
type Status = Value
val Accepted = Value("A")
val Pending = Value("P")
val Rejected = Value("R")
}
object TransactionType extends Enumeration {
type Status = Value
val Purchase = Value(0)
val Refund = Value(5)
val CashBack = Value(3)
val CashAdvance = Value(2)
}
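// Illustrative construction sketch (the literal values are hypothetical; because `allMerchants`
// forces every lazy entry, the real config map must supply an id and account for each key in
// MerchantKeys):
//   val merchants = Merchants(fullMerchantConfig) // Map[String, String] keyed by MerchantKeys
//   merchants.withMerchantId("12345678").map(_.taxType) // e.g. Some("SA")
//   merchants.merchantIdSupported("unknown") // false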
| hmrc/worldpay-downloader | app/model/emisReportModel.scala | Scala | apache-2.0 | 4,185 |
package com.sksamuel.scrimage.canvas
import java.awt.{AlphaComposite, RenderingHints, Graphics2D}
import com.sksamuel.scrimage.{Image, Color, Filter}
/**
* Places a watermark at a given location.
*/
class WatermarkFilter(text: String,
x: Int,
y: Int,
color: Color = Color.White,
antiAlias: Boolean = true,
size: Int = 18,
font: Font = Font.SansSerif,
alpha: Double = 0.1)
extends Filter {
require(size > 0, "Font size must be > 0")
private def setupGraphics(g2: Graphics2D): Unit = {
if (antiAlias)
g2.setRenderingHint(RenderingHints.KEY_TEXT_ANTIALIASING, RenderingHints.VALUE_TEXT_ANTIALIAS_ON)
g2.setColor(color)
val alphaComposite = AlphaComposite.getInstance(AlphaComposite.SRC_OVER, alpha.toFloat)
g2.setComposite(alphaComposite)
g2.setFont(new java.awt.Font(font.name, 0, size))
}
override def apply(image: Image): Unit = {
val g2 = image.awt.getGraphics.asInstanceOf[Graphics2D]
setupGraphics(g2)
g2.drawString(text, x, y)
g2.dispose()
}
}
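/**
 * Minimal usage sketch, not part of the original library: applies a translucent watermark to a
 * caller-supplied image; the text, position and alpha here are arbitrary example values.
 */
private object WatermarkFilterExample {
  // draws the watermark in place on the supplied image's backing buffer
  def stamp(image: Image): Unit =
    new WatermarkFilter("draft", x = 20, y = 40, alpha = 0.3).apply(image)
}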
| carlosFattor/scrimage | scrimage-core/src/main/scala/com/sksamuel/scrimage/canvas/WatermarkFilter.scala | Scala | apache-2.0 | 1,164 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.sql.{Timestamp, Date}
import java.util.{TimeZone, Calendar}
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* Test suite for data type casting expression [[Cast]].
*/
class CastSuite extends SparkFunSuite with ExpressionEvalHelper {
private def cast(v: Any, targetType: DataType): Cast = {
v match {
case lit: Expression => Cast(lit, targetType)
case _ => Cast(Literal(v), targetType)
}
}
// expected cannot be null
private def checkCast(v: Any, expected: Any): Unit = {
checkEvaluation(cast(v, Literal(expected).dataType), expected)
}
private def checkNullCast(from: DataType, to: DataType): Unit = {
checkEvaluation(Cast(Literal.create(null, from), to), null)
}
test("null cast") {
import DataTypeTestUtils._
// follow [[org.apache.spark.sql.catalyst.expressions.Cast.canCast]] logic
// to ensure we test every possible cast situation here
atomicTypes.zip(atomicTypes).foreach { case (from, to) =>
checkNullCast(from, to)
}
atomicTypes.foreach(dt => checkNullCast(NullType, dt))
atomicTypes.foreach(dt => checkNullCast(dt, StringType))
checkNullCast(StringType, BinaryType)
checkNullCast(StringType, BooleanType)
checkNullCast(DateType, BooleanType)
checkNullCast(TimestampType, BooleanType)
numericTypes.foreach(dt => checkNullCast(dt, BooleanType))
checkNullCast(StringType, TimestampType)
checkNullCast(BooleanType, TimestampType)
checkNullCast(DateType, TimestampType)
numericTypes.foreach(dt => checkNullCast(dt, TimestampType))
atomicTypes.foreach(dt => checkNullCast(dt, DateType))
checkNullCast(StringType, CalendarIntervalType)
numericTypes.foreach(dt => checkNullCast(StringType, dt))
numericTypes.foreach(dt => checkNullCast(BooleanType, dt))
numericTypes.foreach(dt => checkNullCast(DateType, dt))
numericTypes.foreach(dt => checkNullCast(TimestampType, dt))
for (from <- numericTypes; to <- numericTypes) checkNullCast(from, to)
}
  // cast string to date
test("cast string to date") {
var c = Calendar.getInstance()
c.set(2015, 0, 1, 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015"), DateType), new Date(c.getTimeInMillis))
c = Calendar.getInstance()
c.set(2015, 2, 1, 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015-03"), DateType), new Date(c.getTimeInMillis))
c = Calendar.getInstance()
c.set(2015, 2, 18, 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015-03-18"), DateType), new Date(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18 "), DateType), new Date(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18 123142"), DateType), new Date(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18T123123"), DateType), new Date(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18T"), DateType), new Date(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18X"), DateType), null)
checkEvaluation(Cast(Literal("2015/03/18"), DateType), null)
checkEvaluation(Cast(Literal("2015.03.18"), DateType), null)
checkEvaluation(Cast(Literal("20150318"), DateType), null)
checkEvaluation(Cast(Literal("2015-031-8"), DateType), null)
}
  // cast string to timestamp
test("cast string to timestamp") {
checkEvaluation(Cast(Literal("123"), TimestampType), null)
var c = Calendar.getInstance()
c.set(2015, 0, 1, 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance()
c.set(2015, 2, 1, 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015-03"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance()
c.set(2015, 2, 18, 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015-03-18"), TimestampType),
new Timestamp(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18 "), TimestampType),
new Timestamp(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18T"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance()
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015-03-18 12:03:17"), TimestampType),
new Timestamp(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18T12:03:17"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015-03-18T12:03:17Z"), TimestampType),
new Timestamp(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18 12:03:17Z"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance(TimeZone.getTimeZone("GMT-01:00"))
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015-03-18T12:03:17-1:0"), TimestampType),
new Timestamp(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18T12:03:17-01:00"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:30"))
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015-03-18T12:03:17+07:30"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:03"))
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(Cast(Literal("2015-03-18T12:03:17+7:3"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance()
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 123)
checkEvaluation(Cast(Literal("2015-03-18 12:03:17.123"), TimestampType),
new Timestamp(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18T12:03:17.123"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance(TimeZone.getTimeZone("UTC"))
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 456)
checkEvaluation(Cast(Literal("2015-03-18T12:03:17.456Z"), TimestampType),
new Timestamp(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18 12:03:17.456Z"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance(TimeZone.getTimeZone("GMT-01:00"))
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 123)
checkEvaluation(Cast(Literal("2015-03-18T12:03:17.123-1:0"), TimestampType),
new Timestamp(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18T12:03:17.123-01:00"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:30"))
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 123)
checkEvaluation(Cast(Literal("2015-03-18T12:03:17.123+07:30"), TimestampType),
new Timestamp(c.getTimeInMillis))
c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:03"))
c.set(2015, 2, 18, 12, 3, 17)
c.set(Calendar.MILLISECOND, 123)
checkEvaluation(Cast(Literal("2015-03-18T12:03:17.123+7:3"), TimestampType),
new Timestamp(c.getTimeInMillis))
checkEvaluation(Cast(Literal("2015-03-18 123142"), TimestampType), null)
checkEvaluation(Cast(Literal("2015-03-18T123123"), TimestampType), null)
checkEvaluation(Cast(Literal("2015-03-18X"), TimestampType), null)
checkEvaluation(Cast(Literal("2015/03/18"), TimestampType), null)
checkEvaluation(Cast(Literal("2015.03.18"), TimestampType), null)
checkEvaluation(Cast(Literal("20150318"), TimestampType), null)
checkEvaluation(Cast(Literal("2015-031-8"), TimestampType), null)
checkEvaluation(Cast(Literal("2015-03-18T12:03:17-0:70"), TimestampType), null)
}
test("cast from int") {
checkCast(0, false)
checkCast(1, true)
checkCast(-5, true)
checkCast(1, 1.toByte)
checkCast(1, 1.toShort)
checkCast(1, 1)
checkCast(1, 1.toLong)
checkCast(1, 1.0f)
checkCast(1, 1.0)
checkCast(123, "123")
checkEvaluation(cast(123, DecimalType.USER_DEFAULT), Decimal(123))
checkEvaluation(cast(123, DecimalType(3, 0)), Decimal(123))
checkEvaluation(cast(123, DecimalType(3, 1)), null)
checkEvaluation(cast(123, DecimalType(2, 0)), null)
}
test("cast from long") {
checkCast(0L, false)
checkCast(1L, true)
checkCast(-5L, true)
checkCast(1L, 1.toByte)
checkCast(1L, 1.toShort)
checkCast(1L, 1)
checkCast(1L, 1.toLong)
checkCast(1L, 1.0f)
checkCast(1L, 1.0)
checkCast(123L, "123")
checkEvaluation(cast(123L, DecimalType.USER_DEFAULT), Decimal(123))
checkEvaluation(cast(123L, DecimalType(3, 0)), Decimal(123))
checkEvaluation(cast(123L, DecimalType(3, 1)), null)
checkEvaluation(cast(123L, DecimalType(2, 0)), null)
}
test("cast from boolean") {
checkEvaluation(cast(true, IntegerType), 1)
checkEvaluation(cast(false, IntegerType), 0)
checkEvaluation(cast(true, StringType), "true")
checkEvaluation(cast(false, StringType), "false")
checkEvaluation(cast(cast(1, BooleanType), IntegerType), 1)
checkEvaluation(cast(cast(0, BooleanType), IntegerType), 0)
}
test("cast from int 2") {
checkEvaluation(cast(1, LongType), 1.toLong)
checkEvaluation(cast(cast(1000, TimestampType), LongType), 1.toLong)
checkEvaluation(cast(cast(-1200, TimestampType), LongType), -2.toLong)
checkEvaluation(cast(123, DecimalType.USER_DEFAULT), Decimal(123))
checkEvaluation(cast(123, DecimalType(3, 0)), Decimal(123))
checkEvaluation(cast(123, DecimalType(3, 1)), null)
checkEvaluation(cast(123, DecimalType(2, 0)), null)
}
test("cast from float") {
checkCast(0.0f, false)
checkCast(0.5f, true)
checkCast(-5.0f, true)
checkCast(1.5f, 1.toByte)
checkCast(1.5f, 1.toShort)
checkCast(1.5f, 1)
checkCast(1.5f, 1.toLong)
checkCast(1.5f, 1.5)
checkCast(1.5f, "1.5")
}
test("cast from double") {
checkCast(0.0, false)
checkCast(0.5, true)
checkCast(-5.0, true)
checkCast(1.5, 1.toByte)
checkCast(1.5, 1.toShort)
checkCast(1.5, 1)
checkCast(1.5, 1.toLong)
checkCast(1.5, 1.5f)
checkCast(1.5, "1.5")
checkEvaluation(cast(cast(1.toDouble, TimestampType), DoubleType), 1.toDouble)
checkEvaluation(cast(cast(1.toDouble, TimestampType), DoubleType), 1.toDouble)
}
  // cast from string
test("cast from string") {
assert(cast("abcdef", StringType).nullable === false)
assert(cast("abcdef", BinaryType).nullable === false)
assert(cast("abcdef", BooleanType).nullable === false)
assert(cast("abcdef", TimestampType).nullable === true)
assert(cast("abcdef", LongType).nullable === true)
assert(cast("abcdef", IntegerType).nullable === true)
assert(cast("abcdef", ShortType).nullable === true)
assert(cast("abcdef", ByteType).nullable === true)
assert(cast("abcdef", DecimalType.USER_DEFAULT).nullable === true)
assert(cast("abcdef", DecimalType(4, 2)).nullable === true)
assert(cast("abcdef", DoubleType).nullable === true)
assert(cast("abcdef", FloatType).nullable === true)
}
  // data type casting
test("data type casting") {
val sd = "1970-01-01"
val d = Date.valueOf(sd)
val zts = sd + " 00:00:00"
val sts = sd + " 00:00:02"
val nts = sts + ".1"
val ts = Timestamp.valueOf(nts)
var c = Calendar.getInstance()
c.set(2015, 2, 8, 2, 30, 0)
checkEvaluation(cast(cast(new Timestamp(c.getTimeInMillis), StringType), TimestampType),
c.getTimeInMillis * 1000)
c = Calendar.getInstance()
c.set(2015, 10, 1, 2, 30, 0)
checkEvaluation(cast(cast(new Timestamp(c.getTimeInMillis), StringType), TimestampType),
c.getTimeInMillis * 1000)
checkEvaluation(cast("abdef", StringType), "abdef")
checkEvaluation(cast("abdef", DecimalType.USER_DEFAULT), null)
checkEvaluation(cast("abdef", TimestampType), null)
checkEvaluation(cast("12.65", DecimalType.SYSTEM_DEFAULT), Decimal(12.65))
checkEvaluation(cast(cast(sd, DateType), StringType), sd)
checkEvaluation(cast(cast(d, StringType), DateType), 0)
checkEvaluation(cast(cast(nts, TimestampType), StringType), nts)
checkEvaluation(cast(cast(ts, StringType), TimestampType), DateTimeUtils.fromJavaTimestamp(ts))
    // convert all of them to string type and check the results
checkEvaluation(cast(cast(cast(nts, TimestampType), DateType), StringType), sd)
checkEvaluation(cast(cast(cast(ts, DateType), TimestampType), StringType), zts)
checkEvaluation(cast(cast("abdef", BinaryType), StringType), "abdef")
checkEvaluation(cast(cast(cast(cast(
cast(cast("5", ByteType), ShortType), IntegerType), FloatType), DoubleType), LongType),
5.toLong)
checkEvaluation(
cast(cast(cast(cast(cast(cast("5", ByteType), TimestampType),
DecimalType.SYSTEM_DEFAULT), LongType), StringType), ShortType),
0.toShort)
checkEvaluation(
cast(cast(cast(cast(cast(cast("5", TimestampType), ByteType),
DecimalType.SYSTEM_DEFAULT), LongType), StringType), ShortType),
null)
checkEvaluation(cast(cast(cast(cast(cast(cast("5", DecimalType.SYSTEM_DEFAULT),
ByteType), TimestampType), LongType), StringType), ShortType),
0.toShort)
checkEvaluation(cast("23", DoubleType), 23d)
checkEvaluation(cast("23", IntegerType), 23)
checkEvaluation(cast("23", FloatType), 23f)
checkEvaluation(cast("23", DecimalType.USER_DEFAULT), Decimal(23))
checkEvaluation(cast("23", ByteType), 23.toByte)
checkEvaluation(cast("23", ShortType), 23.toShort)
checkEvaluation(cast("2012-12-11", DoubleType), null)
checkEvaluation(cast(123, IntegerType), 123)
checkEvaluation(cast(Literal.create(null, IntegerType), ShortType), null)
}
test("cast and add") {
checkEvaluation(Add(Literal(23d), cast(true, DoubleType)), 24d)
checkEvaluation(Add(Literal(23), cast(true, IntegerType)), 24)
checkEvaluation(Add(Literal(23f), cast(true, FloatType)), 24f)
checkEvaluation(Add(Literal(Decimal(23)), cast(true, DecimalType.USER_DEFAULT)), Decimal(24))
checkEvaluation(Add(Literal(23.toByte), cast(true, ByteType)), 24.toByte)
checkEvaluation(Add(Literal(23.toShort), cast(true, ShortType)), 24.toShort)
}
test("from decimal") {
checkCast(Decimal(0.0), false)
checkCast(Decimal(0.5), true)
checkCast(Decimal(-5.0), true)
checkCast(Decimal(1.5), 1.toByte)
checkCast(Decimal(1.5), 1.toShort)
checkCast(Decimal(1.5), 1)
checkCast(Decimal(1.5), 1.toLong)
checkCast(Decimal(1.5), 1.5f)
checkCast(Decimal(1.5), 1.5)
checkCast(Decimal(1.5), "1.5")
}
  // casting to fixed-precision decimals
test("casting to fixed-precision decimals") {
// Overflow and rounding for casting to fixed-precision decimals:
// - Values should round with HALF_UP mode by default when you lower scale
// - Values that would overflow the target precision should turn into null
// - Because of this, casts to fixed-precision decimals should be nullable
assert(cast(123, DecimalType.USER_DEFAULT).nullable === true)
assert(cast(10.03f, DecimalType.SYSTEM_DEFAULT).nullable === true)
assert(cast(10.03, DecimalType.SYSTEM_DEFAULT).nullable === true)
assert(cast(Decimal(10.03), DecimalType.SYSTEM_DEFAULT).nullable === true)
assert(cast(123, DecimalType(2, 1)).nullable === true)
assert(cast(10.03f, DecimalType(2, 1)).nullable === true)
assert(cast(10.03, DecimalType(2, 1)).nullable === true)
assert(cast(Decimal(10.03), DecimalType(2, 1)).nullable === true)
checkEvaluation(cast(10.03, DecimalType.SYSTEM_DEFAULT), Decimal(10.03))
checkEvaluation(cast(10.03, DecimalType(4, 2)), Decimal(10.03))
checkEvaluation(cast(10.03, DecimalType(3, 1)), Decimal(10.0))
checkEvaluation(cast(10.03, DecimalType(2, 0)), Decimal(10))
checkEvaluation(cast(10.03, DecimalType(1, 0)), null)
checkEvaluation(cast(10.03, DecimalType(2, 1)), null)
checkEvaluation(cast(10.03, DecimalType(3, 2)), null)
checkEvaluation(cast(Decimal(10.03), DecimalType(3, 1)), Decimal(10.0))
checkEvaluation(cast(Decimal(10.03), DecimalType(3, 2)), null)
checkEvaluation(cast(10.05, DecimalType.SYSTEM_DEFAULT), Decimal(10.05))
checkEvaluation(cast(10.05, DecimalType(4, 2)), Decimal(10.05))
checkEvaluation(cast(10.05, DecimalType(3, 1)), Decimal(10.1))
checkEvaluation(cast(10.05, DecimalType(2, 0)), Decimal(10))
checkEvaluation(cast(10.05, DecimalType(1, 0)), null)
checkEvaluation(cast(10.05, DecimalType(2, 1)), null)
checkEvaluation(cast(10.05, DecimalType(3, 2)), null)
checkEvaluation(cast(Decimal(10.05), DecimalType(3, 1)), Decimal(10.1))
checkEvaluation(cast(Decimal(10.05), DecimalType(3, 2)), null)
checkEvaluation(cast(9.95, DecimalType(3, 2)), Decimal(9.95))
checkEvaluation(cast(9.95, DecimalType(3, 1)), Decimal(10.0))
checkEvaluation(cast(9.95, DecimalType(2, 0)), Decimal(10))
checkEvaluation(cast(9.95, DecimalType(2, 1)), null)
checkEvaluation(cast(9.95, DecimalType(1, 0)), null)
checkEvaluation(cast(Decimal(9.95), DecimalType(3, 1)), Decimal(10.0))
checkEvaluation(cast(Decimal(9.95), DecimalType(1, 0)), null)
checkEvaluation(cast(-9.95, DecimalType(3, 2)), Decimal(-9.95))
checkEvaluation(cast(-9.95, DecimalType(3, 1)), Decimal(-10.0))
checkEvaluation(cast(-9.95, DecimalType(2, 0)), Decimal(-10))
checkEvaluation(cast(-9.95, DecimalType(2, 1)), null)
checkEvaluation(cast(-9.95, DecimalType(1, 0)), null)
checkEvaluation(cast(Decimal(-9.95), DecimalType(3, 1)), Decimal(-10.0))
checkEvaluation(cast(Decimal(-9.95), DecimalType(1, 0)), null)
checkEvaluation(cast(Double.NaN, DecimalType.SYSTEM_DEFAULT), null)
checkEvaluation(cast(1.0 / 0.0, DecimalType.SYSTEM_DEFAULT), null)
checkEvaluation(cast(Float.NaN, DecimalType.SYSTEM_DEFAULT), null)
checkEvaluation(cast(1.0f / 0.0f, DecimalType.SYSTEM_DEFAULT), null)
checkEvaluation(cast(Double.NaN, DecimalType(2, 1)), null)
checkEvaluation(cast(1.0 / 0.0, DecimalType(2, 1)), null)
checkEvaluation(cast(Float.NaN, DecimalType(2, 1)), null)
checkEvaluation(cast(1.0f / 0.0f, DecimalType(2, 1)), null)
}
test("cast from date") {
val d = Date.valueOf("1970-01-01")
checkEvaluation(cast(d, ShortType), null)
checkEvaluation(cast(d, IntegerType), null)
checkEvaluation(cast(d, LongType), null)
checkEvaluation(cast(d, FloatType), null)
checkEvaluation(cast(d, DoubleType), null)
checkEvaluation(cast(d, DecimalType.SYSTEM_DEFAULT), null)
checkEvaluation(cast(d, DecimalType(10, 2)), null)
checkEvaluation(cast(d, StringType), "1970-01-01")
checkEvaluation(cast(cast(d, TimestampType), StringType), "1970-01-01 00:00:00")
}
test("cast from timestamp") {
val millis = 15 * 1000 + 3
val seconds = millis * 1000 + 3
val ts = new Timestamp(millis)
val tss = new Timestamp(seconds)
checkEvaluation(cast(ts, ShortType), 15.toShort)
checkEvaluation(cast(ts, IntegerType), 15)
checkEvaluation(cast(ts, LongType), 15.toLong)
checkEvaluation(cast(ts, FloatType), 15.003f)
checkEvaluation(cast(ts, DoubleType), 15.003)
checkEvaluation(cast(cast(tss, ShortType), TimestampType), DateTimeUtils.fromJavaTimestamp(ts))
checkEvaluation(cast(cast(tss, IntegerType), TimestampType),
DateTimeUtils.fromJavaTimestamp(ts))
checkEvaluation(cast(cast(tss, LongType), TimestampType), DateTimeUtils.fromJavaTimestamp(ts))
checkEvaluation(
cast(cast(millis.toFloat / 1000, TimestampType), FloatType),
millis.toFloat / 1000)
checkEvaluation(
cast(cast(millis.toDouble / 1000, TimestampType), DoubleType),
millis.toDouble / 1000)
checkEvaluation(
cast(cast(Decimal(1), TimestampType), DecimalType.SYSTEM_DEFAULT),
Decimal(1))
// A test for higher precision than millis
checkEvaluation(cast(cast(0.000001, TimestampType), DoubleType), 0.000001)
checkEvaluation(cast(Double.NaN, TimestampType), null)
checkEvaluation(cast(1.0 / 0.0, TimestampType), null)
checkEvaluation(cast(Float.NaN, TimestampType), null)
checkEvaluation(cast(1.0f / 0.0f, TimestampType), null)
}
test("cast from array") {
val array = Literal.create(Seq("123", "abc", "", null),
ArrayType(StringType, containsNull = true))
val array_notNull = Literal.create(Seq("123", "abc", ""),
ArrayType(StringType, containsNull = false))
checkNullCast(ArrayType(StringType), ArrayType(IntegerType))
{
val ret = cast(array, ArrayType(IntegerType, containsNull = true))
assert(ret.resolved === true)
checkEvaluation(ret, Seq(123, null, null, null))
}
{
val ret = cast(array, ArrayType(IntegerType, containsNull = false))
assert(ret.resolved === false)
}
{
val ret = cast(array, ArrayType(BooleanType, containsNull = true))
assert(ret.resolved === true)
checkEvaluation(ret, Seq(true, true, false, null))
}
{
val ret = cast(array, ArrayType(BooleanType, containsNull = false))
assert(ret.resolved === false)
}
{
val ret = cast(array_notNull, ArrayType(IntegerType, containsNull = true))
assert(ret.resolved === true)
checkEvaluation(ret, Seq(123, null, null))
}
{
val ret = cast(array_notNull, ArrayType(IntegerType, containsNull = false))
assert(ret.resolved === false)
}
{
val ret = cast(array_notNull, ArrayType(BooleanType, containsNull = true))
assert(ret.resolved === true)
checkEvaluation(ret, Seq(true, true, false))
}
{
val ret = cast(array_notNull, ArrayType(BooleanType, containsNull = false))
assert(ret.resolved === true)
checkEvaluation(ret, Seq(true, true, false))
}
{
val ret = cast(array, IntegerType)
assert(ret.resolved === false)
}
}
test("cast from map") {
val map = Literal.create(
Map("a" -> "123", "b" -> "abc", "c" -> "", "d" -> null),
MapType(StringType, StringType, valueContainsNull = true))
val map_notNull = Literal.create(
Map("a" -> "123", "b" -> "abc", "c" -> ""),
MapType(StringType, StringType, valueContainsNull = false))
checkNullCast(MapType(StringType, IntegerType), MapType(StringType, StringType))
{
val ret = cast(map, MapType(StringType, IntegerType, valueContainsNull = true))
assert(ret.resolved === true)
checkEvaluation(ret, Map("a" -> 123, "b" -> null, "c" -> null, "d" -> null))
}
{
val ret = cast(map, MapType(StringType, IntegerType, valueContainsNull = false))
assert(ret.resolved === false)
}
{
val ret = cast(map, MapType(StringType, BooleanType, valueContainsNull = true))
assert(ret.resolved === true)
checkEvaluation(ret, Map("a" -> true, "b" -> true, "c" -> false, "d" -> null))
}
{
val ret = cast(map, MapType(StringType, BooleanType, valueContainsNull = false))
assert(ret.resolved === false)
}
{
val ret = cast(map, MapType(IntegerType, StringType, valueContainsNull = true))
assert(ret.resolved === false)
}
{
val ret = cast(map_notNull, MapType(StringType, IntegerType, valueContainsNull = true))
assert(ret.resolved === true)
checkEvaluation(ret, Map("a" -> 123, "b" -> null, "c" -> null))
}
{
val ret = cast(map_notNull, MapType(StringType, IntegerType, valueContainsNull = false))
assert(ret.resolved === false)
}
{
val ret = cast(map_notNull, MapType(StringType, BooleanType, valueContainsNull = true))
assert(ret.resolved === true)
checkEvaluation(ret, Map("a" -> true, "b" -> true, "c" -> false))
}
{
val ret = cast(map_notNull, MapType(StringType, BooleanType, valueContainsNull = false))
assert(ret.resolved === true)
checkEvaluation(ret, Map("a" -> true, "b" -> true, "c" -> false))
}
{
val ret = cast(map_notNull, MapType(IntegerType, StringType, valueContainsNull = true))
assert(ret.resolved === false)
}
{
val ret = cast(map, IntegerType)
assert(ret.resolved === false)
}
}
test("cast from struct") {
checkNullCast(
StructType(Seq(
StructField("a", StringType),
StructField("b", IntegerType))),
StructType(Seq(
StructField("a", StringType),
StructField("b", StringType))))
val struct = Literal.create(
InternalRow(
UTF8String.fromString("123"),
UTF8String.fromString("abc"),
UTF8String.fromString(""),
null),
StructType(Seq(
StructField("a", StringType, nullable = true),
StructField("b", StringType, nullable = true),
StructField("c", StringType, nullable = true),
StructField("d", StringType, nullable = true))))
val struct_notNull = Literal.create(
InternalRow(
UTF8String.fromString("123"),
UTF8String.fromString("abc"),
UTF8String.fromString("")),
StructType(Seq(
StructField("a", StringType, nullable = false),
StructField("b", StringType, nullable = false),
StructField("c", StringType, nullable = false))))
{
val ret = cast(struct, StructType(Seq(
StructField("a", IntegerType, nullable = true),
StructField("b", IntegerType, nullable = true),
StructField("c", IntegerType, nullable = true),
StructField("d", IntegerType, nullable = true))))
assert(ret.resolved === true)
checkEvaluation(ret, InternalRow(123, null, null, null))
}
{
val ret = cast(struct, StructType(Seq(
StructField("a", IntegerType, nullable = true),
StructField("b", IntegerType, nullable = true),
StructField("c", IntegerType, nullable = false),
StructField("d", IntegerType, nullable = true))))
assert(ret.resolved === false)
}
{
val ret = cast(struct, StructType(Seq(
StructField("a", BooleanType, nullable = true),
StructField("b", BooleanType, nullable = true),
StructField("c", BooleanType, nullable = true),
StructField("d", BooleanType, nullable = true))))
assert(ret.resolved === true)
checkEvaluation(ret, InternalRow(true, true, false, null))
}
{
val ret = cast(struct, StructType(Seq(
StructField("a", BooleanType, nullable = true),
StructField("b", BooleanType, nullable = true),
StructField("c", BooleanType, nullable = false),
StructField("d", BooleanType, nullable = true))))
assert(ret.resolved === false)
}
{
val ret = cast(struct_notNull, StructType(Seq(
StructField("a", IntegerType, nullable = true),
StructField("b", IntegerType, nullable = true),
StructField("c", IntegerType, nullable = true))))
assert(ret.resolved === true)
checkEvaluation(ret, InternalRow(123, null, null))
}
{
val ret = cast(struct_notNull, StructType(Seq(
StructField("a", IntegerType, nullable = true),
StructField("b", IntegerType, nullable = true),
StructField("c", IntegerType, nullable = false))))
assert(ret.resolved === false)
}
{
val ret = cast(struct_notNull, StructType(Seq(
StructField("a", BooleanType, nullable = true),
StructField("b", BooleanType, nullable = true),
StructField("c", BooleanType, nullable = true))))
assert(ret.resolved === true)
checkEvaluation(ret, InternalRow(true, true, false))
}
{
val ret = cast(struct_notNull, StructType(Seq(
StructField("a", BooleanType, nullable = true),
StructField("b", BooleanType, nullable = true),
StructField("c", BooleanType, nullable = false))))
assert(ret.resolved === true)
checkEvaluation(ret, InternalRow(true, true, false))
}
{
val ret = cast(struct, StructType(Seq(
StructField("a", StringType, nullable = true),
StructField("b", StringType, nullable = true),
StructField("c", StringType, nullable = true))))
assert(ret.resolved === false)
}
{
val ret = cast(struct, IntegerType)
assert(ret.resolved === false)
}
}
test("complex casting") {
val complex = Literal.create(
Row(
Seq("123", "abc", ""),
Map("a" ->"123", "b" -> "abc", "c" -> ""),
Row(0)),
StructType(Seq(
StructField("a",
ArrayType(StringType, containsNull = false), nullable = true),
StructField("m",
MapType(StringType, StringType, valueContainsNull = false), nullable = true),
StructField("s",
StructType(Seq(
StructField("i", IntegerType, nullable = true)))))))
val ret = cast(complex, StructType(Seq(
StructField("a",
ArrayType(IntegerType, containsNull = true), nullable = true),
StructField("m",
MapType(StringType, BooleanType, valueContainsNull = false), nullable = true),
StructField("s",
StructType(Seq(
StructField("l", LongType, nullable = true)))))))
assert(ret.resolved === true)
checkEvaluation(ret, Row(
Seq(123, null, null),
Map("a" -> true, "b" -> true, "c" -> false),
Row(0L)))
}
  // cast between string and interval
  test("cast between string and interval") {
import org.apache.spark.unsafe.types.CalendarInterval
checkEvaluation(Cast(Literal("interval -3 month 7 hours"), CalendarIntervalType),
new CalendarInterval(-3, 7 * CalendarInterval.MICROS_PER_HOUR))
checkEvaluation(Cast(Literal.create(
new CalendarInterval(15, -3 * CalendarInterval.MICROS_PER_DAY), CalendarIntervalType),
StringType),
"interval 1 years 3 months -3 days")
}
}
| tophua/spark1.52 | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CastSuite.scala | Scala | apache-2.0 | 31,404 |
// Copyright (c) 2016 Marco Marini, [email protected]
//
// Licensed under the MIT License (MIT);
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://opensource.org/licenses/MIT
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
package org.mmarini.actd.samples
import org.apache.commons.math3.random.MersenneTwister
import org.mmarini.actd.Action
import org.mmarini.actd.Feedback
import org.mmarini.actd.Status
import org.mmarini.actd.TDNeuralNet
import org.mmarini.actd.TDParms
import org.mmarini.actd.samples.WallStatus.Direction
import com.typesafe.scalalogging.LazyLogging
import WallStatus.PadAction
import breeze.linalg.DenseVector
import breeze.stats.distributions.RandBasis
/** The status of the wall game */
case class WallStatus(ball: (Int, Int), direction: Direction.Value, pad: Int) extends Status {
import PadAction._
import Direction._
import WallStatus._
private val BallDim = Width * Height
private val SpeedDim = 4
private val PadDim = LastPad + 1
private val FinalVector = DenseVector.zeros[Double](BallDim * SpeedDim * PadDim + 1)
FinalVector.update(BallDim * SpeedDim * PadDim, 1.0)
require(ball._2 >= 0)
require(ball._2 <= Width)
require(pad >= 0)
require(pad <= LastPad)
require(ball._1 <= Height)
require(ball._1 >= 0)
require(ball._1 >= 1 || ball._2 == 0 && direction == SE && pad == 1, s"$ball $direction $pad")
  /** Transforms the status into a vector */
val toDenseVector: DenseVector[Double] = {
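    // One-hot encoding: BallDim * SpeedDim * PadDim slots plus one extra slot reserved
    // for the final (game over) status; the active index combines the ball position,
    // the direction id and the pad position.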
if (finalStatus) {
FinalVector
} else {
val v = DenseVector.zeros[Double](BallDim * SpeedDim * PadDim + 1)
val ballIdx = (ball._1 - 1) * Width + ball._2
val speedIdx = direction.id
val idx = ballIdx + speedIdx * BallDim + pad * (BallDim * SpeedDim)
v.update(idx, 1.0)
v
}
}
/** Returns a [[WallStatus]] with changed pad location */
def pad(x: Int): WallStatus = WallStatus(ball, direction, x)
/** Moves the pad by action */
private def movePad(action: Action) = PadAction.apply(action) match {
case Left if pad > 0 => pad - 1
case Right if pad < LastPad => pad + 1
case _ => pad
}
/** Produce the feedback of an applied action */
def apply(action: Action): Feedback = {
val pad1 = movePad(action)
val (s1, reward) = if (finalStatus) {
// Restarts because ball is out of field
(WallStatus.initial, 0.0)
} else {
val nextOpt = StatusMap.get((this, PadAction(action)))
nextOpt.getOrElse(
direction match {
case NO => (WallStatus((ball._1 + 1, ball._2 - 1), direction, pad1), 0.0)
case NE => (WallStatus((ball._1 + 1, ball._2 + 1), direction, pad1), 0.0)
case SO if (ball._1 == 1) => (endStatus, NegativeReward)
case SE if (ball._1 == 1) => (endStatus, NegativeReward)
case SO => (WallStatus((ball._1 - 1, ball._2 - 1), direction, pad1), 0.0)
case SE => (WallStatus((ball._1 - 1, ball._2 + 1), direction, pad1), 0.0)
})
}
Feedback(this, action, reward, s1)
}
/** Returns true if is a final status */
override def finalStatus: Boolean = this == endStatus
}
/** A factory of [[WallStatus]] */
object WallStatus extends LazyLogging {
type TransitionSource = (WallStatus, PadAction.Value)
type TransitionTarget = (WallStatus, Double)
type TransitionMap = Map[TransitionSource, TransitionTarget]
val Height = 10
val Width = 13
val PadSize = 3
val SecondLastRow = Height - 1
val PositiveReward = 5.0
val NegativeReward = -1.0
val LastCol = Width - 1
val SecondLastCol = Width - 2
val LastPad = Width - PadSize
val SecondLastPad = LastPad - 1
val Alpha = 100e-6
val Beta = 0.3
val Gamma = 0.962
val Epsilon = 0.1
// val EpsilonGreedy = 0.9
val EpsilonGreedy = 5e-3
val Lambda = 0e-3
val Eta = 100e-3
val Sigma = 1.0
val Seed = 31415L
val MaxTrainingSamples = 1000
val OutputCount = 3
val HiddenCount = 20
/** MazeAction */
object PadAction extends Enumeration {
val Rest, Left, Right = Value
}
object Direction extends Enumeration {
val NO, NE, SE, SO = Value
}
import PadAction._
import Direction._
val random = new RandBasis(new MersenneTwister(Seed))
val endStatus = WallStatus((0, 0), SE, 1)
/** The state transition map */
val StatusMap = createMap
  /** Creates an initial game status */
def initial: WallStatus = {
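    // the ball starts on the bottom row in a random column, moving toward the wall,
    // with the pad centred under the ball column (clamped to the field borders)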
val b = (1, random.randInt(Width).get)
val s = b match {
case (_, 0) => NE
case (_, LastCol) => NO
case _ => random.choose(Seq(Direction.NE, Direction.NO)).get
}
val pad = b match {
case (_, 0) => 0
case (_, c) if (c >= LastPad) => LastPad
case (_, c) => c - 1
}
WallStatus(b, s, pad)
}
  /** Creates the initial environment parameters */
def initEnvParms: (WallStatus, TDParms, TDNeuralNet, TDNeuralNet) = {
val initStatus = WallStatus.initial
val inputCount = initStatus.toDenseVector.length
val parms = TDParms(
alpha = Alpha,
beta = Beta,
gamma = Gamma,
epsilon = EpsilonGreedy,
lambda = Lambda,
eta = Eta,
maxTrainingSamples = MaxTrainingSamples,
random = new RandBasis(new MersenneTwister(Seed)))
val critic = TDNeuralNet(inputCount +: Seq() :+ 1, parms, Sigma)
val actor = TDNeuralNet(inputCount +: Seq() :+ OutputCount, parms, Sigma)
(initStatus, parms, critic, actor)
}
private def validateTx(s: Seq[(TransitionSource, TransitionTarget)]) = {
require(s.size == s.toMap.size, s)
s.toMap
}
private def createTx0 =
validateTx(for {
pad <- (0 to LastPad)
dir <- Seq(NO, NE, SO)
act <- PadAction.values.toSeq
} yield {
val s0 = WallStatus((Height, 0), dir, pad)
val s1 = WallStatus((SecondLastRow, 1), SE, s0.movePad(act.id))
((s0, act), (s1, 0.0))
})
private def createTx1 =
validateTx(for {
pad <- (0 to LastPad)
dir <- Seq(NO, NE, SE)
act <- PadAction.values.toSeq
} yield {
val s0 = WallStatus((Height, LastCol), dir, pad)
val s1 = WallStatus((SecondLastRow, SecondLastCol), SO, s0.movePad(act.id))
((s0, act), (s1, 0.0))
})
private def createTx2 =
validateTx(for {
c <- 1 to SecondLastCol
pad <- (0 to LastPad)
act <- PadAction.values.toSeq
} yield {
val s0 = WallStatus((Height, c), NO, pad)
val s1 = WallStatus((SecondLastRow, c - 1), SO, s0.movePad(act.id))
((s0, act), (s1, 0.0))
})
private def createTx3 =
validateTx(for {
c <- 1 to SecondLastCol
pad <- (0 to LastPad)
act <- PadAction.values.toSeq
} yield {
val s0 = WallStatus((Height, c), NE, pad)
val s1 = WallStatus((SecondLastRow, c + 1), SE, s0.movePad(act.id))
((s0, act), (s1, 0.0))
})
private def createTx4 =
validateTx(for {
r <- 2 to SecondLastRow
pad <- (0 to LastPad)
act <- PadAction.values.toSeq
} yield {
val s0 = WallStatus((r, 0), NO, pad)
val s1 = WallStatus((r + 1, 1), NE, s0.movePad(act.id))
((s0, act), (s1, 0.0))
})
private def createTx5 =
validateTx(for {
r <- 2 to SecondLastRow
pad <- (0 to LastPad)
act <- PadAction.values.toSeq
} yield {
val s0 = WallStatus((r, 0), SO, pad)
val s1 = WallStatus((r - 1, 1), SE, s0.movePad(act.id))
((s0, act), (s1, 0.0))
})
private def createTx6 =
validateTx(for {
r <- 2 to SecondLastRow
pad <- (0 to LastPad)
act <- PadAction.values.toSeq
} yield {
val s0 = WallStatus((r, LastCol), SE, pad)
val s1 = WallStatus((r - 1, SecondLastCol), SO, s0.movePad(act.id))
((s0, act), (s1, 0.0))
})
private def createTx7 =
validateTx(for {
r <- 2 to SecondLastRow
pad <- (0 to LastPad)
act <- PadAction.values.toSeq
} yield {
val s0 = WallStatus((r, LastCol), NE, pad)
val s1 = WallStatus((r + 1, SecondLastCol), NO, s0.movePad(act.id))
((s0, act), (s1, 0.0))
})
private def createTx8 =
validateTx(for {
pad <- 1 to SecondLastPad
c <- pad to pad + 2
} yield {
val s0 = WallStatus((1, c), SO, pad)
val s1 = WallStatus((2, c - 1), NO, pad)
((s0, Rest), (s1, PositiveReward))
})
private def createTx9 =
validateTx(for {
pad <- 1 to SecondLastPad
c <- pad to pad + 2
} yield {
val s0 = WallStatus((1, c), SE, pad)
val s1 = WallStatus((2, c + 1), NE, pad)
((s0, Rest), (s1, PositiveReward))
})
private def createTx10 =
validateTx(for {
pad <- 0 to LastPad - 2
c <- pad + 1 to pad + 3
} yield {
val s0 = WallStatus((1, c), SO, pad)
val s1 = WallStatus((2, c - 1), NO, pad + 1)
((s0, Right), (s1, PositiveReward))
})
private def createTx11 =
validateTx(for {
pad <- 2 to LastPad
c <- pad - 1 to pad + 1
} yield {
val s0 = WallStatus((1, c), SE, pad)
val s1 = WallStatus((2, c + 1), NE, pad - 1)
((s0, Left), (s1, PositiveReward))
})
private def createTx12 =
validateTx(for {
dir <- Seq(SO, SE)
pad <- 0 to 1
} yield {
val s0 = WallStatus((1, 0), dir, pad)
val s1 = WallStatus((2, 1), NE, pad)
((s0, Rest), (s1, PositiveReward))
})
private def createTx13 =
validateTx(for {
dir <- Seq(SO, SE)
} yield {
val s0 = WallStatus((1, 0), dir, 0)
val s1 = WallStatus((2, 1), NE, 1)
((s0, Right), (s1, PositiveReward))
})
private def createTx14 =
validateTx(for {
pad <- 1 to 2
dir <- Seq(SO, SE)
} yield {
val s0 = WallStatus((1, 0), dir, pad)
val s1 = WallStatus((2, 1), NE, pad - 1)
((s0, Left), (s1, PositiveReward))
})
private def createTx15 =
validateTx(for {
pad <- SecondLastPad to LastPad
dir <- Seq(SO, SE)
} yield {
val s0 = WallStatus((1, LastCol), dir, pad)
val s1 = WallStatus((2, SecondLastCol), NO, pad)
((s0, Rest), (s1, PositiveReward))
})
private def createTx16 =
validateTx(for {
dir <- Seq(SO, SE)
} yield {
val s0 = WallStatus((1, LastCol), dir, LastPad)
val s1 = WallStatus((2, SecondLastCol), NO, SecondLastPad)
((s0, Left), (s1, PositiveReward))
})
private def createTx17 =
validateTx(for {
pad <- LastPad - 2 to SecondLastPad
dir <- Seq(SO, SE)
} yield {
val s0 = WallStatus((1, LastCol), dir, pad)
val s1 = WallStatus((2, SecondLastCol), NO, pad + 1)
((s0, Right), (s1, PositiveReward))
})
private def createTx18 =
validateTx(for {
pad <- 2 to LastPad
} yield {
val s0 = WallStatus((1, pad - 1), SE, pad)
val s1 = WallStatus((2, pad - 2), NO, pad)
((s0, Rest), (s1, PositiveReward))
})
private def createTx19 =
validateTx(for {
pad <- 3 to LastPad
} yield {
val s0 = WallStatus((1, pad - 2), SE, pad)
val s1 = WallStatus((2, pad - 3), NO, pad - 1)
((s0, Left), (s1, PositiveReward))
})
private def createTx20 =
validateTx(for {
pad <- 1 to SecondLastPad
} yield {
val s0 = WallStatus((1, pad), SE, pad)
val s1 = WallStatus((2, pad + 1), NE, pad + 1)
((s0, Right), (s1, PositiveReward))
})
private def createTx21 =
validateTx(for {
pad <- 0 to LastPad - 2
} yield {
val s0 = WallStatus((1, pad + PadSize), SO, pad)
val s1 = WallStatus((2, pad + PadSize + 1), NE, pad)
((s0, Rest), (s1, PositiveReward))
})
private def createTx22 =
validateTx(for {
pad <- 0 to LastPad - 3
} yield {
val s0 = WallStatus((1, pad + PadSize + 1), SO, pad)
val s1 = WallStatus((2, pad + PadSize + 2), NE, pad + 1)
((s0, Right), (s1, PositiveReward))
})
private def createTx23 =
validateTx(for {
pad <- 1 to SecondLastPad
} yield {
val s0 = WallStatus((1, pad + PadSize - 1), SO, pad)
val s1 = WallStatus((2, pad + PadSize - 2), NO, pad - 1)
((s0, Left), (s1, PositiveReward))
})
/** Create the map of transitions */
private def createMap: TransitionMap = {
val lm =
createTx0 +:
createTx1 +:
createTx2 +:
createTx3 +:
createTx4 +:
createTx5 +:
createTx6 +:
createTx7 +:
createTx8 +:
createTx9 +:
createTx10 +:
createTx11 +:
createTx12 +:
createTx13 +:
createTx14 +:
createTx15 +:
createTx16 +:
createTx17 +:
createTx18 +:
createTx19 +:
createTx20 +:
createTx21 +:
createTx22 +:
createTx23 +:
Seq()
//
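    // sanity check: the per-case transition maps must be pairwise disjoint, otherwise
    // a (status, action) source would map to two conflicting targets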
val lmi = lm.zipWithIndex
for {
(li, i) <- lmi
(lj, j) <- lmi
if (j > i)
} {
val inter = li.keySet & lj.keySet
require(inter.isEmpty, s"createTx$i & createTx$j = $inter")
}
val map = lm.reduce(_ ++ _)
require(map.size == lm.map(_.size).sum, s"${map.size} != ${lm.map(_.size).sum}")
map
}
}
| m-marini/actd | src/main/scala/org/mmarini/actd/samples/WallStatus.scala | Scala | mit | 14,199 |
package org.scalaprops
import java.util.logging.Logger
import reflect.Manifest
import serialization.{StandardSerializers, Serializers}
/**
* Can be used to create beans of registered types.
*/
class BeanFactory {
type BeanConstructor = () => _ <: Bean
type BeanCreator = Symbol => _ <: Option[Bean]
/** True if properties not already present in the bean should be added to it when creating it from a map */
var addUnknownProperties = true
private var beanConstructors: Map[Symbol, () => _ <: Bean] = Map()
private val initialBeanCreator: BeanCreator = { (name: Symbol) => beanConstructors.get(name).flatMap(x => Some(x())) }
private var beanCreators: List[Symbol => _ <: Option[Bean]] = List(initialBeanCreator)
private var defaultBeanConstructor: () => _ <: Bean = {() => new PropertyBean()}
def registerBeanType(beanType: Class[_ <: Bean]) { registerBeanType(Symbol(beanType.getSimpleName), () => beanType.newInstance) }
def registerBeanTypes(beanTypes: Seq[Class[_ <: Bean]]) {beanTypes foreach {t => registerBeanType(t)}}
def registerBeanType(typeName: Symbol, createInstance: () => _ <: Bean) { beanConstructors += (typeName -> createInstance) }
def registerBeanTypes(creator: Symbol => _ <: Option[Bean]) { beanCreators ::= creator }
def setDefaultBeanType(createInstance: () => _ <: Bean) {defaultBeanConstructor = createInstance}
def createDefaultBeanInstance(): Bean = defaultBeanConstructor()
def createBeanInstance(typeName: Symbol, allowFallbackToDefault: Boolean = true): Bean = {
var bean: Bean = createBeanWithCreator(typeName)
if (bean == null) {
if (allowFallbackToDefault) {
Logger.getLogger(getClass.getName).fine("No bean creator found for bean type " + typeName + ", using default bean type.")
createDefaultBeanInstance()
}
else throw new IllegalStateException("No bean creator found for bean type '"+typeName+"'.")
}
else bean
}
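  /**
   * Builds a bean from a map of property values: nested maps become contained beans,
   * other values are deserialized with the given serializers, and unknown fields are
   * optionally added as new properties (controlled by addUnknownProperties).
   */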
def createBeanFromMap(propertyValues: Map[Symbol, AnyRef], serializers: Serializers): Bean = {
val bean: Bean = if (propertyValues.contains(Bean.typePropertyName)) createBeanInstance(asSymbol(propertyValues(Bean.typePropertyName)))
else createDefaultBeanInstance()
propertyValues foreach { e =>
val field: Symbol = e._1
var value: AnyRef = e._2
if (field != Bean.typePropertyName) {
if (bean.contains(field)) {
// Do any deserialization if needed:
val kind = bean.properties(field).kind.erasure
if (value.isInstanceOf[Map[Symbol, AnyRef]]) {
// Create contained bean
value = createBeanFromMap(value.asInstanceOf[Map[Symbol, AnyRef]], serializers)
}
else {
// De-serialize primitive values
value = serializers.deserialize(kind, value.toString)
}
bean.set(field, value)
}
else {
if (addUnknownProperties) {
val kind = if (value == null) classOf[String] else value.getClass()
bean.addProperty(field, value)(Manifest.classType(kind))
}
}
}
}
bean
}
private def asSymbol(value: AnyRef): Symbol = {
if (value.isInstanceOf[Symbol]) value.asInstanceOf[Symbol]
else Symbol(value.toString)
}
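  // tries each registered bean creator in turn until one produces a bean for the
  // requested type, tagging the created bean with that type name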
private def createBeanWithCreator(typeName: Symbol): Bean = {
var bean: Bean = null
beanCreators exists {
bc =>
bc(typeName) match {
case None =>
false
case Some(b) =>
bean = b
bean.beanType_=(typeName)
true
}
}
bean
}
} | zzorn/scalaprops | src/main/scala/org/scalaprops/BeanFactory.scala | Scala | bsd-3-clause | 3,633 |
package com.github.sorhus.webalytics.akka.domain
import java.util.concurrent.TimeUnit
import akka.actor.{ActorRef, Props}
import com.github.sorhus.webalytics.akka.event._
import akka.pattern.ask
import akka.persistence.SnapshotOffer
import akka.util.Timeout
import com.github.sorhus.webalytics.model.Query
import scala.collection.immutable.Iterable
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success, Try}
class ReadOnlyDomainActor(audienceActor: ActorRef) extends TDomainActor {
implicit val timeout = Timeout(10, TimeUnit.SECONDS)
import concurrent.ExecutionContext.Implicits.global
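  // queries are resolved against the recovered domain state to obtain the dimension
  // space and then forwarded to the audience actor for evaluation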
override def receiveCommand: Receive = {
case query: Query =>
log.info("received query {}", query)
val space = state.get(query.dimensions)
log.info("space is {}", space)
audienceActor forward QueryCommand(query, space)
case i: LoadImmutable =>
// val f: Iterable[Future[Any]] = state.data.map{case(bucket, space) =>
// log.info("Passing on space {}", space)
// audienceActor ? i.copy(space = Some(space))
// }
val space = state.getAll
log.info("Passing on space {}", space)
val f = audienceActor ? i.copy(space = Some(space))
// Try(Await.result(Future.sequence(f), Duration.Inf)) match {
// case Success(list) if list.forall(_ == Ack) =>
// log.info("successful init {}",list)
// sender() ! Ack
// case Failure(e) =>
// log.warn("failed to Initialize", e)
// sender() ! Nack
// case _ =>
// log.warn("failed to Initialize")
// sender() ! Nack
// }
Try(Await.result(f, Duration.Inf)) match {
case Success(Ack) =>
log.info("successful init")
sender() ! Ack
case _ =>
log.warn("failed to Initialize")
sender() ! Nack
}
case x =>
println(s"received $x")
}
override def receiveRecover: Receive = {
// case SnapshotOffer(_, snapshot: DomainState) =>
case SnapshotOffer(_, snapshot: MutableDomainState) =>
log.info("restoring state from snapshot")
state = snapshot
}
}
object ReadOnlyDomainActor {
def props(audienceDao: ActorRef): Props = Props(new ReadOnlyDomainActor(audienceDao))
}
| sorhus/webalytics | service/src/main/scala/com/github/sorhus/webalytics/akka/domain/ReadOnlyDomainActor.scala | Scala | gpl-3.0 | 2,279 |
package se.lu.nateko.cp.meta.onto.labeler
import org.semanticweb.owlapi.model.{IRI => OwlIri}
import org.semanticweb.owlapi.model.OWLOntology
import se.lu.nateko.cp.meta.instanceserver.InstanceServer
import se.lu.nateko.cp.meta.instanceserver.InstanceServerUtils
import se.lu.nateko.cp.meta.utils.rdf4j._
import org.eclipse.rdf4j.model.IRI
class UniversalLabeler(ontology: OWLOntology) extends InstanceLabeler{
import scala.collection.mutable.Map
private val cache: Map[IRI, InstanceLabeler] = Map()
private[this] val owlFactory = ontology.getOWLOntologyManager.getOWLDataFactory
override def getLabel(instUri: IRI, instServer: InstanceServer): String = {
try{
val theType: IRI = InstanceServerUtils.getSingleType(instUri, instServer)
val theClass = owlFactory.getOWLClass(OwlIri.create(theType.toJava))
val labeler = cache.getOrElseUpdate(theType, ClassIndividualsLabeler(theClass, ontology, this))
labeler.getLabel(instUri, instServer)
} catch{
case _: Throwable =>
super.getLabel(instUri, instServer)
}
}
}
| ICOS-Carbon-Portal/meta | src/main/scala/se/lu/nateko/cp/meta/onto/labeler/UniversalLabeler.scala | Scala | gpl-3.0 | 1,048 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import SharedHelpers.{createTempDirectory, thisLineNumber}
import java.io.File
class ShouldBeReadableSpec extends Spec with Matchers {
val tempDir = createTempDirectory()
val readableFile = File.createTempFile("delete", "me", tempDir)
readableFile.setReadable(true)
val secretFile = new File(tempDir, "imaginary")
secretFile.setReadable(false)
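  // secretFile is never actually created on disk, so canRead (and the readable matcher) reports false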
val fileName: String = "ShouldBeReadableSpec.scala"
def wasNotReadable(left: Any): String =
FailureMessages("wasNotReadable", left)
def wasReadable(left: Any): String =
FailureMessages("wasReadable", left)
def `readableFile should be readable, secretFile should not be readable` {
assert(readableFile.canRead === true)
assert(secretFile.canRead === false)
}
def allError(left: Any, message: String, lineNumber: Int): String = {
val messageWithIndex = UnquotedString(" " + FailureMessages("forAssertionsGenTraversableMessageWithStackDepth", 0, UnquotedString(message), UnquotedString(fileName + ":" + lineNumber)))
FailureMessages("allShorthandFailed", messageWithIndex, left)
}
object `Readable matcher` {
object `when work with 'file should be (readable)'` {
def `should do nothing when file is readable` {
readableFile should be (readable)
}
def `should throw TestFailedException with correct stack depth when file is not readable` {
val caught1 = intercept[TestFailedException] {
secretFile should be (readable)
}
assert(caught1.message === Some(wasNotReadable(secretFile)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'file should not be readable'` {
def `should do nothing when file is not readable` {
secretFile should not be readable
}
def `should throw TestFailedException with correct stack depth when file is readable` {
val caught1 = intercept[TestFailedException] {
readableFile should not be readable
}
assert(caught1.message === Some(wasReadable(readableFile)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'file shouldBe readable'` {
def `should do nothing when file is readable` {
readableFile shouldBe readable
}
def `should throw TestFailedException with correct stack depth when file is not readable` {
val caught1 = intercept[TestFailedException] {
secretFile shouldBe readable
}
assert(caught1.message === Some(wasNotReadable(secretFile)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'file shouldNot be (readable)'` {
def `should do nothing when file is not readable` {
secretFile shouldNot be (readable)
}
def `should throw TestFailedException with correct stack depth when file is readable` {
val caught1 = intercept[TestFailedException] {
readableFile shouldNot be (readable)
}
assert(caught1.message === Some(wasReadable(readableFile)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) should be (readable)'` {
def `should do nothing when all(xs) is readable` {
all(List(readableFile)) should be (readable)
}
def `should throw TestFailedException with correct stack depth when all(xs) is not readable` {
val left1 = List(secretFile)
val caught1 = intercept[TestFailedException] {
all(left1) should be (readable)
}
assert(caught1.message === Some(allError(left1, wasNotReadable(secretFile), thisLineNumber - 2)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) should not be readable'` {
def `should do nothing when all(xs) is not readable` {
all(List(secretFile)) should not be readable
}
def `should throw TestFailedException with correct stack depth when all(xs) is readable` {
val left1 = List(readableFile)
val caught1 = intercept[TestFailedException] {
all(left1) should not be readable
}
assert(caught1.message === Some(allError(left1, wasReadable(readableFile), thisLineNumber - 2)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) shouldBe readable'` {
def `should do nothing when all(xs) is readable` {
all(List(readableFile)) shouldBe readable
}
def `should throw TestFailedException with correct stack depth when all(xs) is not readable` {
val left1 = List(secretFile)
val caught1 = intercept[TestFailedException] {
all(left1) shouldBe readable
}
assert(caught1.message === Some(allError(left1, wasNotReadable(secretFile), thisLineNumber - 2)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) shouldNot be (readable)'` {
def `should do nothing when all(xs) is not readable` {
all(List(secretFile)) shouldNot be (readable)
}
def `should throw TestFailedException with correct stack depth when all(xs) is readable` {
val left1 = List(readableFile)
val caught1 = intercept[TestFailedException] {
all(left1) shouldNot be (readable)
}
assert(caught1.message === Some(allError(left1, wasReadable(readableFile), thisLineNumber - 2)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
}
} | travisbrown/scalatest | src/test/scala/org/scalatest/ShouldBeReadableSpec.scala | Scala | apache-2.0 | 7,030 |
package parser.json.detail
import parser.json.GenericJsonParser
import play.api.libs.json.JsValue
import models.Skimbo
import parser.json.providers.GoogleplusWallParser
object GoogleplusDetails extends GenericJsonParser {
override def asSkimbo(json: JsValue): Option[Skimbo] = GoogleplusWallParser.asSkimbo(json)
override def cut(json: JsValue) = List(json)
} | Froggies/Skimbo | app/parser/json/detail/GoogleplusDetails.scala | Scala | agpl-3.0 | 372 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.io._
import scala.util.parsing.combinator.RegexParsers
import com.fasterxml.jackson.core._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.json._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
private[this] sealed trait PathInstruction
private[this] object PathInstruction {
private[expressions] case object Subscript extends PathInstruction
private[expressions] case object Wildcard extends PathInstruction
private[expressions] case object Key extends PathInstruction
private[expressions] case class Index(index: Long) extends PathInstruction
private[expressions] case class Named(name: String) extends PathInstruction
}
private[this] sealed trait WriteStyle
private[this] object WriteStyle {
private[expressions] case object RawStyle extends WriteStyle
private[expressions] case object QuotedStyle extends WriteStyle
private[expressions] case object FlattenStyle extends WriteStyle
}
private[this] object JsonPathParser extends RegexParsers {
import PathInstruction._
def root: Parser[Char] = '$'
def long: Parser[Long] = "\\d+".r ^? {
case x => x.toLong
}
// parse `[*]` and `[123]` subscripts
def subscript: Parser[List[PathInstruction]] =
for {
operand <- '[' ~> ('*' ^^^ Wildcard | long ^^ Index) <~ ']'
} yield {
Subscript :: operand :: Nil
}
// parse `.name` or `['name']` child expressions
def named: Parser[List[PathInstruction]] =
for {
name <- '.' ~> "[^\\.\\[]+".r | "['" ~> "[^\\'\\?]+".r <~ "']"
} yield {
Key :: Named(name) :: Nil
}
// child wildcards: `..`, `.*` or `['*']`
def wildcard: Parser[List[PathInstruction]] =
(".*" | "['*']") ^^^ List(Wildcard)
def node: Parser[List[PathInstruction]] =
wildcard |
named |
subscript
val expression: Parser[List[PathInstruction]] = {
phrase(root ~> rep(node) ^^ (x => x.flatten))
}
def parse(str: String): Option[List[PathInstruction]] = {
this.parseAll(expression, str) match {
case Success(result, _) =>
Some(result)
case NoSuccess(msg, next) =>
None
}
}
}
private[this] object SharedFactory {
val jsonFactory = new JsonFactory()
// Enabled for Hive compatibility
jsonFactory.enable(JsonParser.Feature.ALLOW_UNQUOTED_CONTROL_CHARS)
}
/**
* Extracts json object from a json string based on json path specified, and returns json string
* of the extracted json object. It will return null if the input json string is invalid.
*/
@ExpressionDescription(
usage = "_FUNC_(json_txt, path) - Extracts a json object from `path`.",
examples = """
Examples:
> SELECT _FUNC_('{"a":"b"}', '$.a');
b
""")
case class GetJsonObject(json: Expression, path: Expression)
extends BinaryExpression with ExpectsInputTypes with CodegenFallback {
import com.fasterxml.jackson.core.JsonToken._
import PathInstruction._
import SharedFactory._
import WriteStyle._
override def left: Expression = json
override def right: Expression = path
override def inputTypes: Seq[DataType] = Seq(StringType, StringType)
override def dataType: DataType = StringType
override def nullable: Boolean = true
override def prettyName: String = "get_json_object"
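  // when the path expression is foldable it is parsed once and reused for every row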
@transient private lazy val parsedPath = parsePath(path.eval().asInstanceOf[UTF8String])
override def eval(input: InternalRow): Any = {
val jsonStr = json.eval(input).asInstanceOf[UTF8String]
if (jsonStr == null) {
return null
}
val parsed = if (path.foldable) {
parsedPath
} else {
parsePath(path.eval(input).asInstanceOf[UTF8String])
}
if (parsed.isDefined) {
try {
/* We know the bytes are UTF-8 encoded. Pass a Reader to avoid having Jackson
detect character encoding which could fail for some malformed strings */
Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, jsonStr)) { parser =>
val output = new ByteArrayOutputStream()
val matched = Utils.tryWithResource(
jsonFactory.createGenerator(output, JsonEncoding.UTF8)) { generator =>
parser.nextToken()
evaluatePath(parser, generator, RawStyle, parsed.get)
}
if (matched) {
UTF8String.fromBytes(output.toByteArray)
} else {
null
}
}
} catch {
case _: JsonProcessingException => null
}
} else {
null
}
}
private def parsePath(path: UTF8String): Option[List[PathInstruction]] = {
if (path != null) {
JsonPathParser.parse(path.toString)
} else {
None
}
}
// advance to the desired array index, assumes to start at the START_ARRAY token
private def arrayIndex(p: JsonParser, f: () => Boolean): Long => Boolean = {
case _ if p.getCurrentToken == END_ARRAY =>
// terminate, nothing has been written
false
case 0 =>
// we've reached the desired index
val dirty = f()
while (p.nextToken() != END_ARRAY) {
// advance the token stream to the end of the array
p.skipChildren()
}
dirty
case i if i > 0 =>
// skip this token and evaluate the next
p.skipChildren()
p.nextToken()
arrayIndex(p, f)(i - 1)
}
/**
* Evaluate a list of JsonPath instructions, returning a bool that indicates if any leaf nodes
* have been written to the generator
*/
private def evaluatePath(
p: JsonParser,
g: JsonGenerator,
style: WriteStyle,
path: List[PathInstruction]): Boolean = {
(p.getCurrentToken, path) match {
case (VALUE_STRING, Nil) if style == RawStyle =>
// there is no array wildcard or slice parent, emit this string without quotes
if (p.hasTextCharacters) {
g.writeRaw(p.getTextCharacters, p.getTextOffset, p.getTextLength)
} else {
g.writeRaw(p.getText)
}
true
case (START_ARRAY, Nil) if style == FlattenStyle =>
// flatten this array into the parent
var dirty = false
while (p.nextToken() != END_ARRAY) {
dirty |= evaluatePath(p, g, style, Nil)
}
dirty
case (_, Nil) =>
// general case: just copy the child tree verbatim
g.copyCurrentStructure(p)
true
case (START_OBJECT, Key :: xs) =>
var dirty = false
while (p.nextToken() != END_OBJECT) {
if (dirty) {
// once a match has been found we can skip other fields
p.skipChildren()
} else {
dirty = evaluatePath(p, g, style, xs)
}
}
dirty
case (START_ARRAY, Subscript :: Wildcard :: Subscript :: Wildcard :: xs) =>
// special handling for the non-structure preserving double wildcard behavior in Hive
var dirty = false
g.writeStartArray()
while (p.nextToken() != END_ARRAY) {
dirty |= evaluatePath(p, g, FlattenStyle, xs)
}
g.writeEndArray()
dirty
case (START_ARRAY, Subscript :: Wildcard :: xs) if style != QuotedStyle =>
// retain Flatten, otherwise use Quoted... cannot use Raw within an array
val nextStyle = style match {
case RawStyle => QuotedStyle
case FlattenStyle => FlattenStyle
case QuotedStyle => throw new IllegalStateException()
}
// temporarily buffer child matches, the emitted json will need to be
// modified slightly if there is only a single element written
val buffer = new StringWriter()
var dirty = 0
Utils.tryWithResource(jsonFactory.createGenerator(buffer)) { flattenGenerator =>
flattenGenerator.writeStartArray()
while (p.nextToken() != END_ARRAY) {
// track the number of array elements and only emit an outer array if
// we've written more than one element, this matches Hive's behavior
dirty += (if (evaluatePath(p, flattenGenerator, nextStyle, xs)) 1 else 0)
}
flattenGenerator.writeEndArray()
}
val buf = buffer.getBuffer
if (dirty > 1) {
g.writeRawValue(buf.toString)
} else if (dirty == 1) {
// remove outer array tokens
g.writeRawValue(buf.substring(1, buf.length()-1))
} // else do not write anything
dirty > 0
case (START_ARRAY, Subscript :: Wildcard :: xs) =>
var dirty = false
g.writeStartArray()
while (p.nextToken() != END_ARRAY) {
// wildcards can have multiple matches, continually update the dirty count
dirty |= evaluatePath(p, g, QuotedStyle, xs)
}
g.writeEndArray()
dirty
case (START_ARRAY, Subscript :: Index(idx) :: (xs@Subscript :: Wildcard :: _)) =>
p.nextToken()
// we're going to have 1 or more results, switch to QuotedStyle
arrayIndex(p, () => evaluatePath(p, g, QuotedStyle, xs))(idx)
case (START_ARRAY, Subscript :: Index(idx) :: xs) =>
p.nextToken()
arrayIndex(p, () => evaluatePath(p, g, style, xs))(idx)
case (FIELD_NAME, Named(name) :: xs) if p.getCurrentName == name =>
// exact field match
if (p.nextToken() != JsonToken.VALUE_NULL) {
evaluatePath(p, g, style, xs)
} else {
false
}
case (FIELD_NAME, Wildcard :: xs) =>
// wildcard field match
p.nextToken()
evaluatePath(p, g, style, xs)
case _ =>
p.skipChildren()
false
}
}
}
// scalastyle:off line.size.limit line.contains.tab
@ExpressionDescription(
usage = "_FUNC_(jsonStr, p1, p2, ..., pn) - Returns a tuple like the function get_json_object, but it takes multiple names. All the input parameters and output column types are string.",
examples = """
Examples:
> SELECT _FUNC_('{"a":1, "b":2}', 'a', 'b');
1 2
""")
// scalastyle:on line.size.limit line.contains.tab
case class JsonTuple(children: Seq[Expression])
extends Generator with CodegenFallback {
import SharedFactory._
override def nullable: Boolean = {
// a row is always returned
false
}
// if processing fails this shared value will be returned
@transient private lazy val nullRow: Seq[InternalRow] =
new GenericInternalRow(Array.ofDim[Any](fieldExpressions.length)) :: Nil
// the json body is the first child
@transient private lazy val jsonExpr: Expression = children.head
// the fields to query are the remaining children
@transient private lazy val fieldExpressions: Seq[Expression] = children.tail
  // eagerly evaluate any foldable field names
@transient private lazy val foldableFieldNames: IndexedSeq[Option[String]] = {
fieldExpressions.map {
case expr if expr.foldable => Option(expr.eval()).map(_.asInstanceOf[UTF8String].toString)
case _ => null
}.toIndexedSeq
}
// and count the number of foldable fields, we'll use this later to optimize evaluation
@transient private lazy val constantFields: Int = foldableFieldNames.count(_ != null)
override def elementSchema: StructType = StructType(fieldExpressions.zipWithIndex.map {
case (_, idx) => StructField(s"c$idx", StringType, nullable = true)
})
override def prettyName: String = "json_tuple"
override def checkInputDataTypes(): TypeCheckResult = {
if (children.length < 2) {
TypeCheckResult.TypeCheckFailure(s"$prettyName requires at least two arguments")
} else if (children.forall(child => StringType.acceptsType(child.dataType))) {
TypeCheckResult.TypeCheckSuccess
} else {
TypeCheckResult.TypeCheckFailure(s"$prettyName requires that all arguments are strings")
}
}
override def eval(input: InternalRow): TraversableOnce[InternalRow] = {
val json = jsonExpr.eval(input).asInstanceOf[UTF8String]
if (json == null) {
return nullRow
}
try {
/* We know the bytes are UTF-8 encoded. Pass a Reader to avoid having Jackson
detect character encoding which could fail for some malformed strings */
Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, json)) { parser =>
parseRow(parser, input)
}
} catch {
case _: JsonProcessingException =>
nullRow
}
}
private def parseRow(parser: JsonParser, input: InternalRow): Seq[InternalRow] = {
// only objects are supported
if (parser.nextToken() != JsonToken.START_OBJECT) {
return nullRow
}
// evaluate the field names as String rather than UTF8String to
// optimize lookups from the json token, which is also a String
val fieldNames = if (constantFields == fieldExpressions.length) {
// typically the user will provide the field names as foldable expressions
// so we can use the cached copy
foldableFieldNames.map(_.orNull)
} else if (constantFields == 0) {
// none are foldable so all field names need to be evaluated from the input row
fieldExpressions.map(_.eval(input).asInstanceOf[UTF8String].toString)
} else {
// if there is a mix of constant and non-constant expressions
// prefer the cached copy when available
foldableFieldNames.zip(fieldExpressions).map {
case (null, expr) => expr.eval(input).asInstanceOf[UTF8String].toString
case (fieldName, _) => fieldName.orNull
}
}
val row = Array.ofDim[Any](fieldNames.length)
// start reading through the token stream, looking for any requested field names
while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken == JsonToken.FIELD_NAME) {
// check to see if this field is desired in the output
val jsonField = parser.getCurrentName
var idx = fieldNames.indexOf(jsonField)
if (idx >= 0) {
// it is, copy the child tree to the correct location in the output row
val output = new ByteArrayOutputStream()
// write the output directly to UTF8 encoded byte array
if (parser.nextToken() != JsonToken.VALUE_NULL) {
Utils.tryWithResource(jsonFactory.createGenerator(output, JsonEncoding.UTF8)) {
generator => copyCurrentStructure(generator, parser)
}
val jsonValue = UTF8String.fromBytes(output.toByteArray)
// SPARK-21804: json_tuple returns null values within repeated columns
            // except the first one, so we need to check the remaining fields.
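            // e.g. (hypothetical query) SELECT json_tuple('{"a":1, "b":2}', 'a', 'b', 'a')
            // is expected to fill every occurrence of 'a', yielding 1, 2, 1.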
do {
row(idx) = jsonValue
idx = fieldNames.indexOf(jsonField, idx + 1)
} while (idx >= 0)
}
}
}
// always skip children, it's cheap enough to do even if copyCurrentStructure was called
parser.skipChildren()
}
new GenericInternalRow(row) :: Nil
}
private def copyCurrentStructure(generator: JsonGenerator, parser: JsonParser): Unit = {
parser.getCurrentToken match {
// if the user requests a string field it needs to be returned without enclosing
// quotes which is accomplished via JsonGenerator.writeRaw instead of JsonGenerator.write
case JsonToken.VALUE_STRING if parser.hasTextCharacters =>
// slight optimization to avoid allocating a String instance, though the characters
// still have to be decoded... Jackson doesn't have a way to access the raw bytes
generator.writeRaw(parser.getTextCharacters, parser.getTextOffset, parser.getTextLength)
case JsonToken.VALUE_STRING =>
// the normal String case, pass it through to the output without enclosing quotes
generator.writeRaw(parser.getText)
case JsonToken.VALUE_NULL =>
// a special case that needs to be handled outside of this method.
// if a requested field is null, the result must be null. the easiest
// way to achieve this is just by ignoring null tokens entirely
throw new IllegalStateException("Do not attempt to copy a null field")
case _ =>
// handle other types including objects, arrays, booleans and numbers
generator.copyCurrentStructure(parser)
}
}
}
/**
 * Converts a JSON input string to a [[StructType]], [[ArrayType]] or [[MapType]]
* with the specified schema.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(jsonStr, schema[, options]) - Returns a struct value with the given `jsonStr` and `schema`.",
examples = """
Examples:
> SELECT _FUNC_('{"a":1, "b":0.8}', 'a INT, b DOUBLE');
{"a":1,"b":0.8}
> SELECT _FUNC_('{"time":"26/08/2015"}', 'time Timestamp', map('timestampFormat', 'dd/MM/yyyy'));
{"time":2015-08-26 00:00:00.0}
""",
since = "2.2.0")
// scalastyle:on line.size.limit
case class JsonToStructs(
schema: DataType,
options: Map[String, String],
child: Expression,
timeZoneId: Option[String] = None)
extends UnaryExpression with TimeZoneAwareExpression with CodegenFallback with ExpectsInputTypes {
val forceNullableSchema = SQLConf.get.getConf(SQLConf.FROM_JSON_FORCE_NULLABLE_SCHEMA)
// The JSON input data might be missing certain fields. We force the nullability
// of the user-provided schema to avoid data corruptions. In particular, the parquet-mr encoder
// can generate incorrect files if values are missing in columns declared as non-nullable.
val nullableSchema = if (forceNullableSchema) schema.asNullable else schema
override def nullable: Boolean = true
// Used in `FunctionRegistry`
def this(child: Expression, schema: Expression, options: Map[String, String]) =
this(
schema = ExprUtils.evalTypeExpr(schema),
options = options,
child = child,
timeZoneId = None)
def this(child: Expression, schema: Expression) = this(child, schema, Map.empty[String, String])
def this(child: Expression, schema: Expression, options: Expression) =
this(
schema = ExprUtils.evalTypeExpr(schema),
options = ExprUtils.convertToMapData(options),
child = child,
timeZoneId = None)
override def checkInputDataTypes(): TypeCheckResult = nullableSchema match {
case _: StructType | _: ArrayType | _: MapType =>
super.checkInputDataTypes()
case _ => TypeCheckResult.TypeCheckFailure(
s"Input schema ${nullableSchema.catalogString} must be a struct, an array or a map.")
}
// This converts parsed rows to the desired output by the given schema.
@transient
lazy val converter = nullableSchema match {
case _: StructType =>
(rows: Iterator[InternalRow]) => if (rows.hasNext) rows.next() else null
case _: ArrayType =>
(rows: Iterator[InternalRow]) => if (rows.hasNext) rows.next().getArray(0) else null
case _: MapType =>
(rows: Iterator[InternalRow]) => if (rows.hasNext) rows.next().getMap(0) else null
}
val nameOfCorruptRecord = SQLConf.get.getConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD)
@transient lazy val parser = {
val parsedOptions = new JSONOptions(options, timeZoneId.get, nameOfCorruptRecord)
val mode = parsedOptions.parseMode
if (mode != PermissiveMode && mode != FailFastMode) {
throw new IllegalArgumentException(s"from_json() doesn't support the ${mode.name} mode. " +
s"Acceptable modes are ${PermissiveMode.name} and ${FailFastMode.name}.")
}
val (parserSchema, actualSchema) = nullableSchema match {
case s: StructType =>
ExprUtils.verifyColumnNameOfCorruptRecord(s, parsedOptions.columnNameOfCorruptRecord)
(s, StructType(s.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord)))
case other =>
(StructType(StructField("value", other) :: Nil), other)
}
val rawParser = new JacksonParser(actualSchema, parsedOptions, allowArrayAsStructs = false)
val createParser = CreateJacksonParser.utf8String _
new FailureSafeParser[UTF8String](
input => rawParser.parse(input, createParser, identity[UTF8String]),
mode,
parserSchema,
parsedOptions.columnNameOfCorruptRecord)
}
override def dataType: DataType = nullableSchema
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override def nullSafeEval(json: Any): Any = {
converter(parser.parse(json.asInstanceOf[UTF8String]))
}
override def inputTypes: Seq[AbstractDataType] = StringType :: Nil
override def sql: String = schema match {
case _: MapType => "entries"
case _ => super.sql
}
override def prettyName: String = "from_json"
}
/**
* Converts a [[StructType]], [[ArrayType]] or [[MapType]] to a JSON output string.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(expr[, options]) - Returns a JSON string with a given struct value",
examples = """
Examples:
> SELECT _FUNC_(named_struct('a', 1, 'b', 2));
{"a":1,"b":2}
> SELECT _FUNC_(named_struct('time', to_timestamp('2015-08-26', 'yyyy-MM-dd')), map('timestampFormat', 'dd/MM/yyyy'));
{"time":"26/08/2015"}
> SELECT _FUNC_(array(named_struct('a', 1, 'b', 2)));
[{"a":1,"b":2}]
> SELECT _FUNC_(map('a', named_struct('b', 1)));
{"a":{"b":1}}
> SELECT _FUNC_(map(named_struct('a', 1),named_struct('b', 2)));
{"[1]":{"b":2}}
> SELECT _FUNC_(map('a', 1));
{"a":1}
> SELECT _FUNC_(array((map('a', 1))));
[{"a":1}]
""",
since = "2.2.0")
// scalastyle:on line.size.limit
case class StructsToJson(
options: Map[String, String],
child: Expression,
timeZoneId: Option[String] = None)
extends UnaryExpression with TimeZoneAwareExpression with CodegenFallback with ExpectsInputTypes {
override def nullable: Boolean = true
def this(options: Map[String, String], child: Expression) = this(options, child, None)
// Used in `FunctionRegistry`
def this(child: Expression) = this(Map.empty, child, None)
def this(child: Expression, options: Expression) =
this(
options = ExprUtils.convertToMapData(options),
child = child,
timeZoneId = None)
@transient
lazy val writer = new CharArrayWriter()
@transient
lazy val gen = new JacksonGenerator(
inputSchema, writer, new JSONOptions(options, timeZoneId.get))
@transient
lazy val inputSchema = child.dataType
// This converts rows to the JSON output according to the given schema.
@transient
lazy val converter: Any => UTF8String = {
def getAndReset(): UTF8String = {
gen.flush()
val json = writer.toString
writer.reset()
UTF8String.fromString(json)
}
inputSchema match {
case _: StructType =>
(row: Any) =>
gen.write(row.asInstanceOf[InternalRow])
getAndReset()
case _: ArrayType =>
(arr: Any) =>
gen.write(arr.asInstanceOf[ArrayData])
getAndReset()
case _: MapType =>
(map: Any) =>
gen.write(map.asInstanceOf[MapData])
getAndReset()
}
}
override def dataType: DataType = StringType
override def checkInputDataTypes(): TypeCheckResult = inputSchema match {
case struct: StructType =>
try {
JacksonUtils.verifySchema(struct)
TypeCheckResult.TypeCheckSuccess
} catch {
case e: UnsupportedOperationException =>
TypeCheckResult.TypeCheckFailure(e.getMessage)
}
case map: MapType =>
// TODO: let `JacksonUtils.verifySchema` verify a `MapType`
try {
val st = StructType(StructField("a", map) :: Nil)
JacksonUtils.verifySchema(st)
TypeCheckResult.TypeCheckSuccess
} catch {
case e: UnsupportedOperationException =>
TypeCheckResult.TypeCheckFailure(e.getMessage)
}
case array: ArrayType =>
try {
JacksonUtils.verifyType(prettyName, array)
TypeCheckResult.TypeCheckSuccess
} catch {
case e: UnsupportedOperationException =>
TypeCheckResult.TypeCheckFailure(e.getMessage)
}
case _ => TypeCheckResult.TypeCheckFailure(
s"Input type ${child.dataType.catalogString} must be a struct, array of structs or " +
"a map or array of map.")
}
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override def nullSafeEval(value: Any): Any = converter(value)
override def inputTypes: Seq[AbstractDataType] = TypeCollection(ArrayType, StructType) :: Nil
override def prettyName: String = "to_json"
}
/**
 * A function that infers the schema of a JSON string.
*/
@ExpressionDescription(
usage = "_FUNC_(json[, options]) - Returns schema in the DDL format of JSON string.",
examples = """
Examples:
> SELECT _FUNC_('[{"col":0}]');
array<struct<col:bigint>>
> SELECT _FUNC_('[{"col":01}]', map('allowNumericLeadingZeros', 'true'));
array<struct<col:bigint>>
""",
since = "2.4.0")
case class SchemaOfJson(
child: Expression,
options: Map[String, String])
extends UnaryExpression with CodegenFallback {
def this(child: Expression) = this(child, Map.empty[String, String])
def this(child: Expression, options: Expression) = this(
child = child,
options = ExprUtils.convertToMapData(options))
override def dataType: DataType = StringType
override def nullable: Boolean = false
@transient
private lazy val jsonOptions = new JSONOptions(options, "UTC")
@transient
private lazy val jsonFactory = {
val factory = new JsonFactory()
jsonOptions.setJacksonOptions(factory)
factory
}
@transient
private lazy val jsonInferSchema = new JsonInferSchema(jsonOptions)
@transient
private lazy val json = child.eval().asInstanceOf[UTF8String]
override def checkInputDataTypes(): TypeCheckResult = child match {
case Literal(s, StringType) if s != null => super.checkInputDataTypes()
case _ => TypeCheckResult.TypeCheckFailure(
s"The input json should be a string literal and not null; however, got ${child.sql}.")
}
override def eval(v: InternalRow): Any = {
val dt = Utils.tryWithResource(CreateJacksonParser.utf8String(jsonFactory, json)) { parser =>
parser.nextToken()
jsonInferSchema.inferField(parser)
}
UTF8String.fromString(dt.catalogString)
}
override def prettyName: String = "schema_of_json"
}
| caneGuy/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala | Scala | apache-2.0 | 27,575 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.geotools
import java.io.{File, FileWriter}
import com.typesafe.config.ConfigFactory
import org.opengis.feature.`type`.AttributeDescriptor
import org.opengis.feature.simple.SimpleFeatureType
case class AttributeDetails(unsafeName: String, index: Int, clazz: String) {
val name = unsafeName.replaceAll("\\W", "_")
def getter: String = s"def $name(): $clazz = sf.getAttribute($index).asInstanceOf[$clazz]"
def optionGetter: String = s"def ${name}Opt(): Option[$clazz] = Option($name())"
def setter: String = s"def set${name.capitalize}(x: $clazz): Unit = sf.setAttribute($index, x)"
}
object AttributeDetails {
import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors._
def apply(ad: AttributeDescriptor, sft: SimpleFeatureType): AttributeDetails = {
val majorBinding = classToString(Some(ad.getType.getBinding))
val binding = if (ad.isList) {
val subtype = classToString(Option(ad.getListType()))
s"$majorBinding[$subtype]"
} else if (ad.isMap) {
val types = ad.getMapTypes()
val keyType = classToString(Option(types._1))
val valueType = classToString(Option(types._2))
s"$majorBinding[$keyType,$valueType]"
} else {
majorBinding
}
AttributeDetails(ad.getLocalName, sft.indexOf(ad.getLocalName), binding)
}
private def classToString(clas: Option[Class[_]]) = clas.map(_.getCanonicalName).getOrElse("String")
}
object GenerateFeatureWrappers {
val className = "SimpleFeatureWrappers"
/**
* Builds all implicit classes in a wrapper object
*
* @param sfts
* @param pkg
* @return
*/
def buildAllClasses(sfts: Seq[SimpleFeatureType], pkg: String): String = {
val sb = new StringBuilder()
sb.append(s"package $pkg\n\n")
sb.append(s"object $className {")
sfts.foreach(sft => sb.append("\n\n").append(buildClass(sft, " ")))
sb.append("\n}")
sb.toString()
}
/**
* Builds a single implicit class
*
* @param sft
* @param tab
* @return
*/
def buildClass(sft: SimpleFeatureType, tab: String): String = {
import scala.collection.JavaConversions._
val attrs = sft.getAttributeDescriptors.map(AttributeDetails(_, sft))
val sb = new StringBuilder()
sb.append(s"${tab}implicit class ${sft.getTypeName}")
sb.append("(val sf: org.opengis.feature.simple.SimpleFeature) extends AnyVal {\n")
attrs.foreach { a =>
sb.append("\n")
sb.append(s"$tab ${a.getter}\n")
sb.append(s"$tab ${a.optionGetter}\n")
sb.append(s"$tab ${a.setter}\n")
}
sb.append(
s"""
|$tab def debug(): String = {
|$tab import scala.collection.JavaConversions._
|$tab val sb = new StringBuilder(s"$${sf.getType.getTypeName}:$${sf.getID}")
|$tab sf.getProperties.foreach(p => sb.append(s"|$${p.getName.getLocalPart}=$${p.getValue}"))
|$tab sb.toString()
|$tab }
|""".stripMargin)
sb.append(s"$tab}")
sb.toString()
}
/**
* Recursively looks for configuration files of the pattern 'format-*.conf'
*
* @param file
* @return
*/
def findFormatFiles(file: File): Seq[File] = {
if (!file.isDirectory) {
val name = file.getName
if (name.startsWith("format-") && name.endsWith(".conf")) {
Seq(file)
} else {
Seq.empty
}
} else {
file.listFiles().flatMap(findFormatFiles)
}
}
/**
* Creates implicit wrappers for any typesafe config format files found under src/main/resources
*
* @param args (0) - base directory for the maven project
* (1) - package to place the implicit classes
*/
def main(args: Array[String]): Unit = {
val basedir = args(0)
val packageName = args(1)
assert(basedir != null)
assert(packageName != null)
val folder = new File(basedir + "/src/main/resources")
val resources = Some(folder).filter(_.isDirectory).map(findFormatFiles).getOrElse(Seq.empty).sortBy(_.getName)
val sfts = resources.map(r => SimpleFeatureTypes.createType(ConfigFactory.parseFile(r)))
if (sfts.isEmpty) {
println("No formats found")
} else {
val classFilePath = s"$basedir/src/main/scala/${packageName.replaceAll("\\.", "/")}/$className.scala"
val classFile = new File(classFilePath)
println(s"Writing class file $packageName.$className with formats ${sfts.map(_.getTypeName).mkString(", ")}")
val fw = new FileWriter(classFile)
fw.write(buildAllClasses(sfts, packageName))
fw.flush()
fw.close()
}
}
}
/* Sample output
package com.foo
import org.opengis.feature.simple.SimpleFeature
object SimpleFeatureWrappers {
implicit class mySft(sf: SimpleFeature) extends AnyVal {
def foo(): java.lang.String = sf.getAttribute(0).asInstanceOf[java.lang.String]
def fooOpt(): Option[java.lang.String] = Option(foo())
def setFoo(x: java.lang.String): Unit = sf.setAttribute(0, x)
def lat(): java.lang.Double = sf.getAttribute(1).asInstanceOf[java.lang.Double]
def latOpt(): Option[java.lang.Double] = Option(lat())
def setLat(x: java.lang.Double): Unit = sf.setAttribute(1, x)
def lon(): java.lang.Double = sf.getAttribute(2).asInstanceOf[java.lang.Double]
def lonOpt(): Option[java.lang.Double] = Option(lon())
def setLon(x: java.lang.Double): Unit = sf.setAttribute(2, x)
def geom(): com.vividsolutions.jts.geom.Point = sf.getAttribute(3).asInstanceOf[com.vividsolutions.jts.geom.Point]
def geomOpt(): Option[com.vividsolutions.jts.geom.Point] = Option(geom())
def setGeom(x: com.vividsolutions.jts.geom.Point): Unit = sf.setAttribute(3, x)
}
}
*/
| jahhulbert-ccri/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/geotools/GenerateFeatureWrappers.scala | Scala | apache-2.0 | 6,163 |
/* scala-stm - (c) 2009-2011, Stanford University, PPL */
package scala.concurrent.stm
import actors.threadpool.TimeUnit
/** `object TxnExecutor` manages the system-wide default `TxnExecutor`. */
object TxnExecutor {
@volatile private var _default: TxnExecutor = impl.STMImpl.instance
/** Returns the default `TxnExecutor`. */
def defaultAtomic: TxnExecutor = _default
/** Atomically replaces the default `TxnExecutor` with `f(defaultAtomic)`. */
def transformDefault(f: TxnExecutor => TxnExecutor) {
synchronized { _default = f(_default) }
}
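  // Usage sketch (illustrative, not part of the original API docs): installing a
  // system-wide 10 second retry timeout by transforming the default executor.
  //
  //   TxnExecutor.transformDefault { e => e.withRetryTimeout(10000) }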
val DefaultPostDecisionExceptionHandler = { (status: Txn.Status, x: Throwable) =>
throw x
}
}
/** A `TxnExecutor` is responsible for executing atomic blocks transactionally
* using a set of configuration parameters. Configuration changes are made by
* constructing a new `TxnExecutor` using `withConfig` or `withHint`. The
* new executor may be used immediately, saved and used multiple times, or
* registered as the new system-wide default using
* `TxnExecutor.transformDefault`.
*
* @author Nathan Bronson
*/
trait TxnExecutor {
//////// functionality
/** Executes `block` one or more times until an atomic execution is achieved,
* buffering and/or locking writes so they are not visible until success.
*
* @param block code to execute atomically
* @tparam Z the return type of the atomic block
* @return the value returned from `block` after a successful optimistic
* concurrency attempt
*/
def apply[Z](block: InTxn => Z)(implicit mt: MaybeTxn): Z
/** Atomically executes a transaction that is composed from `blocks` by
* joining with a left-biased `orAtomic` operator. The following two
* examples are equivalent. Using `orAtomic`:
* {{{
* atomic { implicit t =>
* // body A
* } orAtomic { implicit t =>
* // body B
* } ...
* }}}
* Using `oneOf`:
* {{{
* atomic.oneOf( { implicit t: InTxn =>
* // body A
* }, { implicit t: InTxn =>
* // body B
* } )
* }}}
*
* The first block will be attempted in an optimistic transaction until it
* either succeeds, fails with no retry possible (in which case the causing
* exception will be rethrown), or performs a call to `retry`. If a retry
* is requested, then the next block will be attempted in the same fashion.
* If all blocks are explicitly retried then execution resumes at the first
* block, but only after another context has changed some value read by one
* of the attempts.
*
* The left-biasing of the `orAtomic` composition guarantees that if the
* first block does not call `retry`, no other blocks will be executed.
*/
def oneOf[Z](blocks: (InTxn => Z)*)(implicit mt: MaybeTxn): Z
/** (rare) Associates an alternative atomic block with the current thread.
* The next call to `apply` will consider `block` to be an alternative.
* Multiple alternatives may be associated before calling `apply`. Returns
* true if this is the first pushed alternative, false otherwise. This
* method is not usually called directly. Alternative atomic blocks are
* only attempted if the previous alternatives call `retry`.
*
* Note that it is not required that `pushAlternative` be called on the same
* instance of `TxnExecutor` as `apply`, just that they have been derived
* from the same original executor.
*/
def pushAlternative[Z](mt: MaybeTxn, block: InTxn => Z): Boolean
  /** Atomically compares and sets two `Ref`s, probably more efficiently than
* the corresponding transaction. Equivalent to {{{
* atomic { implicit t =>
* a() == a0 && b() == b0 && { a() = a1 ; b() = b1 ; true }
* }
* }}}
*/
def compareAndSet[A, B](a: Ref[A], a0: A, a1: A, b: Ref[B], b0: B, b1: B): Boolean
/** Atomically compares and sets two `Ref`s using identity comparison,
   * probably more efficiently than the corresponding transaction. Equivalent
* to {{{
* atomic { implicit t =>
* val f = (a() eq a0) && (b() eq b0)
* if (f && (a0 ne a1))
* a() = a1
* if (f && (b0 ne b1))
* b() = b1
* f
* }
* }}}
*/
def compareAndSetIdentity[A <: AnyRef, B <: AnyRef](a: Ref[A], a0: A, a1: A, b: Ref[B], b0: B, b1: B): Boolean
//////// configuration
/** Returns `Some(t)` if `t` is the retry timeout in nanoseconds used by
* this `TxnExecutor`, or `None` otherwise. If the retry timeout is
* `Some(t)` and an atomic block executed by the returned executor blocks
* with `retry` or `retryFor` for more than `t` nanoseconds the retry will
* be cancelled with an `InterruptedException`.
*
* The retry timeout has essentially the same effect as replacing calls to
* `retry` with
* `{ retryFor(timeout, NANOS) ; throw new InterruptedException }`.
* Alternately, `retryFor(timeout)` has roughly the same effect as {{{
* try {
* atomic.withRetryTimeout(timeout) { implicit txn => retry }
* } catch {
* case _: InterruptedException =>
* }
* }}}
*/
def retryTimeoutNanos: Option[Long]
/** Returns a `TxnExecutor` that is identical to this one, except that it has
* a `retryTimeout` of `timeoutNanos`.
*/
def withRetryTimeoutNanos(timeoutNanos: Option[Long]): TxnExecutor
/** Returns a `TxnExecutor` that is identical to this one except that it has
* the specified retry timeout. The default time unit is milliseconds.
*/
def withRetryTimeout(timeout: Long, unit: TimeUnit = TimeUnit.MILLISECONDS): TxnExecutor =
withRetryTimeoutNanos(Some(unit.toNanos(timeout)))
/** Returns true if `x` should be treated as a transfer of control, rather
* than an error. Atomic blocks that end with an uncaught control flow
* exception are committed, while atomic blocks that end with an uncaught
* error exception are rolled back.
*
* All implementations of this method must return true for instances that
* implement `scala.util.control.ControlThrowable`.
*/
def isControlFlow(x: Throwable): Boolean
/** Returns a `TxnExecutor e` that is identical to this one, except that
* `e.isControlFlow(x)` will return `pf(x)` if `pf.isDefined(x)`. For
* exceptions for which `pf` is not defined the decision will be deferred to
* the previous implementation.
*
* This function may be combined with `TxnExecutor.transformDefault` to add
* system-wide recognition of a control-transfer exception that does not
* extend `scala.util.control.ControlThrowable`. For example, to modify the
* default behavior of all `TxnExecutor.isControlFlow` calls to accept
* `DSLNonLocalControlTransferException`: {{{
* TxnExecutor.transformDefault { e =>
* e.withControlFlowRecognizer {
* case _: DSLNonLocalControlTransferException => true
* }
* }
* }}}
*/
def withControlFlowRecognizer(pf: PartialFunction[Throwable, Boolean]): TxnExecutor
/** Returns a function that records, reports or discards exceptions that were
* thrown from a while-committing, after-commit or after-rollback life-cycle
* callback.
*/
def postDecisionFailureHandler: (Txn.Status, Throwable) => Unit
/** Returns a `TxnExecutor e` that is identical to this one, except that
* `e.postDecisionFailureHandler` will return `handler`. This function may
* be called from inside a function passed to `TxnExecutor.transformDefault`
* to change the system-wide post-decision failure handler.
*/
def withPostDecisionFailureHandler(handler: (Txn.Status, Throwable) => Unit): TxnExecutor
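  // Usage sketch (illustrative; assumes you want to log rather than rethrow
  // post-decision callback failures system-wide):
  //
  //   TxnExecutor.transformDefault { e =>
  //     e.withPostDecisionFailureHandler { (status, x) => x.printStackTrace() }
  //   }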
}
| djspiewak/scala-stm | src/main/scala/scala/concurrent/stm/TxnExecutor.scala | Scala | bsd-3-clause | 7,743 |
/*
* Copyright 2009 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.ostrich.admin
import com.twitter.conversions.time._
import com.twitter.json.Json
import com.twitter.logging.{Level, Logger}
import com.twitter.ostrich.stats.{Stats, StatsListener}
import com.twitter.util.registry.{SimpleRegistry, GlobalRegistry}
import java.net.{Socket, SocketException, URI, URL}
import java.util.regex.Pattern
import org.junit.runner.RunWith
import org.scalatest.{BeforeAndAfter, FunSuite}
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.junit.JUnitRunner
import org.scalatest.prop.TableDrivenPropertyChecks
import scala.collection.JavaConverters._
import scala.io.Source
@RunWith(classOf[JUnitRunner])
class AdminHttpServiceTest extends FunSuite with BeforeAndAfter
with TableDrivenPropertyChecks
with Eventually
with IntegrationPatience {
class Context {}
def get(path: String): String = {
val port = service.address.getPort
val url = new URL(f"http://localhost:$port%d$path%s")
    Source.fromURL(url).getLines().mkString("\n")
}
def getHeaders(path: String): Map[String, List[String]] = {
val port = service.address.getPort
val url = new URL(f"http://localhost:$port%d$path%s")
url.openConnection().getHeaderFields.asScala.toMap.mapValues { _.asScala.toList }
}
var service: AdminHttpService = null
val registry = new SimpleRegistry
before {
service =
new AdminHttpService(
0,
20,
Stats,
new ServerInfoHandler(getClass),
30.seconds,
{ code => /* system-exit is a noop here */ },
registry
)
service.start()
}
after {
Stats.clearAll()
StatsListener.clearAll()
service.shutdown()
}
test("FolderResourceHandler") {
val staticHandler = new FolderResourceHandler("/nested")
info("split a URI")
assert(staticHandler.getRelativePath("/nested/1level.txt") === "1level.txt")
assert(staticHandler.getRelativePath("/nested/2level/2level.txt") === "2level/2level.txt")
info("build paths correctly")
assert(staticHandler.buildPath("1level.txt") === "/nested/1level.txt")
assert(staticHandler.buildPath("2level/2level.txt") === "/nested/2level/2level.txt")
info("load resources")
intercept[Exception] { staticHandler.loadResource("nested/1level.txt") }
try {
staticHandler.loadResource("/nested/1level.txt")
} catch {
case e: Exception => fail("staticHandler should not throw an exception")
}
}
test("static resources") {
new Context {
info("drawgraph.js")
val inputStream = getClass.getResourceAsStream("/static/drawgraph.js")
assert(inputStream !== null)
assert(Source.fromInputStream(inputStream).mkString !== null)
}
new Context {
info("unnested")
val inputStream = getClass.getResourceAsStream("/unnested.txt")
assert(Pattern.matches("we are not nested", Source.fromInputStream(inputStream).getLines.mkString))
}
new Context {
info("1 level of nesting")
val inputStream = getClass.getResourceAsStream("/nested/1level.txt")
assert(Pattern.matches("nested one level deep", Source.fromInputStream(inputStream).getLines.mkString))
}
new Context {
info("2 levels of nesting")
val inputStream = getClass.getResourceAsStream("/nested/2levels/2levels.txt")
assert(Pattern.matches("nested two levels deep", Source.fromInputStream(inputStream).getLines.mkString))
}
}
test("start and stop") {
val port = service.address.getPort
val socket = new Socket("localhost", port)
socket.close()
service.shutdown()
eventually {
intercept[SocketException] { new Socket("localhost", port) }
}
}
test("answer pings") {
val port = service.address.getPort
assert(get("/ping.json").trim === """{"response":"pong"}""")
service.shutdown()
eventually {
intercept[SocketException] { new Socket("localhost", port) }
}
}
test("shutdown") {
val port = service.address.getPort
get("/shutdown.json")
eventually {
intercept[SocketException] { new Socket("localhost", port) }
}
}
test("quiesce") {
val port = service.address.getPort
get("/quiesce.json")
eventually {
intercept[SocketException] { new Socket("localhost", port) }
}
}
test("get a proper web page back for the report URL") {
assert(get("/report/").contains("Stats Report"))
}
test("return 404 for favicon") {
intercept[java.io.FileNotFoundException] { get("/favicon.ico") }
}
test("return 404 for a missing command") {
intercept[java.io.FileNotFoundException] { get("/bullshit.json") }
}
test("not crash when fetching /") {
assert(get("/").contains("ostrich"))
}
test("tell us its ostrich version in the headers") {
assert(getHeaders("/").get("X-ostrich-version").isInstanceOf[Some[List[String]]])
}
test("server info") {
val serverInfo = get("/server_info.json")
    assert(serverInfo.contains("\"build\":"))
    assert(serverInfo.contains("\"build_revision\":"))
    assert(serverInfo.contains("\"name\":"))
    assert(serverInfo.contains("\"version\":"))
    assert(serverInfo.contains("\"start_time\":"))
    assert(serverInfo.contains("\"uptime\":"))
}
test("change log levels") {
// Add a logger with a very specific name
val name = "logger-" + System.currentTimeMillis
val logger = Logger.get(name) // register this logger
logger.setLevel(Level.INFO)
// no levels specified
var logLevels = get("/logging")
assert(logLevels.contains(name))
assert(logLevels.contains("Specify a logger name and level"))
// specified properly
logLevels = get("/logging?name=%s&level=FATAL".format(name))
assert(Logger.get(name).getLevel() === Level.FATAL)
assert(logLevels.contains("Successfully changed the level of the following logger"))
// made up level
logLevels = get("/logging?name=%s&level=OHEMGEE".format(name))
assert(logLevels.contains("Logging level change failed"))
// made up logger
logLevels = get("/logging?name=OHEMGEEWHYAREYOUUSINGTHISLOGGERNAME&level=INFO")
assert(logLevels.contains("Logging level change failed"))
}
test("fetch static files") {
assert(get("/static/drawgraph.js").contains("drawChart"))
}
test("mesos health") {
assert(get("/health").contains("OK"))
}
test("mesos abortabortabort") {
val port = service.address.getPort
get("/abortabortabort")
eventually {
intercept[SocketException] { new Socket("localhost", port) }
}
}
test("mesos quitquitquit") {
val port = service.address.getPort
get("/quitquitquit")
eventually {
intercept[SocketException] { new Socket("localhost", port) }
}
}
test("thread contention") {
val prof = get("/contention.json")
    assert(prof.contains("\"blocked_threads\":"))
}
test("provide stats") {
new Context {
info("in json")
// make some statsy things happen
Stats.clearAll()
Stats.time("kangaroo_time") { Stats.incr("kangaroos", 1) }
val stats = Json.parse(get("/stats.json")).asInstanceOf[Map[String, Map[String, AnyRef]]]
assert(stats("gauges").get("jvm_uptime").isDefined)
assert(stats("gauges").get("jvm_heap_used").isDefined)
assert(stats("counters").get("kangaroos").isDefined)
assert(stats("metrics").get("kangaroo_time_msec").isDefined)
val timing = stats("metrics")("kangaroo_time_msec").asInstanceOf[Map[String, Int]]
assert(timing("count") === 1)
assert(timing("minimum") >= 0)
assert(timing("maximum") >= timing("minimum"))
}
new Context {
info("in json, with custom listeners")
Stats.clearAll()
Stats.incr("apples", 10)
Stats.addMetric("oranges", 5)
var absStats = Json.parse(get("/stats.json")).asInstanceOf[Map[String, Map[String, AnyRef]]]
assert(absStats("counters")("apples") === 10)
assert(absStats("metrics")("oranges").asInstanceOf[Map[String, AnyRef]]("count") === 1)
var namespaceStats = Json.parse(get("/stats.json?namespace=monkey"))
.asInstanceOf[Map[String, Map[String, AnyRef]]]
assert(namespaceStats("counters")("apples") === 10)
assert(namespaceStats("metrics")("oranges").asInstanceOf[Map[String, AnyRef]]("count") === 1)
var periodicStats = Json.parse(get("/stats.json?period=30"))
.asInstanceOf[Map[String, Map[String, AnyRef]]]
assert(periodicStats("counters")("apples") === 10)
assert(periodicStats("metrics")("oranges").asInstanceOf[Map[String, AnyRef]]("count") === 1)
Stats.incr("apples", 6)
Stats.addMetric("oranges", 3)
absStats = Json.parse(get("/stats.json")).asInstanceOf[Map[String, Map[String, AnyRef]]]
assert(absStats("counters")("apples") === 16)
assert(absStats("metrics")("oranges").asInstanceOf[Map[String, AnyRef]]("count") === 2)
namespaceStats = Json.parse(get("/stats.json?namespace=monkey"))
.asInstanceOf[Map[String, Map[String, AnyRef]]]
assert(namespaceStats("counters")("apples") === 6)
assert(namespaceStats("metrics")("oranges").asInstanceOf[Map[String, AnyRef]]("count") === 1)
namespaceStats = Json.parse(get("/stats.json?namespace=monkey"))
.asInstanceOf[Map[String, Map[String, AnyRef]]]
assert(namespaceStats("counters")("apples") === 0)
assert(namespaceStats("metrics")("oranges").asInstanceOf[Map[String, AnyRef]]("count") === 0)
periodicStats = Json.parse(get("/stats.json?period=30"))
.asInstanceOf[Map[String, Map[String, AnyRef]]]
if (periodicStats("counters")("apples") == 6) {
        // PeriodicBackgroundProcess aligns the first event to a multiple
        // of the period + 1, so the first event can happen in as little as two
        // seconds. If the first event has already happened by the time we
        // check the stats, we retry the test.
Stats.incr("apples", 8)
Stats.addMetric("oranges", 4)
periodicStats = Json.parse(get("/stats.json?period=30"))
.asInstanceOf[Map[String, Map[String, AnyRef]]]
assert(periodicStats("counters")("apples") === 6)
assert(periodicStats("metrics")("oranges").asInstanceOf[Map[String, AnyRef]]("count") === 1)
} else {
assert(periodicStats("counters")("apples") === 10)
assert(periodicStats("metrics")("oranges").asInstanceOf[Map[String, AnyRef]]("count") === 1)
}
}
new Context {
info("in json, with histograms")
// make some statsy things happen
Stats.clearAll()
get("/stats.json")
Stats.addMetric("kangaroo_time", 1)
Stats.addMetric("kangaroo_time", 2)
Stats.addMetric("kangaroo_time", 3)
Stats.addMetric("kangaroo_time", 4)
Stats.addMetric("kangaroo_time", 5)
Stats.addMetric("kangaroo_time", 6)
val stats = get("/stats.json")
val json = Json.parse(stats).asInstanceOf[Map[String, Map[String, AnyRef]]]
val timings = json("metrics")("kangaroo_time").asInstanceOf[Map[String, Int]]
assert(timings.get("count").isDefined)
assert(timings("count") === 6)
assert(timings.get("average").isDefined)
assert(timings("average") === 3)
assert(timings.get("p50").isDefined)
assert(timings("p50") === 3)
assert(timings.get("p99").isDefined)
assert(timings("p99") === 6)
assert(timings.get("p999").isDefined)
assert(timings("p999") === 6)
assert(timings.get("p9999").isDefined)
assert(timings("p9999") === 6)
}
new Context {
info("in json, with histograms and reset")
Stats.clearAll()
// Add items indirectly to the histogram
Stats.addMetric("kangaroo_time", 1)
Stats.addMetric("kangaroo_time", 2)
Stats.addMetric("kangaroo_time", 3)
Stats.addMetric("kangaroo_time", 4)
Stats.addMetric("kangaroo_time", 5)
Stats.addMetric("kangaroo_time", 6)
val stats = get("/stats.json?reset")
val json = Json.parse(stats).asInstanceOf[Map[String, Map[String, AnyRef]]]
val timings = json("metrics")("kangaroo_time").asInstanceOf[Map[String, Int]]
assert(timings.get("count").isDefined)
assert(timings("count") === 6)
assert(timings.get("average").isDefined)
assert(timings("average") === 3)
assert(timings.get("p50").isDefined)
assert(timings("p50") === 3)
assert(timings.get("p95").isDefined)
assert(timings("p95") === 6)
assert(timings.get("p99").isDefined)
assert(timings("p99") === 6)
assert(timings.get("p999").isDefined)
assert(timings("p999") === 6)
assert(timings.get("p9999").isDefined)
assert(timings("p9999") === 6)
}
new Context {
info("in json, with callback")
val stats = get("/stats.json?callback=true")
assert(stats.startsWith("ostrichCallback("))
assert(stats.endsWith(")"))
}
new Context {
info("in json, with named callback")
val stats = get("/stats.json?callback=My.Awesome.Callback")
assert(stats.startsWith("My.Awesome.Callback("))
assert(stats.endsWith(")"))
}
new Context {
info("in json, with empty callback")
val stats = get("/stats.json?callback=")
assert(stats.startsWith("ostrichCallback("))
assert(stats.endsWith(")"))
}
new Context {
info("in text")
// make some statsy things happen
Stats.clearAll()
Stats.time("kangaroo_time") { Stats.incr("kangaroos", 1) }
assert(get("/stats.txt").contains(" kangaroos: 1"))
}
}
test("return 400 for /stats when collection period is below minimum") {
intercept[Exception] { get("/stats.json?period=10") }
}
test("parse parameters") {
val parametersTable =
Table(
("uri", "result"),
("/p", Nil),
("/p?a=b", ("a", "b") :: Nil),
("/p?a=b&c=d", ("a", "b") :: ("c", "d") :: Nil),
("/p?", Nil),
("/p?invalid", Nil),
("/p?a=", ("a", "") :: Nil),
("/p?=b", ("", "b") :: Nil)
)
forAll (parametersTable) { (uriStr: String, result: List[(String, String)]) =>
assert(CgiRequestHandler.uriToParameters(new URI(uriStr)) === result)
}
}
test("provide basic registry information") {
registry.put(Seq("foo", "bar"), "baz")
registry.put(Seq("foo", "qux"), "quux")
val actual = get("/admin/registry")
val expected = """{"registry":{"foo":{"bar":"baz","qux":"quux"}}}"""
assert(actual == expected)
}
}
| yonglehou/ostrich | src/test/scala/com/twitter/ostrich/admin/AdminHttpServiceTest.scala | Scala | apache-2.0 | 15,176 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package models
import play.api.data.Form
import play.api.data.Forms._
import play.api.libs.json.Json
case class NewDevice(nickname: String, brand: String, model: String, os: String, os_version: String, category: String)
object NewDevice {
implicit val format = Json.format[NewDevice]
}
object NewDeviceForm {
val form = Form(
mapping(
"nickname" -> nonEmptyText,
"brand" -> nonEmptyText,
"model" -> nonEmptyText,
"os" -> nonEmptyText,
"os_version" -> nonEmptyText,
"category" -> nonEmptyText
)(NewDevice.apply)(NewDevice.unapply)
)
} | chrisjwwalker/cjww-diagnostics | app/models/NewDevice.scala | Scala | apache-2.0 | 1,323 |
package appenginehelpers
import com.google.appengine.api.utils.SystemProperty
import net.sf.ehcache.CacheManager
import com.google.appengine.api.memcache._
import net.sf.ehcache.config.{CacheConfiguration, Configuration}
import java.util.logging.Logger
trait HybridCache {
private val logger = Logger.getLogger(classOf[HybridCache].getName)
implicit def int2expiringInt(timeToLiveInSeconds: Int) = ExpiringInt(timeToLiveInSeconds)
case class ExpiringInt(timeToLiveInSeconds: Int) {
lazy val seconds = Expiration.byDeltaSeconds(timeToLiveInSeconds)
lazy val second = seconds
lazy val minutes = Expiration.byDeltaSeconds(timeToLiveInSeconds * 60)
lazy val minute = minutes
lazy val hours = Expiration.byDeltaSeconds(timeToLiveInSeconds * 60 * 60)
lazy val hour = hours
lazy val days = Expiration.byDeltaSeconds(timeToLiveInSeconds * 60 * 60 * 24)
lazy val day = days
}
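  // Usage sketch for a class mixing in HybridCache ("greeting" is just an example key):
  //
  //   cache.put("greeting", "hello", 10.minutes) // Expiration built via ExpiringInt
  //   cache.get("greeting")                      // => Some("hello") until the entry expires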
lazy val cache = Option(SystemProperty.version.get) match {
case Some(_) => {
logger.info("Using App Engine cache")
new SimpleCache(MemcacheServiceFactory.getMemcacheService)
}
case None => {
logger.info("Using EHCache")
new SimpleCache(new EhCacheWrapper(EhCacheConfig.getCache))
}
}
}
private object EhCacheConfig {
val cacheConfiguration = new CacheConfiguration("default", 5000)
cacheConfiguration.setDiskPersistent(false)
cacheConfiguration.setEternal(false)
cacheConfiguration.setOverflowToDisk(false)
val configuration = new Configuration()
configuration.setDefaultCacheConfiguration(cacheConfiguration)
val cacheManager = new CacheManager(configuration)
cacheManager.addCache("default")
def getCache = cacheManager.getCache("default")
}
class SimpleCache(cache: MemcacheService) {
def put(key: AnyRef, value: AnyRef) = cache.put(key, value)
def put(key: AnyRef, value: AnyRef, expiration: Expiration) = cache.put(key, value, expiration)
def get(key: AnyRef) = Option(cache.get(key)) match {
case None => None
case hit => hit
}
def delete(key: AnyRef) = cache.delete(key)
def contains(key: AnyRef) = cache.contains(key)
def clearAll = cache.clearAll
}
| gklopper/Appengine-Helpers | cache/src/main/scala/appenginehelpers/HybridCache.scala | Scala | mit | 2,166 |
package ohnosequences.db.rnacentral.test
import org.scalatest.FunSuite
import ohnosequences.test._
import ohnosequences.db.rnacentral._
import org.scalatest.EitherValues._
class Entries extends FunSuite {
test("parse all", ReleaseOnlyTest) {
Version.all foreach { v =>
data.rnacentralData(v).left.foreach { err =>
println(err.msg)
}
val (malformedRows, parsedRows) =
entries.entriesFrom(data.rnacentralData(v).right.value)
assert { malformedRows.isEmpty && allRight { parsedRows } }
}
}
}
| ohnosequences/db.rnacentral | src/test/scala/entries.scala | Scala | agpl-3.0 | 546 |
/*
* Copyright 2016 OSBI Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bi.meteorite.core.security
import bi.meteorite.core.api.objects.MeteoriteUser
import bi.meteorite.core.api.security.IUserManagement
/**
* User Manager Endpoints.
*/
class UserManager extends IUserManagement {
override def addUser(u: MeteoriteUser): MeteoriteUser = null
override def deleteUser(u: MeteoriteUser): Boolean = false
override def setUser(u: MeteoriteUser): MeteoriteUser = null
override def getUser(id: Int): MeteoriteUser = null
override def getRoles(u: MeteoriteUser): Array[String] = {
Array.ofDim[String](0)
}
override def addRole(u: MeteoriteUser) {
}
override def removeRole(u: MeteoriteUser) {
}
override def removeUser(username: String) {
}
override def updateUser(u: MeteoriteUser): MeteoriteUser = null
override def isAdmin: Boolean = false
override def getAdminRoles: List[String] = null
}
| OSBI/meteorite-core | security-scala/src/main/scala/bi/meteorite/core/security/UserManager.scala | Scala | apache-2.0 | 1,461 |
package org.scalaide.ui.editor
import scala.reflect.io.AbstractFile
import org.scalaide.core.compiler.InteractiveCompilationUnit
import org.scalaide.core.resources.EclipseResource
import org.eclipse.core.resources.IFile
import org.eclipse.jface.text.IDocument
abstract class CompilationUnit(override val workspaceFile: IFile) extends InteractiveCompilationUnit {
@volatile private var _document: Option[IDocument] = None
final protected def document: Option[IDocument] = _document
override def file: AbstractFile = EclipseResource(workspaceFile)
/** Attach the passed `doc` to this compilation unit.*/
final def connect(doc: IDocument): Unit = {
_document = Option(doc)
}
override def exists(): Boolean = workspaceFile.exists()
}
| Kwestor/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/ui/editor/CompilationUnit.scala | Scala | bsd-3-clause | 754 |
package dotty.tools
package dotc
package ast
import core._
import Names._, Types._ , Symbols._, StdNames._, Flags._, Contexts._
import org.junit.Test
import org.junit.Assert._
class DesugarTests extends DottyTest {
import tpd._
private def validSym(sym: Symbol)(implicit ctx: Context): Unit = {
assert(
// remaining symbols must be either synthetic:
sym.is(Synthetic) ||
// or be a constructor:
sym.name == nme.CONSTRUCTOR,
s"found: $sym (${sym.flagsString})"
)
}
@Test def caseClassHasCorrectMembers: Unit =
checkCompile("typer", "case class Foo(x: Int, y: String)") { (tree, context) =>
implicit val ctx = context
val ccTree = tree.find(tree => tree.symbol.name == typeName("Foo")).get
val List(_, foo) = defPath(ccTree.symbol, tree).map(_.symbol.info)
val x :: y :: rest = foo.decls.toList
// Make sure we extracted the correct values from foo:
assert(x.name == termName("x"))
assert(y.name == termName("y"))
rest.foreach(validSym)
}
@Test def caseClassCompanionHasCorrectMembers: Unit =
checkCompile("typer", "case class Foo(x: Int, y: String)") { (tree, context) =>
implicit val ctx = context
val ccTree = tree.find(tree => tree.symbol.name == termName("Foo")).get
val List(_, foo) = defPath(ccTree.symbol, tree).map(_.symbol.info)
foo.decls.foreach(validSym)
}
}
| som-snytt/dotty | compiler/test/dotty/tools/dotc/ast/DesugarTests.scala | Scala | apache-2.0 | 1,429 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.reactRouter
import scala.scalajs.js
import com.glipka.easyReactJS.react._
import GlobalDefinition._
@js.native
trait RouterContextElement extends ReactElement[RouterContextProps] with js.Any {
  val history: js.Any = js.native
  val location: js.Any = js.native
  val router: Router = js.native
} | glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/reactRouter/RouterContextElement.scala | Scala | apache-2.0 | 932 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api.test
import java.util.{Collection, Collections, Properties}
import scala.collection.JavaConverters._
import org.junit.runners.Parameterized
import org.junit.runner.RunWith
import org.junit.runners.Parameterized.Parameters
import org.junit.{After, Before, Test}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.junit.Assert._
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.TestUtils
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.ByteArraySerializer
@RunWith(value = classOf[Parameterized])
class ProducerCompressionTest(compression: String) extends ZooKeeperTestHarness {
private val brokerId = 0
private val topic = "topic"
private val numRecords = 2000
private var server: KafkaServer = null
@Before
override def setUp() {
super.setUp()
val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
server = TestUtils.createServer(KafkaConfig.fromProps(props))
}
@After
override def tearDown() {
TestUtils.shutdownServers(Seq(server))
super.tearDown()
}
/**
* testCompression
*
   * Compressed messages should be able to be sent and consumed correctly
*/
@Test
def testCompression() {
val producerProps = new Properties()
val bootstrapServers = TestUtils.getBrokerListStrFromServers(Seq(server))
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers)
producerProps.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compression)
producerProps.put(ProducerConfig.BATCH_SIZE_CONFIG, "66000")
producerProps.put(ProducerConfig.LINGER_MS_CONFIG, "200")
val producer = new KafkaProducer(producerProps, new ByteArraySerializer, new ByteArraySerializer)
val consumer = TestUtils.createConsumer(bootstrapServers)
try {
// create topic
TestUtils.createTopic(zkClient, topic, 1, 1, List(server))
val partition = 0
// prepare the messages
val messageValues = (0 until numRecords).map(i => "value" + i)
// make sure the returned messages are correct
val now = System.currentTimeMillis()
val responses = for (message <- messageValues)
yield producer.send(new ProducerRecord(topic, null, now, null, message.getBytes))
for ((future, offset) <- responses.zipWithIndex) {
assertEquals(offset.toLong, future.get.offset)
}
val tp = new TopicPartition(topic, partition)
      // make sure the fetched message count matches
consumer.assign(Collections.singleton(tp))
consumer.seek(tp, 0)
val records = TestUtils.consumeRecords(consumer, numRecords)
for (((messageValue, record), index) <- messageValues.zip(records).zipWithIndex) {
assertEquals(messageValue, new String(record.value))
assertEquals(now, record.timestamp)
assertEquals(index.toLong, record.offset)
}
} finally {
producer.close()
consumer.close()
}
}
}
object ProducerCompressionTest {
@Parameters(name = "{index} compressionType = {0}")
def parameters: Collection[Array[String]] = {
Seq(
Array("none"),
Array("gzip"),
Array("snappy"),
Array("lz4")
).asJava
}
}
| ollie314/kafka | core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala | Scala | apache-2.0 | 4,091 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import com.mongodb.async.client.{Observable => JObservable, Observer => JObserver}
import org.mongodb.scala.internal._
/**
* A companion object for [[Observable]]
*/
object Observable {
/**
* Creates an Observable from an Iterable.
*
   * Convenient for testing and/or debugging.
*
* @param from the iterable to create the observable from
* @tparam A the type of Iterable
* @return an Observable that emits each item from the Iterable
*/
def apply[A](from: Iterable[A]): Observable[A] = IterableObservable[A](from)
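  // Usage sketch (`myObserver` is a hypothetical Observer[Int] defined elsewhere):
  //
  //   val numbers: Observable[Int] = Observable(1 to 5)
  //   numbers.subscribe(myObserver)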
}
/**
* A `Observable` represents a MongoDB operation.
*
* As such it is a provider of a potentially unbounded number of sequenced elements, publishing them according to the demand received
* from its [[Observer]](s).
*
* @tparam T the type of element signaled.
*/
trait Observable[T] extends JObservable[T] {
/**
* Request `Observable` to start streaming data.
*
* This is a "factory method" and can be called multiple times, each time starting a new [[Subscription]].
* Each `Subscription` will work for only a single [[Observer]].
*
* If the `Observable` rejects the subscription attempt or otherwise fails it will signal the error via [[Observer.onError]].
*
* @param observer the `Observer` that will consume signals from this `Observable`
*/
def subscribe(observer: Observer[_ >: T]): Unit
/**
* Handles the automatic boxing of a Java `Observable` so it conforms to the interface.
*
* @note Users should not have to implement this method but rather use the Scala `Observable`.
* @param observer the `Observer` that will consume signals from this `Observable`
*/
override def subscribe(observer: JObserver[_ >: T]): Unit = subscribe(BoxedObserver[T](observer))
}
| rozza/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/Observable.scala | Scala | apache-2.0 | 2,405 |
package repositories
import java.util.UUID
import com.omis.EmpDetails
import scala.concurrent.Future
trait EmployeeRepository extends Repository {
import ctx._
val employees = quote(querySchema[Employee]("employees"))
val empDet = quote(querySchema[EmpDetails]("emp_details"))
def createEmpWithRole(employee: Employee, role: String, empDetails: EmpDetails): Future[String]
def byId(uuid: UUID) = quote {
employees.filter(_.id == lift(uuid))
}
def empDetailById(uuid: UUID) = quote {
empDet.filter(_.userId == lift(uuid))
}
}
| iriddhi/mis | server/src/main/scala/repositories/EmployeeRepository.scala | Scala | apache-2.0 | 555 |
package cilib
import zio.prelude._
import zio.prelude.newtypes.Natural
sealed abstract class Position[+A] {
import Position._
def map[B](f: A => B): Position[B] =
Point(pos.map(f), boundary)
def flatMap[B](f: A => Position[B]): Position[B] =
Point(pos.flatMap(f(_).pos), boundary)
def zip[B](other: Position[B]): Position[(A, B)] =
Point(pos.zip(other.pos), boundary)
def traverse[G[+_]: IdentityBoth: Covariant, B](f: A => G[B]): G[Position[B]] =
this.forEach(f)
def take(n: Int): List[A] =
pos.toChunk.take(n).toList
def drop(n: Int): List[A] =
pos.toChunk.drop(n).toList
def pos: NonEmptyVector[A] =
this match {
case Point(x, _) => x
case Solution(x, _, _) => x
}
def toPoint: Position[A] =
this match {
case Point(_, _) => this
case Solution(x, b, _) => Point(x, b)
}
def objective: Option[Objective] =
this match {
case Point(_, _) => None
case Solution(_, _, o) => Some(o)
}
def boundary: NonEmptyVector[Interval] =
this match {
case Point(_, b) => b
case Solution(_, b, _) => b
}
def forall(f: A => Boolean): Boolean =
pos.forall(f)
}
object Position {
private final case class Point[A](x: NonEmptyVector[A], b: NonEmptyVector[Interval]) extends Position[A]
private final case class Solution[A](x: NonEmptyVector[A], b: NonEmptyVector[Interval], o: Objective)
extends Position[A]
implicit def positionEqual[A: zio.prelude.Equal]: zio.prelude.Equal[Position[A]] =
zio.prelude.Equal.make[Position[A]] { (l, r) =>
l.pos === r.pos && l.boundary === r.boundary
}
implicit val positionForEach: ForEach[Position] =
new ForEach[Position] {
def forEach[G[+_]: IdentityBoth: Covariant, A, B](fa: Position[A])(f: A => G[B]): G[Position[B]] =
ForEach[NonEmptyVector].forEach(fa.pos)(f).map(Point(_, fa.boundary))
}
implicit val positionNonEmptyForEach: NonEmptyForEach[Position] =
new NonEmptyForEach[Position] {
def forEach1[G[+_]: AssociativeBoth: Covariant, A, B](fa: Position[A])(f: A => G[B]): G[Position[B]] =
NonEmptyForEach[NonEmptyVector].forEach1(fa.pos)(f).map(Point(_, fa.boundary))
}
implicit def positionDotProd[A](implicit A: scala.math.Numeric[A]): algebra.DotProd[Position, A] =
new algebra.DotProd[Position, A] {
def dot(a: Position[A], b: Position[A]): Double =
// FIXME: Is this actually wrong?
A.toDouble(a.zip(b).pos.foldLeft(A.zero) { case (a, b) => A.plus(a, A.times(b._1, b._2)) })
}
implicit def positionPointwise[A](implicit A: scala.math.Numeric[A]): algebra.Pointwise[Position, A] =
new algebra.Pointwise[Position, A] {
def pointwise(a: Position[A], b: Position[A]) =
a.zip(b).map(x => A.times(x._1, x._2))
}
implicit def positionVectorOps[A]: algebra.VectorOps[Position, A] =
new algebra.VectorOps[Position, A] {
def zeroed(a: Position[A])(implicit A: scala.math.Numeric[A]): Position[A] =
a.map(_ => A.zero)
def +(a: Position[A], b: Position[A])(implicit M: scala.math.Numeric[A]): Position[A] = {
val combined =
a.pos.zipAllWith(b.pos.toChunk)(identity, identity)(M.plus(_, _))
Point(combined, a.boundary)
}
def -(a: Position[A], b: Position[A])(implicit M: scala.math.Numeric[A]): Position[A] = {
val combined =
a.pos.zipAllWith(b.pos.toChunk)(identity, identity)(M.minus(_, _))
Point(combined, a.boundary)
}
def *:(scalar: A, a: Position[A])(implicit M: scala.math.Numeric[A]): Position[A] =
a.map(x => M.times(scalar, x))
def unary_-(a: Position[A])(implicit M: scala.math.Numeric[A]): Position[A] =
a.map(x => M.negate(x))
def isZero(a: Position[A])(implicit R: scala.math.Numeric[A]): Boolean =
a.pos.forall(_ == R.zero)
}
implicit class PositionVectorOps[A](private val x: Position[A]) extends AnyVal {
def zeroed(implicit A: scala.math.Numeric[A]): Position[A] =
x.map(_ => A.zero)
def +(other: Position[A])(implicit M: algebra.VectorOps[Position, A], A: scala.math.Numeric[A]): Position[A] =
M.+(x, other)
def -(other: Position[A])(implicit M: algebra.VectorOps[Position, A], A: scala.math.Numeric[A]): Position[A] =
M.-(x, other)
def *:(scalar: A)(implicit M: algebra.VectorOps[Position, A], A: scala.math.Numeric[A]): Position[A] =
M.*:(scalar, x)
def unary_-(implicit M: algebra.VectorOps[Position, A], A: scala.math.Numeric[A]): Position[A] =
M.unary_-(x)
def isZero(implicit R: scala.math.Numeric[A]): Boolean =
x.forall(_ == R.zero)
}
implicit def positionFitness[A]: Fitness[Position, A, A] =
new Fitness[Position, A, A] {
def fitness(a: Position[A]) =
a.objective
}
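  /** Evaluates a position against the given objective evaluator: a plain point is evaluated and
    * promoted to a solution carrying its objective, while an already-evaluated solution is
    * returned unchanged.
    */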
def eval[A](e: Eval[NonEmptyVector], pos: Position[A]): RVar[Position[A]] =
pos match {
case Point(x, b) =>
e.eval(x).map(s => Solution(x, b, s))
case x @ Solution(_, _, _) =>
RVar.pure(x)
}
def apply[A](xs: NonEmptyVector[A], b: NonEmptyVector[Interval]): Position[A] =
Point(xs, b)
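  /** Creates a position by sampling each dimension uniformly at random from the corresponding
    * interval of the given domain.
    */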
def createPosition[A](domain: NonEmptyVector[Interval]): RVar[Position[Double]] =
ForEach[NonEmptyVector]
.forEach(domain)(Dist.uniform)
.map(z => Position(z, domain))
def createPositions(
domain: NonEmptyVector[Interval],
n: Natural
): RVar[NonEmptyVector[Position[Double]]] =
createPosition(domain)
.replicateM(Natural.unwrap(n))
.map(list =>
NonEmptyVector
.fromIterableOption(list)
.getOrElse(sys.error("Impossible -> refinement requires n to be positive, i.e. n > 0"))
)
def createCollection[A](
f: Position[Double] => A
)(domain: NonEmptyVector[Interval], n: Natural): RVar[NonEmptyVector[A]] =
createPositions(domain, n).map(_.map(f))
}
| cirg-up/cilib | core/src/main/scala/cilib/Position.scala | Scala | apache-2.0 | 5,930 |
package chandu0101.scalajs.react.components.demo.routes
import chandu0101.scalajs.react.components.demo.routes.AppRouter.Page
import japgolly.scalajs.react.ReactElement
abstract class LeftRoute(val name : String, val route : String, val render : () => ReactElement)
| coreyauger/scalajs-react-components | demo/src/main/scala/chandu0101/scalajs/react/components/demo/routes/LeftRoute.scala | Scala | apache-2.0 | 267 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.features
// Putting this in its own file prevents IntelliJ from flagging every use as a compile error.
object SerializationType extends Enumeration {
type SerializationType = Value
val KRYO = Value("kryo")
val AVRO = Value("avro")
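  // Example (illustrative): values can be looked up by their wire name, e.g.
  //   SerializationType.withName("kryo") == SerializationType.KRYO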
}
| tkunicki/geomesa | geomesa-features/geomesa-feature-all/src/main/scala/org/locationtech/geomesa/features/SerializationType.scala | Scala | apache-2.0 | 734 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import java.util.concurrent.atomic.AtomicInteger
import java.util.{Properties, Random}
import kafka.api.{FetchRequestBuilder, OffsetRequest, PartitionOffsetRequestInfo}
import kafka.common.TopicAndPartition
import kafka.consumer.SimpleConsumer
import kafka.log.{Log, LogSegment}
import kafka.utils.TestUtils._
import kafka.utils._
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.utils.Time
import org.easymock.{EasyMock, IAnswer}
import org.junit.Assert._
import org.junit.{After, Before, Test}
class LogOffsetTest extends ZooKeeperTestHarness {
val random = new Random()
var logDir: File = null
var topicLogDir: File = null
var server: KafkaServer = null
var logSize: Int = 140
var simpleConsumer: SimpleConsumer = null
var time: Time = new MockTime()
@Before
override def setUp() {
super.setUp()
val config: Properties = createBrokerConfig(1)
config.put(KafkaConfig.LogMessageTimestampDifferenceMaxMsProp, Long.MaxValue.toString)
val logDirPath = config.getProperty("log.dir")
logDir = new File(logDirPath)
time = new MockTime()
server = TestUtils.createServer(KafkaConfig.fromProps(config), time)
simpleConsumer = new SimpleConsumer("localhost", TestUtils.boundPort(server), 1000000, 64*1024, "")
}
@After
override def tearDown() {
simpleConsumer.close
TestUtils.shutdownServers(Seq(server))
super.tearDown()
}
@Test
def testGetOffsetsForUnknownTopic() {
val topicAndPartition = TopicAndPartition("foo", 0)
val request = OffsetRequest(
Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.LatestTime, 10)))
val offsetResponse = simpleConsumer.getOffsetsBefore(request)
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION,
offsetResponse.partitionErrorAndOffsets(topicAndPartition).error)
}
@Test
def testGetOffsetsAfterDeleteRecords() {
val topicPartition = "kafka-" + 0
val topic = topicPartition.split("-").head
val part = Integer.valueOf(topicPartition.split("-").last).intValue
// setup brokers in ZooKeeper as owners of partitions for this test
adminZkClient.createTopic(topic, 1, 1)
val logManager = server.getLogManager
waitUntilTrue(() => logManager.getLog(new TopicPartition(topic, part)).isDefined,
"Log for partition [topic,0] should be created")
val log = logManager.getLog(new TopicPartition(topic, part)).get
for (_ <- 0 until 20)
log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0)
log.flush()
log.onHighWatermarkIncremented(log.logEndOffset)
log.maybeIncrementLogStartOffset(3)
log.deleteOldSegments()
val offsets = server.apis.fetchOffsets(logManager, new TopicPartition(topic, part), OffsetRequest.LatestTime, 15)
assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 3L), offsets)
waitUntilTrue(() => isLeaderLocalOnBroker(topic, part, server), "Leader should be elected")
val topicAndPartition = TopicAndPartition(topic, part)
val offsetRequest = OffsetRequest(
Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.LatestTime, 15)),
replicaId = 0)
val consumerOffsets =
simpleConsumer.getOffsetsBefore(offsetRequest).partitionErrorAndOffsets(topicAndPartition).offsets
assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 3L), consumerOffsets)
}
@Test
def testGetOffsetsBeforeLatestTime() {
val topicPartition = "kafka-" + 0
val topic = topicPartition.split("-").head
val part = Integer.valueOf(topicPartition.split("-").last).intValue
// setup brokers in ZooKeeper as owners of partitions for this test
adminZkClient.createTopic(topic, 1, 1)
val logManager = server.getLogManager
waitUntilTrue(() => logManager.getLog(new TopicPartition(topic, part)).isDefined,
"Log for partition [topic,0] should be created")
val log = logManager.getLog(new TopicPartition(topic, part)).get
for (_ <- 0 until 20)
log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0)
log.flush()
val offsets = server.apis.fetchOffsets(logManager, new TopicPartition(topic, part), OffsetRequest.LatestTime, 15)
assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 2L, 0L), offsets)
waitUntilTrue(() => isLeaderLocalOnBroker(topic, part, server), "Leader should be elected")
val topicAndPartition = TopicAndPartition(topic, part)
val offsetRequest = OffsetRequest(
Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.LatestTime, 15)),
replicaId = 0)
val consumerOffsets =
simpleConsumer.getOffsetsBefore(offsetRequest).partitionErrorAndOffsets(topicAndPartition).offsets
assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 2L, 0L), consumerOffsets)
// try to fetch using latest offset
val fetchResponse = simpleConsumer.fetch(
new FetchRequestBuilder().addFetch(topic, 0, consumerOffsets.head, 300 * 1024).build())
assertFalse(fetchResponse.messageSet(topic, 0).iterator.hasNext)
}
@Test
def testEmptyLogsGetOffsets() {
val topicPartition = "kafka-" + random.nextInt(10)
val topicPartitionPath = TestUtils.tempDir().getAbsolutePath + "/" + topicPartition
topicLogDir = new File(topicPartitionPath)
topicLogDir.mkdir()
val topic = topicPartition.split("-").head
// setup brokers in ZooKeeper as owners of partitions for this test
createTopic(zkUtils, topic, numPartitions = 1, replicationFactor = 1, servers = Seq(server))
var offsetChanged = false
for (_ <- 1 to 14) {
val topicAndPartition = TopicAndPartition(topic, 0)
val offsetRequest =
OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.EarliestTime, 1)))
val consumerOffsets =
simpleConsumer.getOffsetsBefore(offsetRequest).partitionErrorAndOffsets(topicAndPartition).offsets
if(consumerOffsets.head == 1) {
offsetChanged = true
}
}
assertFalse(offsetChanged)
}
@Test
def testGetOffsetsBeforeNow() {
val topicPartition = "kafka-" + random.nextInt(3)
val topic = topicPartition.split("-").head
val part = Integer.valueOf(topicPartition.split("-").last).intValue
// setup brokers in ZooKeeper as owners of partitions for this test
adminZkClient.createTopic(topic, 3, 1)
val logManager = server.getLogManager
val log = logManager.getOrCreateLog(new TopicPartition(topic, part), logManager.defaultConfig)
for (_ <- 0 until 20)
log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0)
log.flush()
val now = time.milliseconds + 30000 // pretend it is the future to avoid race conditions with the fs
val offsets = server.apis.fetchOffsets(logManager, new TopicPartition(topic, part), now, 15)
assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 2L, 0L), offsets)
waitUntilTrue(() => isLeaderLocalOnBroker(topic, part, server), "Leader should be elected")
val topicAndPartition = TopicAndPartition(topic, part)
val offsetRequest = OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(now, 15)), replicaId = 0)
val consumerOffsets =
simpleConsumer.getOffsetsBefore(offsetRequest).partitionErrorAndOffsets(topicAndPartition).offsets
assertEquals(Seq(20L, 18L, 16L, 14L, 12L, 10L, 8L, 6L, 4L, 2L, 0L), consumerOffsets)
}
@Test
def testGetOffsetsBeforeEarliestTime() {
val topicPartition = "kafka-" + random.nextInt(3)
val topic = topicPartition.split("-").head
val part = Integer.valueOf(topicPartition.split("-").last).intValue
// setup brokers in ZooKeeper as owners of partitions for this test
adminZkClient.createTopic(topic, 3, 1)
val logManager = server.getLogManager
val log = logManager.getOrCreateLog(new TopicPartition(topic, part), logManager.defaultConfig)
for (_ <- 0 until 20)
log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0)
log.flush()
val offsets = server.apis.fetchOffsets(logManager, new TopicPartition(topic, part), OffsetRequest.EarliestTime, 10)
assertEquals(Seq(0L), offsets)
waitUntilTrue(() => isLeaderLocalOnBroker(topic, part, server), "Leader should be elected")
val topicAndPartition = TopicAndPartition(topic, part)
val offsetRequest =
OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(OffsetRequest.EarliestTime, 10)))
val consumerOffsets =
simpleConsumer.getOffsetsBefore(offsetRequest).partitionErrorAndOffsets(topicAndPartition).offsets
assertEquals(Seq(0L), consumerOffsets)
}
/* We test that `fetchOffsetsBefore` works correctly if `LogSegment.size` changes after each invocation (simulating
* a race condition) */
@Test
def testFetchOffsetsBeforeWithChangingSegmentSize() {
val log = EasyMock.niceMock(classOf[Log])
val logSegment = EasyMock.niceMock(classOf[LogSegment])
EasyMock.expect(logSegment.size).andStubAnswer(new IAnswer[Int] {
private val value = new AtomicInteger(0)
def answer: Int = value.getAndIncrement()
})
EasyMock.replay(logSegment)
val logSegments = Seq(logSegment)
EasyMock.expect(log.logSegments).andStubReturn(logSegments)
EasyMock.replay(log)
server.apis.fetchOffsetsBefore(log, System.currentTimeMillis, 100)
}
/* We test that `fetchOffsetsBefore` works correctly if `Log.logSegments` content and size are
* different (simulating a race condition) */
@Test
def testFetchOffsetsBeforeWithChangingSegments() {
val log = EasyMock.niceMock(classOf[Log])
val logSegment = EasyMock.niceMock(classOf[LogSegment])
EasyMock.expect(log.logSegments).andStubAnswer {
new IAnswer[Iterable[LogSegment]] {
def answer = new Iterable[LogSegment] {
override def size = 2
def iterator = Seq(logSegment).iterator
}
}
}
EasyMock.replay(logSegment)
EasyMock.replay(log)
server.apis.fetchOffsetsBefore(log, System.currentTimeMillis, 100)
}
private def createBrokerConfig(nodeId: Int): Properties = {
val props = new Properties
props.put("broker.id", nodeId.toString)
props.put("port", TestUtils.RandomPort.toString())
props.put("log.dir", TestUtils.tempDir().getAbsolutePath)
props.put("log.flush.interval.messages", "1")
props.put("enable.zookeeper", "false")
props.put("num.partitions", "20")
props.put("log.retention.hours", "10")
props.put("log.retention.check.interval.ms", (5*1000*60).toString)
props.put("log.segment.bytes", logSize.toString)
props.put("zookeeper.connect", zkConnect.toString)
props
}
}
| themarkypantz/kafka | core/src/test/scala/unit/kafka/server/LogOffsetTest.scala | Scala | apache-2.0 | 11,758 |
import sbt._
import Keys._
import sbtrelease.ReleaseStateTransformations._
import sbtrelease.ReleasePlugin.autoImport._
import xerial.sbt.Sonatype._
import com.typesafe.sbt.pgp.PgpKeys
import dog.DogPlugin.autoImport._
object Common {
private def gitHash: String = scala.util.Try(
sys.process.Process("git rev-parse HEAD").lines_!.head
).getOrElse("master")
private[this] val unusedWarnings = (
"-Ywarn-unused" ::
"-Ywarn-unused-import" ::
Nil
)
private[this] val scala211 = "2.11.7"
lazy val commonSettings = Seq(
sonatypeSettings,
dogSettings
).flatten ++ Seq(
scalaVersion := scala211,
crossScalaVersions := Seq("2.10.5", scala211),
resolvers += Opts.resolver.sonatypeReleases,
scalacOptions ++= (
"-deprecation" ::
"-unchecked" ::
"-Xlint" ::
"-feature" ::
"-language:existentials" ::
"-language:higherKinds" ::
"-language:implicitConversions" ::
"-language:reflectiveCalls" ::
Nil
),
scalacOptions ++= PartialFunction.condOpt(CrossVersion.partialVersion(scalaVersion.value)){
case Some((2, v)) if v >= 11 => unusedWarnings
}.toList.flatten,
fullResolvers ~= {_.filterNot(_.name == "jcenter")},
dogVersion := Dependencies.Version.dog,
releaseProcess := Seq[ReleaseStep](
checkSnapshotDependencies,
inquireVersions,
runClean,
runTest,
setReleaseVersion,
commitReleaseVersion,
UpdateReadme.updateReadmeProcess,
tagRelease,
ReleaseStep(
action = { state =>
val extracted = Project extract state
extracted.runAggregated(PgpKeys.publishSigned in Global in extracted.get(thisProjectRef), state)
},
enableCrossBuild = true
),
setNextVersion,
commitNextVersion,
UpdateReadme.updateReadmeProcess,
pushChanges
),
credentials ++= PartialFunction.condOpt(sys.env.get("SONATYPE_USER") -> sys.env.get("SONATYPE_PASS")){
case (Some(user), Some(pass)) =>
Credentials("Sonatype Nexus Repository Manager", "oss.sonatype.org", user, pass)
}.toList,
organization := "com.github.pocketberserker",
homepage := Some(url("https://github.com/pocketberserker/dog-analyzer")),
licenses := Seq("MIT License" -> url("http://www.opensource.org/licenses/mit-license.php")),
pomExtra :=
<developers>
<developer>
<id>pocketberserker</id>
<name>Yuki Nakayama</name>
<url>https://github.com/pocketberserker</url>
</developer>
</developers>
<scm>
<url>[email protected]:pocketberserker/dog-analyzer.git</url>
<connection>scm:git:[email protected]:pocketberserker/dog-analyzer.git</connection>
<tag>{if(isSnapshot.value) gitHash else { "v" + version.value }}</tag>
</scm>
,
description := "analyze dog test cases",
pomPostProcess := { node =>
import scala.xml._
import scala.xml.transform._
def stripIf(f: Node => Boolean) = new RewriteRule {
override def transform(n: Node) =
if (f(n)) NodeSeq.Empty else n
}
val stripTestScope = stripIf { n => n.label == "dependency" && (n \\ "scope").text == "test" }
new RuleTransformer(stripTestScope).transform(node)(0)
}
) ++ Seq(Compile, Test).flatMap(c =>
scalacOptions in (c, console) ~= {_.filterNot(unusedWarnings.toSet)}
)
}
| pocketberserker/dog-analyzer | project/Common.scala | Scala | mit | 3,418 |
package com.github.novamage.svalidator.validation.simple
import com.github.novamage.svalidator.validation.simple.internals.SimpleValidationRuleStructureContainer
import testUtils.Observes
class SimpleListValidationRuleStarterBuilderSpecs extends Observes {
case class SampleValidatedClass(a: String, b: Long) {
}
describe("when building rules using the Simple Validation Rule Builder") {
val instance = SampleValidatedClass("firstValue", 8L)
describe("and build rules is called with a current rule structure that is not null") {
val property_expression = stubUnCallableFunction[SampleValidatedClass, List[Long]]
val rule_expression = stubUnCallableFunction[Long, SampleValidatedClass, Boolean]
val rule_structure_container = SimpleValidationRuleStructureContainer[SampleValidatedClass, Long](rule_expression, None, None, None, Map.empty)
val sut = new SimpleListValidationRuleContinuationBuilder[SampleValidatedClass, Long, Nothing](property_expression, Some(rule_structure_container), List(), "fieldName", false, None, None, None)
val result = sut.buildRules(instance)
it("should return a list with as many rules as rule expressions passed in") {
result.chains should have size 1
result.chains.head.mainStream should have size 1
}
}
/*
* Other behavior of this class is tested on integration tests.
* Testing more behavior as a unit test would imply making some members public which
* is not desired at this time.
*/
}
}
| NovaMage/SValidator | src/test/scala/com/github/novamage/svalidator/validation/simple/SimpleListValidationRuleStarterBuilderSpecs.scala | Scala | mit | 1,538 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package connectors.cache
import config.ApplicationConfig
import org.mockito.Mockito._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import play.api.libs.json.{JsBoolean, JsString, JsValue, Json}
import services.cache.{Cache, MongoCacheClient, MongoCacheClientFactory}
import utils.AmlsSpec
import scala.concurrent.{ExecutionContext, Future}
class DataCacheConnectorSpec
extends AmlsSpec
with Conversions
with ScalaFutures
with ScalaCheckPropertyChecks
with IntegrationPatience {
case class Model(value: String)
object Model {
implicit val format = Json.format[Model]
}
trait Fixture {
val key = "key"
val oId = "oldId"
val credId = "12345678"
val cache = Cache(oId, referenceMap())
val newCache = cache.copy(id = credId)
implicit val ec = mock[ExecutionContext]
val factory = mock[MongoCacheClientFactory]
val client = mock[MongoCacheClient]
when(factory.createClient) thenReturn client
val appConfig = mock[ApplicationConfig]
val dataCacheConnector = new MongoCacheConnector(factory) {
override lazy val mongoCache: MongoCacheClient = mock[MongoCacheClient]
}
}
def referenceMap(str1: String = "", str2: String = ""): Map[String, JsValue] = Map(
"dataKey" -> JsBoolean(true),
"name" -> JsString(str1),
"obj" -> Json.obj(
"prop1" -> str2,
"prop2" -> 12
)
)
"DataCacheConnector" must {
"save data to Mongo" in new Fixture {
val model = Model("data")
when {
dataCacheConnector.mongoCache.createOrUpdate(credId, model, key)
} thenReturn Future.successful(newCache)
whenReady(dataCacheConnector.save(credId, key, model)) { result =>
result mustBe toCacheMap(newCache)
result.id mustBe credId
}
}
"fetch saved data from Mongo" in new Fixture {
val model = Model("data")
when {
dataCacheConnector.mongoCache.find[Model](credId, key)
} thenReturn Future.successful(Some(model))
whenReady(dataCacheConnector.fetch[Model](credId, key)) { _ mustBe Some(model) }
}
"fetch all data from Mongo" in new Fixture {
when {
dataCacheConnector.mongoCache.fetchAll(Some(credId))
} thenReturn Future.successful(Some(newCache))
whenReady(dataCacheConnector.fetchAll(credId)) { _ mustBe Some(toCacheMap(newCache)) }
}
"remove data from Mongo for CredId" in new Fixture {
when {
dataCacheConnector.mongoCache.removeById(credId)
} thenReturn Future.successful(true)
whenReady(dataCacheConnector.remove(credId)) {
_ mustBe true
}
}
}
} | hmrc/amls-frontend | test/connectors/cache/DataCacheConnectorSpec.scala | Scala | apache-2.0 | 3,327 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.items.armor
import com.anathema_roguelike.entities.items.{Item, ItemPropertyCache}
import com.anathema_roguelike.environment.Location
import com.anathema_roguelike.main.display.Color
import squidpony.squidgrid.gui.gdx.SColor
import com.anathema_roguelike.entities.characters.Character
abstract class Armor(armorType: ArmorType, material: ArmorMaterial) extends Item {
applyEffect(armorType.getEffect)
applyEffect(material.getEffect)
def this(armorType: String, material: String) {
this(
ItemPropertyCache.getProperty(classOf[ArmorType], armorType),
ItemPropertyCache.getProperty(classOf[ArmorMaterial], material)
)
}
def this(armorType: String, material: ArmorMaterial) {
this(ItemPropertyCache.getProperty(classOf[ArmorType], armorType), material)
}
def this(armorType: ArmorType, material: String) {
this(armorType, ItemPropertyCache.getProperty(classOf[ArmorMaterial], material))
}
def getType: ArmorType = armorType
def getMaterial: ArmorMaterial = material
override def toString: String = material.getName + " " + armorType.getName
def getColor: SColor = material.getName match {
case ArmorMaterial.UMBRALSILK | ArmorMaterial.SHADOWEAVE | ArmorMaterial.BLACKSTEEL =>
Color.DARK_GRAY
case ArmorMaterial.CLOTH | ArmorMaterial.SILENAI_CRYSTAL =>
Color.WHITE
case ArmorMaterial.LEATHER | ArmorMaterial.DRAGONHIDE =>
Color.LIGHT_BROWN
case ArmorMaterial.CHAINMAIL | ArmorMaterial.COLD_IRON | ArmorMaterial.MITHRIL | ArmorMaterial.MAGEPLATE | ArmorMaterial.PLATE =>
Color.GRAY
case _ =>
Color.ERROR
}
}
| carlminden/anathema-roguelike | src/com/anathema_roguelike/entities/items/armor/Armor.scala | Scala | gpl-3.0 | 2,512 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package synthesis
package rules
import purescala.Path
import purescala.Expressions._
import purescala.ExprOps._
import purescala.Constructors._
import purescala.Types._
case object InputSplit extends Rule("In. Split") {
def instantiateOn(implicit hctx: SearchContext, p: Problem): Traversable[RuleInstantiation] = {
p.allAs.filter(_.getType == BooleanType).flatMap { a =>
def getProblem(v: Boolean): Problem = {
def replaceA(e: Expr) = replaceFromIDs(Map(a -> BooleanLiteral(v)), e)
val newPc: Path = {
val withoutA = p.pc -- Set(a) map replaceA
withoutA withConds (p.pc.bindings.collectFirst { case (`a`, res) =>
if (v) res else not(res)
})
}
p.copy(
as = p.as.filterNot(_ == a),
ws = replaceA(p.ws),
pc = newPc,
phi = replaceA(p.phi),
eb = p.qeb.removeIns(Set(a))
)
}
val sub1 = getProblem(true)
val sub2 = getProblem(false)
val onSuccess: List[Solution] => Option[Solution] = {
case List(s1, s2) =>
val pre = cases(List(
Variable(a) -> s1.pre,
Not(Variable(a)) -> s2.pre
))
Some(Solution(pre,
s1.defs ++ s2.defs,
IfExpr(Variable(a), s1.term, s2.term),
s1.isTrusted && s2.isTrusted
))
case _ =>
None
}
Some(decomp(List(sub1, sub2), onSuccess, s"Split on '$a'"))
}
}
}
| regb/leon | src/main/scala/leon/synthesis/rules/InputSplit.scala | Scala | gpl-3.0 | 1,598 |
package svstm.transactions
import svstm.exceptions.WriteOnReadTransactionException
import svstm.vbox.VBox
class ReadTransaction(number: Int, parent: ReadTransaction = null) extends Transaction(number, parent) {
def this(parent: ReadTransaction) = this(parent.number, parent)
def getBoxValue[T](vbox: VBox[T]): T = vbox.body.getBody(number).value
def setBoxValue[T](vbox: VBox[T], value: T): Unit = throw WriteOnReadTransactionException
def doCommit(): Unit = {
//nothing to do
}
def makeNestedTransaction: Transaction = {
new ReadTransaction(this)
}
} | fcristovao/SVSTM | src/main/scala/svstm/transactions/ReadTransaction.scala | Scala | apache-2.0 | 583 |
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert
package network
package client
import com.linkedin.norbert.cluster._
import com.linkedin.norbert.network.client.loadbalancer.{LoadBalancer, LoadBalancerFactory, LoadBalancerFactoryComponent}
import com.linkedin.norbert.network.common.{BaseNetworkClientSpecification, ClusterIoClientComponent}
import org.specs2.mock.Mockito
import org.specs2.mutable.SpecificationWithJUnit
class NetworkClientSpec extends SpecificationWithJUnit with Mockito {
trait NetworkClientSetup extends BaseNetworkClientSpecification {
val networkClient = new NetworkClient with ClusterClientComponent with ClusterIoClientComponent with LoadBalancerFactoryComponent {
val lb = mock[LoadBalancer]
val loadBalancerFactory = mock[LoadBalancerFactory]
val clusterIoClient = mock[ClusterIoClient]
val clusterClient = NetworkClientSetup.this.clusterClient
}
}
// networkClient.messageRegistry.contains(any[Message]) returns true
"NetworkClient" should {
"provide common functionality" in new NetworkClientSetup {
sharedFunctionality
}
"throw ClusterDisconnectedException if the cluster is disconnected when a method is called" in new NetworkClientSetup {
networkClient.start
networkClient.broadcastMessage(request) must throwA[ClusterDisconnectedException]
networkClient.sendRequestToNode(request, nodes(1)) must throwA[ClusterDisconnectedException]
networkClient.sendRequest(request) must throwA[ClusterDisconnectedException]
networkClient.sendMessage(request) must throwA[ClusterDisconnectedException]
}
"continue to operating with the last known router configuration if the cluster is disconnected" in new NetworkClientSetup {
clusterClient.addListener(any[ClusterListener]) returns ClusterListenerKey(1)
clusterClient.nodes returns nodeSet
}
"throw ClusterShutdownException if the cluster is shut down when a method is called" in new NetworkClientSetup {
networkClient.shutdown
networkClient.broadcastMessage(request) must throwA[NetworkShutdownException]
networkClient.sendRequestToNode(request, nodes(1)) must throwA[NetworkShutdownException]
networkClient.sendRequest(request) must throwA[NetworkShutdownException]
networkClient.sendMessage(request) must throwA[NetworkShutdownException]
}
"send the provided message to the node specified by the load balancer for sendMessage" in new NetworkClientSetup {
clusterClient.nodes returns nodeSet
clusterClient.isConnected returns true
networkClient.clusterIoClient.nodesChanged(nodeSet) returns endpoints
networkClient.loadBalancerFactory.newLoadBalancer(endpoints) returns networkClient.lb
networkClient.lb.nextNode(None, None) returns Some(nodes(1))
// doNothing.when(clusterIoClient).sendMessage(node, message, null)
networkClient.start
networkClient.sendRequest(request) must_!= beNull
there was one(networkClient.lb).nextNode(None, None)
// clusterIoClient.sendMessage(node, message, null) was called
}
"send the provided message to the node specified by the load balancer for sendRequest with the requested capability " in new NetworkClientSetup {
clusterClient.nodes returns nodeSet
clusterClient.isConnected returns true
networkClient.clusterIoClient.nodesChanged(nodeSet) returns endpoints
networkClient.loadBalancerFactory.newLoadBalancer(endpoints) returns networkClient.lb
networkClient.lb.nextNode(Some(0x1), Some(2L)) returns Some(nodes(1))
networkClient.start
networkClient.sendRequest(request, Some(1L), Some(2L)) must_!= beNull
there was one(networkClient.lb).nextNode(Some(0x1), Some(2L))
there was no(networkClient.lb).nextNode(None, None)
}
"send the provided message to the node specified by the load balancer for sendMessage with the requested capability " in new NetworkClientSetup {
clusterClient.nodes returns nodeSet
clusterClient.isConnected returns true
networkClient.clusterIoClient.nodesChanged(nodeSet) returns endpoints
networkClient.loadBalancerFactory.newLoadBalancer(endpoints) returns networkClient.lb
networkClient.lb.nextNode(Some(0x1), Some(2L)) returns Some(nodes(1))
networkClient.start
networkClient.sendMessage(request, Some(1L), Some(2L)) must_!= beNull
there was one(networkClient.lb).nextNode(Some(0x1), Some(2L))
there was no(networkClient.lb).nextNode(None, None)
}
"retryCallback should propagate server exception to underlying when" in new NetworkClientSetup {
val MAX_RETRY = 3
var either: Either[Throwable, Ping] = null
val callback = (e: Either[Throwable, Ping]) => either = e
"exception does not provide RequestAccess" in {
networkClient.retryCallback[Ping, Ping](callback, 0, None, None)(Left(new Exception))
either must_!= beNull
either.isLeft must beTrue
}
"request.retryAttempt >= maxRetry" in {
val req: Request[Ping, Ping] = spy(Request[Ping, Ping](null, null, null, null, Some(callback), MAX_RETRY))
val ra: Exception with RequestAccess[Request[Ping, Ping]] = new Exception with RequestAccess[Request[Ping, Ping]] {
def request = req
}
networkClient.retryCallback[Ping, Ping](callback, MAX_RETRY, None, None)(Left(ra))
either must_!= beNull
either.isLeft must beTrue
either.left.get mustEqual ra
}
"cannot locate next available node" in {
clusterClient.nodes returns nodeSet
clusterClient.isConnected returns true
networkClient.clusterIoClient.nodesChanged(nodeSet) returns endpoints
networkClient.loadBalancerFactory.newLoadBalancer(endpoints) returns networkClient.lb
networkClient.lb.nextNode returns None
networkClient.start
networkClient.retryCallback[Ping, Ping](callback, MAX_RETRY, None, None)(Left(new RemoteException("FooClass", "ServerError")))
either must_!= beNull
either.isLeft must beTrue
either.left.get must haveClass[RemoteException]
}
"next node is same as failing node" in {
clusterClient.nodes returns nodeSet
clusterClient.isConnected returns true
networkClient.clusterIoClient.nodesChanged(nodeSet) returns endpoints
networkClient.loadBalancerFactory.newLoadBalancer(endpoints) returns networkClient.lb
networkClient.lb.nextNode returns Some(nodes(1))
networkClient.start
var req: Request[Ping, Ping] = spy(Request[Ping, Ping](null, nodes(1), null, null, Some(callback)))
val ra: Exception with RequestAccess[Request[Ping, Ping]] = new Exception with RequestAccess[Request[Ping, Ping]] {
def request = req
}
networkClient.retryCallback[Ping, Ping](callback, MAX_RETRY, None, None)(Left(ra))
either must_!= beNull
either.isLeft must beTrue
either.left.get mustEqual ra
}
"sendMessage: MAX_RETRY reached" in {
var either: Either[Throwable, Ping] = null
val callback = (e: Either[Throwable, Ping]) => either = e
val networkClient2 = new NetworkClient with ClusterClientComponent with ClusterIoClientComponent with LoadBalancerFactoryComponent {
val lb = new LoadBalancer {
val iter = nodes.iterator
def nextNode(capability: Option[Long], permanentCapability: Option[Long]) = Some(iter.next)
}
val loadBalancerFactory = mock[LoadBalancerFactory]
val clusterIoClient = new ClusterIoClient {
var invocationCount: Int = 0
def sendMessage[RequestMsg, ResponseMsg](node: Node, requestCtx: Request[RequestMsg, ResponseMsg]) {
invocationCount += 1
requestCtx.onFailure(new Exception with RequestAccess[Request[RequestMsg, ResponseMsg]] {
def request = requestCtx
})
}
def nodesChanged(nodes: Set[Node]) = {
endpoints
}
def shutdown {}
}
override val clusterClient: ClusterClient = this.clusterClient
}
networkClient2.clusterClient.nodes returns nodeSet
networkClient2.clusterClient.isConnected returns true
networkClient2.loadBalancerFactory.newLoadBalancer(endpoints) returns networkClient2.lb
networkClient2.start
networkClient2.sendRequest(request, callback, MAX_RETRY)
networkClient2.clusterIoClient.invocationCount mustEqual MAX_RETRY
either must_!= null
either.isLeft must beTrue
}
}
"throw InvalidClusterException if there is no load balancer instance when sendRequest is called" in new NetworkClientSetup {
clusterClient.nodes returns nodeSet
clusterClient.isConnected returns true
networkClient.clusterIoClient.nodesChanged(nodeSet) returns endpoints
networkClient.loadBalancerFactory.newLoadBalancer(endpoints) throws new InvalidClusterException("")
// doNothing.when(clusterIoClient).sendMessage(node, message, null)
networkClient.start
networkClient.sendRequest(request) must throwA[InvalidClusterException]
// clusterIoClient.sendMessage(node, message, null) wasnt called
}
"throw InvalidClusterException if there is no load balancer instance when sendMessage is called" in new NetworkClientSetup {
clusterClient.nodes returns nodeSet
clusterClient.isConnected returns true
networkClient.clusterIoClient.nodesChanged(nodeSet) returns endpoints
networkClient.loadBalancerFactory.newLoadBalancer(endpoints) throws new InvalidClusterException("")
// doNothing.when(clusterIoClient).sendMessage(node, message, null)
networkClient.start
networkClient.sendMessage(request) must throwA[InvalidClusterException]
// clusterIoClient.sendMessage(node, message, null) wasnt called
}
"throw NoSuchNodeException if load balancer returns None when sendRequest is called" in new NetworkClientSetup {
clusterClient.nodes returns nodeSet
clusterClient.isConnected returns true
networkClient.clusterIoClient.nodesChanged(nodeSet) returns endpoints
networkClient.loadBalancerFactory.newLoadBalancer(endpoints) returns networkClient.lb
networkClient.lb.nextNode(None, None) returns None
// doNothing.when(clusterIoClient).sendMessage(node, message, null)
networkClient.start
networkClient.sendRequest(request) must throwA[NoNodesAvailableException]
there was one(networkClient.lb).nextNode(None, None)
// clusterIoClient.sendMessage(node, message, null) wasnt called
}
"throw NoSuchNodeException if load balancer returns None when sendMessage is called" in new NetworkClientSetup {
clusterClient.nodes returns nodeSet
clusterClient.isConnected returns true
networkClient.clusterIoClient.nodesChanged(nodeSet) returns endpoints
networkClient.loadBalancerFactory.newLoadBalancer(endpoints) returns networkClient.lb
networkClient.lb.nextNode(None, None) returns None
// doNothing.when(clusterIoClient).sendMessage(node, message, null)
networkClient.start
networkClient.sendMessage(request) must throwA[NoNodesAvailableException]
there was one(networkClient.lb).nextNode(None, None)
// clusterIoClient.sendMessage(node, message, null) wasnt called
}
}
} | linkedin/norbert | network/src/test/scala/com/linkedin/norbert/network/client/NetworkClientSpec.scala | Scala | apache-2.0 | 12,143 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule
/**
* Message definitions for request-response protocols.
*
* Molecule supports request protocols for which there either is a single response
* or a stream of messages.
*/
package object request {
type ResponseStreamChannel[-A] = channel.OChan[A]
type ResponseChannel[-A] = channel.ROChan[A]
} | molecule-labs/molecule | molecule-core/src/main/scala/molecule/request/package.scala | Scala | apache-2.0 | 1,045 |
package gust.util
import breeze.util.SerializableLogging
import scala.reflect.ClassTag
import org.bridj.{PointerIO, Pointer}
import jcuda.runtime.{cudaStream_t, JCuda}
import jcuda.jcublas.{cublasOperation, JCublas2, cublasHandle}
import jcuda.driver.{JCudaDriver, CUcontext, CUfunction, CUstream}
import jcuda.NativePointerObject
import breeze.macros.arityize
/**
* TODO
*
* @author dlwh
**/
package object cuda extends SerializableLogging {
type CuPointer = jcuda.Pointer
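  /** Allocates `size` elements of device memory with cudaMalloc and wraps the raw address in a
    * BridJ pointer whose releaser frees the device allocation when the pointer is released.
    * Throws OutOfMemoryError if enough device memory cannot be found even after forcing a GC.
    */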
def allocate[V:ClassTag](size: Long): Pointer[V] = {
val ptr = new CuPointer()
val tpe = implicitly[ClassTag[V]].runtimeClass
val io = PointerIO.getInstance[V](tpe)
val ok: Boolean = hasFreeMemory(size * io.getTargetSize)
if(!ok) {
throw new OutOfMemoryError(s"CUDA Memory")//: need $size bytes, but only have ${free(0)}. Tried running the GC to no avail.")
}
JCuda.cudaMalloc(ptr, size * io.getTargetSize)
Pointer.pointerToAddress(nativePtr(ptr), size, DeviceFreeReleaser).as(io)
}
def hasFreeMemory(size: Long): Boolean = {
val free, total = Array[Long](0)
JCudaDriver.cuMemGetInfo(free, total)
val ok = (free(0) >= size) || {
logger.debug("Running GC because we're running low on RAM!")
System.gc()
Runtime.getRuntime.runFinalization()
JCudaDriver.cuMemGetInfo(free, total)
free(0) >= size
}
ok
}
def allocateHost[V:ClassTag](size: Long):Pointer[V] = {
val ptr = new CuPointer()
val tpe = implicitly[ClassTag[V]].runtimeClass
val io = PointerIO.getInstance[V](tpe)
JCuda.cudaMallocHost(ptr, size * io.getTargetSize)
Pointer.pointerToAddress(nativePtr(ptr), size * io.getTargetSize, HostFreeReleaser).as(io)
}
def cuPointerToArray[T](array: Array[T]): jcuda.Pointer = array match {
case array: Array[Int] => jcuda.Pointer.to(array)
case array: Array[Byte] => jcuda.Pointer.to(array)
case array: Array[Long] => jcuda.Pointer.to(array)
case array: Array[Short] => jcuda.Pointer.to(array)
case array: Array[Float] => jcuda.Pointer.to(array)
case array: Array[Double] => jcuda.Pointer.to(array)
case _ => throw new UnsupportedOperationException("Can't deal with this array type!")
}
implicit class enrichBridjPtr[T](val pointer: Pointer[T]) extends AnyVal {
def toCuPointer = {
assert(pointer != null)
fromNativePtr(pointer.getPeer)
}
}
private object DeviceFreeReleaser extends Pointer.Releaser {
def release(p: Pointer[_]): Unit = {
val ptr = fromNativePtr(p.getPeer)
JCuda.cudaFree(ptr)
}
}
private object HostFreeReleaser extends Pointer.Releaser {
def release(p: Pointer[_]): Unit = {
val ptr = fromNativePtr(p.getPeer)
JCuda.cudaFreeHost(ptr)
}
}
private object NoReleaser extends Pointer.Releaser {
def release(p: Pointer[_]): Unit = {
}
}
def cupointerToPointer[T](pointer: CuPointer, size: Int, io: PointerIO[T]):Pointer[T] = {
Pointer.pointerToAddress(nativePtr(pointer), size * io.getTargetSize, NoReleaser).as(io)
}
def cupointerToPointer[_](pointer: CuPointer):Pointer[_] = {
Pointer.pointerToAddress(nativePtr(pointer), NoReleaser).offset(stealByteOffset(pointer))
}
private def nativePtr(pointer: CuPointer) = {
val m = classOf[NativePointerObject].getDeclaredMethod("getNativePointer")
m.setAccessible(true)
m.invoke(pointer).asInstanceOf[java.lang.Long].longValue()
}
private def stealByteOffset(pointer: CuPointer) = {
val m = classOf[CuPointer].getDeclaredField("byteOffset")
m.setAccessible(true)
m.get(pointer).asInstanceOf[java.lang.Long].longValue()
}
private def fromNativePtr(peer: Long, offset: Long = 0) = {
val m = classOf[CuPointer].getDeclaredConstructor(java.lang.Long.TYPE)
m.setAccessible(true)
m.newInstance(java.lang.Long.valueOf(peer)).withByteOffset(offset)
}
implicit def cudaStreamToCuStream(s: CUstream) = new cudaStream_t(s)
implicit class richBlas(val blas: cublasHandle) extends AnyVal {
def withStream[T](stream: cudaStream_t)(block: => T) = blas.synchronized {
val olds = new cudaStream_t()
JCublas2.cublasGetStream(blas, olds)
JCublas2.cublasSetStream(blas, stream)
val res = block
JCublas2.cublasSetStream(blas, olds)
res
}
}
@arityize(10)
class CuKernel[@arityize.replicate T](module: CuModule, fn: CUfunction) {
def apply(gridDims: Dim3 = Dim3.default, blockDims: Dim3 = Dim3.default, sharedMemorySize: Int = 0)(@arityize.replicate t: T @arityize.relative(t))(implicit context: CuContext):Unit = {
CuKernel.invoke(fn, gridDims, blockDims, sharedMemorySize)((t: @arityize.replicate ))
}
}
}
| dlwh/gust | src/main/scala/gust/util/cuda/package.scala | Scala | apache-2.0 | 4,711 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools {
import org.scalatest.FunSuite
import org.scalatest.Outcome
import org.scalatools.testing.{Event, EventHandler, Result, Logger, Runner => TestingRunner}
// testing runner.run:
// def run(testClassName: String, fingerprint: TestFingerprint, args: Array[String]): Array[Event]
class ScalaTestRunnerSuite extends FunSuite {
test("call with simple class") {
val results = run("org.scalatest.tools.test.SimpleTest")
assert(results(0).testName === "hello, world")
assert(results(0).result === Result.Success)
}
test("three different results") {
val results = run("org.scalatest.tools.test.ThreeTestsTest")
assert(results(0).testName === "hello, world")
assert(results(0).result === Result.Success)
assert(results(1).testName === "throw")
assert(results(1).result === Result.Failure)
assert(results(1).error.getMessage === "baah")
assert(results(2).testName === "assert bad")
assert(results(2).result === Result.Failure)
assert(results(2).error.getMessage === "1 did not equal 3")
assert(results.size === 3)
}
test("one tag included") {
val results = run("org.scalatest.tools.test.TagsTest", "-n hello")
assert(results(0).testName === "hello, world")
assert(results(0).result === Result.Success)
assert(results.size === 1)
}
test("two tags included") {
val results = run("org.scalatest.tools.test.TagsTest", Array("-n", "hello helloAgain"))
assert(results(0).testName === "hello, world")
assert(results(0).result === Result.Success)
assert(results(1).testName === "hello, world again")
assert(results(1).result === Result.Success)
assert(results.size === 2)
}
test("one tag excluded") {
val results = run("org.scalatest.tools.test.TagsTest", Array("-l", "hello"))
assert(results(0).testName === "hello, world again")
assert(results(0).result === Result.Success)
assert(results(1).testName === "tag3")
assert(results(1).result === Result.Success)
assert(results(2).testName === "throw")
assert(results(2).result === Result.Failure)
assert(results(2).error.getMessage === "baah")
assert(results(3).testName === "assert bad")
assert(results(3).result === Result.Failure)
assert(results(3).error.getMessage === "1 did not equal 3")
assert(results.size === 4)
}
test("configs") {
val results = run("org.scalatest.tools.test.TestWithConfigMap", "-Djosh=cool")
assert(results(0).testName === "get config")
assert(results(0).result === Result.Success)
val resultsF = run("org.scalatest.tools.test.TestWithConfigMap", "-Djosh=bad")
assert(resultsF(0).testName === "get config")
assert(resultsF(0).result === Result.Failure)
      assert(resultsF(0).error.getMessage === "\"[bad]\" did not equal \"[cool]\"")
}
test("configs 2"){
val results = run("org.scalatest.tools.test.TestWithConfigMap2", "-Da=z -Db=y -Dc=x")
assert(results(0).testName === "get config")
assert(results(0).result === Result.Success)
}
test("illegal arg on private constructor"){
intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.PrivateConstructor")
}
}
test("skipped test results in Result.Skipped") {
val results = run("org.scalatest.tools.test.SuiteWithSkippedTest")
assert(results.size === 2)
assert(results(0).testName === "dependeeThatFails")
assert(results(0).result === Result.Failure)
assert(results(0).error.getMessage === "fail")
assert(results(1).testName === "depender")
assert(results(1).result === Result.Skipped)
}
test("pending test results in Result.Skipped") {
val results = run("org.scalatest.tools.test.PendingTest")
assert(results.size === 1)
assert(results(0).testName === "i am pending")
assert(results(0).result === Result.Skipped)
}
def runner: TestingRunner = {
new ScalaTestFramework().testRunner(Thread.currentThread.getContextClassLoader, Array(new TestLogger))
}
val fingerprint = {
val fingerprints = new ScalaTestFramework().tests
fingerprints(0).
asInstanceOf[org.scalatools.testing.TestFingerprint]
}
def run(classname: String): Array[Event] = run(classname, Array[String]())
def run(classname: String, args:String): Array[Event] = run(classname, args.split(" "))
def run(classname: String, args:Array[String]): Array[Event] = {
// val buf = scala.collection.mutable.ArrayBuffer[Event]() // Only worked under 2.8
val buf = new scala.collection.mutable.ArrayBuffer[Event]
val listener = new EventHandler {
def handle(event: Event) {
buf += event
}
}
runner.run(classname, fingerprint, listener, args)
buf.toArray
}
class TestLogger extends Logger {
def trace(t:Throwable) {}
def error(msg: String) {}
def warn(msg: String) {}
def info(msg: String) {}
def debug(msg: String) {}
def ansiCodesSupported = false
}
}
package test{
private class SimpleTest extends FunSuite {
test("hello, world") {"hello, world"}
}
private class ThreeTestsTest extends FunSuite {
test("hello, world") {"hello, world"}
test("throw") {throw new Exception("baah")}
test("assert bad") {assert(1 === 3)}
}
import org.scalatest.fixture
private class TestWithConfigMap extends fixture.FunSuite {
type FixtureParam = String
override def withFixture(test: OneArgTest): Outcome = {
test(test.configMap("josh").toString)
}
test("get config"){ conf => assert(conf === "cool") }
}
private class TestWithConfigMap2 extends fixture.FunSuite {
type FixtureParam = Map[String,Any]
override def withFixture(test: OneArgTest): Outcome = {
test(test.configMap)
}
test("get config"){ conf => assert(conf === Map("a" -> "z", "b" -> "y", "c" -> "x")) }
}
private class TagsTest extends FunSuite {
test("hello, world", org.scalatest.Tag("hello")) {"hello, world"}
test("hello, world again", org.scalatest.Tag("helloAgain")) {"hello, world again"}
test("tag3", org.scalatest.Tag("tag3")) {"tag3"}
test("throw") {throw new Exception("baah")}
test("assert bad") {assert(1 === 3)}
}
private class PrivateConstructor private() extends FunSuite
private class PendingTest extends FunSuite {
test("i am pending")(pending)
}
import org.scalatest.testng.TestNGSuite
private class SuiteWithSkippedTest extends TestNGSuite {
import org.testng.annotations.Test
@Test(groups = Array("run")) def dependeeThatFails() { throw new Exception("fail") }
@Test(dependsOnGroups = Array("run")) def depender() {}
}
}
}
| svn2github/scalatest | src/test/scala/org/scalatest/tools/ScalaTestRunnerSuite.scala | Scala | apache-2.0 | 7,777 |
/*
* Seldon -- open source prediction engine
* =======================================
* Copyright 2011-2015 Seldon Technologies Ltd and Rummble Ltd (http://www.seldon.io/)
*
**********************************************************************************************
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**********************************************************************************************
*/
package io.seldon.spark.mllib
import org.apache.log4j.Level
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.Vectors._
import org.apache.spark.mllib.linalg.SparseVector
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import org.apache.spark.mllib.linalg.distributed.MatrixEntry
import io.seldon.spark.SparkUtils
import scala.util.Random
import io.seldon.spark.zookeeper.ZkCuratorHandler
case class SimilarItemsConfig(
client : String = "",
inputPath : String = "/seldon-models",
outputPath : String = "/seldon-models",
startDay : Int = 1,
days : Int = 1,
awsKey : String = "",
awsSecret : String = "",
local : Boolean = false,
zkHosts : String = "",
itemType : Int = -1,
limit : Int = 100,
minItemsPerUser : Int = 0,
minUsersPerItem : Int = 0,
maxUsersPerItem : Int = 2000000,
dimsumThreshold : Double = 0.1,
sample : Double = 1.0
)
class SimilarItems(private val sc : SparkContext,config : SimilarItemsConfig) {
def parseJson(path : String,itemType : Int,sample : Double) = {
val rdd = sc.textFile(path).flatMap{line =>
import org.json4s._
import org.json4s.jackson.JsonMethods._
implicit val formats = DefaultFormats
val rand = new Random()
val json = parse(line)
val user = (json \ "userid").extract[Int]
val item = (json \ "itemid").extract[Int]
val itype = (json \ "type").extract[Int]
if (itemType == -1 || itype == itemType)
{
if (rand.nextDouble() < sample)
Seq((item,user))
else
None
}
else
None
}
rdd
}
def sortAndLimit(similarities : org.apache.spark.rdd.RDD[MatrixEntry],limit : Int) = {
val v = similarities.map{me => (me.i,(me.j,me.value))}.groupByKey().mapValues(_.toSeq.sortBy{ case (domain, count) => count }(Ordering[Double].reverse).take(limit)).flatMapValues(v => v)
v
}
def convertJson(similarities : org.apache.spark.rdd.RDD[(Long,(Long,Double))]) = {
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
similarities.map{me =>
val json = (("item1" -> me._1 ) ~
("item2" -> me._2._1 ) ~
("sim" -> me._2._2))
val jsonText = compact(render(json))
jsonText
}
}
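  // Illustrative output (hypothetical values): each similarity is written as one JSON
  // line of the form
  //   {"item1": 42, "item2": 7, "sim": 0.83}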
def convertJsonFromMatrixEntry(similarities : org.apache.spark.rdd.RDD[MatrixEntry]) = {
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
similarities.map{me =>
val json = (("item1" -> me.i ) ~
("item2" -> me.j ) ~
("sim" -> me.value))
val jsonText = compact(render(json))
jsonText
}
}
def filterItems(rdd : org.apache.spark.rdd.RDD[(Int,Int)],minUsersPerItem : Int,maxUsersPerItem : Int) : org.apache.spark.rdd.RDD[(Int,Int)] =
{
rdd.distinct().groupBy(_._1).filter(_._2.size >= minUsersPerItem).filter(_._2.size <= maxUsersPerItem).flatMap(_._2).cache()
}
def getUserVectors(rdd : org.apache.spark.rdd.RDD[(Int,Int)],minItemsPerUser : Int,maxItem :Int) : org.apache.spark.rdd.RDD[Vector] =
{
rdd.groupByKey().filter(_._2.size >= minItemsPerUser)
.map{ case (user,items) =>
Vectors.sparse(maxItem, items.map(item => (item,1.toDouble)).toSeq)
}
}
def runDimSum(r :RowMatrix,dimsumThreshold : Double) : org.apache.spark.rdd.RDD[MatrixEntry] =
{
r.columnSimilarities(dimsumThreshold).entries
}
def run()
{
val glob = config.inputPath + "/" + config.client+"/actions/"+SparkUtils.getS3UnixGlob(config.startDay,config.days)+"/*"
println("loading from "+glob)
val rddJson = parseJson(glob,config.itemType,config.sample)
val itemsFiltered = filterItems(rddJson, config.minUsersPerItem, config.maxUsersPerItem)
val numItems = itemsFiltered.keys.distinct().count()
println("num items : "+numItems)
val maxItem = itemsFiltered.keys.max() + 1
val users = itemsFiltered.map{case (item,user) => (user,item)}
val userVectors = getUserVectors(users, config.minItemsPerUser, maxItem)
val numUsers = userVectors.count()
println("Number of users : "+numUsers)
val r = new RowMatrix(userVectors);
println("Running item similarity with threshold :"+config.dimsumThreshold)
val simItems = runDimSum(r, config.dimsumThreshold)
//val json = convertJson(simItems)
val json = convertJson(sortAndLimit(simItems, config.limit))
val outPath = config.outputPath + "/" + config.client + "/item-similarity/"+config.startDay
json.saveAsTextFile(outPath)
}
}
object SimilarItems
{
def updateConf(config : SimilarItemsConfig) =
{
var c = config.copy()
if (config.zkHosts.nonEmpty)
{
val curator = new ZkCuratorHandler(config.zkHosts)
val path = "/all_clients/"+config.client+"/offline/similar-items"
if (curator.getCurator.checkExists().forPath(path) != null)
{
val bytes = curator.getCurator.getData().forPath(path)
val j = new String(bytes,"UTF-8")
println("Confguration from zookeeper -> "+j)
import org.json4s._
import org.json4s.jackson.JsonMethods._
implicit val formats = DefaultFormats
val json = parse(j)
import org.json4s.JsonDSL._
import org.json4s.jackson.Serialization.write
type DslConversion = SimilarItemsConfig => JValue
val existingConf = write(c) // turn existing conf into json
val existingParsed = parse(existingConf) // parse it back into json4s internal format
val combined = existingParsed merge json // merge with zookeeper value
c = combined.extract[SimilarItemsConfig] // extract case class from merged json
c
}
else
{
println("Warning: using default configuaration - path["+path+"] not found!");
c
}
}
else
{
println("Warning: using default configuration - no zkHost!");
c
}
}
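  // Illustrative zookeeper payload (hypothetical values): a JSON object whose keys mirror
  // SimilarItemsConfig fields, e.g.
  //   {"days": 3, "limit": 50, "dimsumThreshold": 0.5}
  // Fields absent from the payload keep their current values, because the zookeeper JSON
  // is merged over the serialized existing config above.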
def main(args: Array[String])
{
Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
var c = new SimilarItemsConfig()
val parser = new scopt.OptionParser[Unit]("SimilarItems") {
head("ClusterUsersByDimension", "1.x")
opt[Unit]('l', "local") foreach { x => c = c.copy(local = true) } text("local mode - use local Master")
opt[String]('c', "client") required() valueName("<client>") foreach { x => c = c.copy(client = x) } text("client name (will be used as db and folder suffix)")
opt[String]('i', "inputPath") valueName("path url") foreach { x => c = c.copy(inputPath = x) } text("path prefix for input")
opt[String]('o', "outputPath") valueName("path url") foreach { x => c = c.copy(outputPath = x) } text("path prefix for output")
opt[Int]('r', "days") foreach { x =>c = c.copy(days = x) } text("number of days in past to get foreachs for")
      opt[Int]('e', "itemType") foreach { x =>c = c.copy(itemType = x) } text("item type to limit actions to")
opt[Int]("startDay") foreach { x =>c = c.copy(startDay = x) } text("start day in unix time")
opt[Int]('u', "minUsersPerItem") foreach { x =>c = c.copy(minUsersPerItem = x) } text("min number of users to interact with an item")
opt[Int]('m', "maxUsersPerItem") foreach { x =>c = c.copy(maxUsersPerItem = x) } text("max number of users to interact with an item")
opt[Int]('p', "minItemsPerUser") foreach { x =>c = c.copy(minItemsPerUser = x) } text("min number of items a user needs to interact with")
opt[Int]('l', "limit") foreach { x =>c = c.copy(limit = x) } text("keep top N similarities per item")
opt[Double]('d', "dimsumThreshold") foreach { x =>c = c.copy(dimsumThreshold = x) } text("min cosine similarity estimate for dimsum (soft limit)")
opt[Double]('s', "sample") foreach { x =>c = c.copy(sample = x) } text("what percentage of the input data to use, values in range 0.0..1.0, defaults to 1.0 (use all the data)")
opt[String]('a', "awskey") valueName("aws access key") foreach { x => c = c.copy(awsKey = x) } text("aws key")
opt[String]('s', "awssecret") valueName("aws secret") foreach { x => c = c.copy(awsSecret = x) } text("aws secret")
opt[String]('z', "zookeeper") valueName("zookeeper hosts") foreach { x => c = c.copy(zkHosts = x) } text("zookeeper hosts (comma separated)")
}
if (parser.parse(args)) // Parse to check and get zookeeper if there
{
c = updateConf(c) // update from zookeeper args
      parser.parse(args) // override with args that were on the command line
val conf = new SparkConf().setAppName("SimilarItems")
if (c.local)
conf.setMaster("local")
.set("spark.executor.memory", "8g")
val sc = new SparkContext(conf)
try
{
sc.hadoopConfiguration.set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem")
if (c.awsKey.nonEmpty && c.awsSecret.nonEmpty)
{
sc.hadoopConfiguration.set("fs.s3n.awsAccessKeyId", c.awsKey)
sc.hadoopConfiguration.set("fs.s3n.awsSecretAccessKey", c.awsSecret)
}
println(c)
val si = new SimilarItems(sc,c)
si.run()
}
finally
{
println("Shutting down job")
sc.stop()
}
}
else
{
}
}
}
| SeldonIO/seldon-server | offline-jobs/spark/src/main/scala/io/seldon/spark/mllib/SimilarItems.scala | Scala | apache-2.0 | 10,772
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.NotSerializableException
import java.nio.ByteBuffer
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.math.max
import scala.util.control.NonFatal
import org.apache.spark._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.scheduler.SchedulingMode._
import org.apache.spark.util.{AccumulatorV2, Clock, LongAccumulator, SystemClock, Utils}
import org.apache.spark.util.collection.MedianHeap
/**
* Schedules the tasks within a single TaskSet in the TaskSchedulerImpl. This class keeps track of
* each task, retries tasks if they fail (up to a limited number of times), and
* handles locality-aware scheduling for this TaskSet via delay scheduling. The main interfaces
* to it are resourceOffer, which asks the TaskSet whether it wants to run a task on one node,
* and statusUpdate, which tells it that one of its tasks changed state (e.g. finished).
*
* THREADING: This class is designed to only be called from code with a lock on the
* TaskScheduler (e.g. its event handlers). It should not be called from other threads.
*
* @param sched the TaskSchedulerImpl associated with the TaskSetManager
* @param taskSet the TaskSet to manage scheduling for
* @param maxTaskFailures if any particular task fails this number of times, the entire
* task set will be aborted
*/
private[spark] class TaskSetManager(
sched: TaskSchedulerImpl,
val taskSet: TaskSet,
val maxTaskFailures: Int,
blacklistTracker: Option[BlacklistTracker] = None,
clock: Clock = new SystemClock()) extends Schedulable with Logging {
private val conf = sched.sc.conf
// SPARK-21563 make a copy of the jars/files so they are consistent across the TaskSet
private val addedJars = HashMap[String, Long](sched.sc.addedJars.toSeq: _*)
private val addedFiles = HashMap[String, Long](sched.sc.addedFiles.toSeq: _*)
// Quantile of tasks at which to start speculation
val SPECULATION_QUANTILE = conf.getDouble("spark.speculation.quantile", 0.75)
val SPECULATION_MULTIPLIER = conf.getDouble("spark.speculation.multiplier", 1.5)
val maxResultSize = conf.get(config.MAX_RESULT_SIZE)
val speculationEnabled = conf.getBoolean("spark.speculation", false)
// Serializer for closures and tasks.
val env = SparkEnv.get
val ser = env.closureSerializer.newInstance()
val tasks = taskSet.tasks
private[scheduler] val partitionToIndex = tasks.zipWithIndex
.map { case (t, idx) => t.partitionId -> idx }.toMap
val numTasks = tasks.length
val copiesRunning = new Array[Int](numTasks)
// For each task, tracks whether a copy of the task has succeeded. A task will also be
// marked as "succeeded" if it failed with a fetch failure, in which case it should not
// be re-run because the missing map data needs to be regenerated first.
val successful = new Array[Boolean](numTasks)
private val numFailures = new Array[Int](numTasks)
  // Add the tid of a task to this HashSet when the task is killed by another attempt.
  // This happens when `spark.speculation` is set to true. A task killed by another
  // attempt should not be resubmitted when its executor is lost.
private val killedByOtherAttempt = new HashSet[Long]
val taskAttempts = Array.fill[List[TaskInfo]](numTasks)(Nil)
private[scheduler] var tasksSuccessful = 0
val weight = 1
val minShare = 0
var priority = taskSet.priority
var stageId = taskSet.stageId
val name = "TaskSet_" + taskSet.id
var parent: Pool = null
private var totalResultSize = 0L
private var calculatedTasks = 0
private[scheduler] val taskSetBlacklistHelperOpt: Option[TaskSetBlacklist] = {
blacklistTracker.map { _ =>
new TaskSetBlacklist(sched.sc.listenerBus, conf, stageId, taskSet.stageAttemptId, clock)
}
}
private[scheduler] val runningTasksSet = new HashSet[Long]
override def runningTasks: Int = runningTasksSet.size
def someAttemptSucceeded(tid: Long): Boolean = {
successful(taskInfos(tid).index)
}
// True once no more tasks should be launched for this task set manager. TaskSetManagers enter
// the zombie state once at least one attempt of each task has completed successfully, or if the
// task set is aborted (for example, because it was killed). TaskSetManagers remain in the zombie
// state until all tasks have finished running; we keep TaskSetManagers that are in the zombie
// state in order to continue to track and account for the running tasks.
// TODO: We should kill any running task attempts when the task set manager becomes a zombie.
private[scheduler] var isZombie = false
// Whether the taskSet run tasks from a barrier stage. Spark must launch all the tasks at the
// same time for a barrier stage.
private[scheduler] def isBarrier = taskSet.tasks.nonEmpty && taskSet.tasks(0).isBarrier
// Set of pending tasks for each executor. These collections are actually
// treated as stacks, in which new tasks are added to the end of the
// ArrayBuffer and removed from the end. This makes it faster to detect
// tasks that repeatedly fail because whenever a task failed, it is put
// back at the head of the stack. These collections may contain duplicates
// for two reasons:
// (1): Tasks are only removed lazily; when a task is launched, it remains
// in all the pending lists except the one that it was launched from.
// (2): Tasks may be re-added to these lists multiple times as a result
// of failures.
// Duplicates are handled in dequeueTaskFromList, which ensures that a
// task hasn't already started running before launching it.
private val pendingTasksForExecutor = new HashMap[String, ArrayBuffer[Int]]
// Set of pending tasks for each host. Similar to pendingTasksForExecutor,
// but at host level.
private val pendingTasksForHost = new HashMap[String, ArrayBuffer[Int]]
// Set of pending tasks for each rack -- similar to the above.
private val pendingTasksForRack = new HashMap[String, ArrayBuffer[Int]]
// Set containing pending tasks with no locality preferences.
private[scheduler] var pendingTasksWithNoPrefs = new ArrayBuffer[Int]
// Set containing all pending tasks (also used as a stack, as above).
private val allPendingTasks = new ArrayBuffer[Int]
// Tasks that can be speculated. Since these will be a small fraction of total
// tasks, we'll just hold them in a HashSet.
private[scheduler] val speculatableTasks = new HashSet[Int]
// Task index, start and finish time for each task attempt (indexed by task ID)
private[scheduler] val taskInfos = new HashMap[Long, TaskInfo]
// Use a MedianHeap to record durations of successful tasks so we know when to launch
// speculative tasks. This is only used when speculation is enabled, to avoid the overhead
// of inserting into the heap when the heap won't be used.
val successfulTaskDurations = new MedianHeap()
// How frequently to reprint duplicate exceptions in full, in milliseconds
val EXCEPTION_PRINT_INTERVAL =
conf.getLong("spark.logging.exceptionPrintInterval", 10000)
// Map of recent exceptions (identified by string representation and top stack frame) to
// duplicate count (how many times the same exception has appeared) and time the full exception
// was printed. This should ideally be an LRU map that can drop old exceptions automatically.
private val recentExceptions = HashMap[String, (Int, Long)]()
// Figure out the current map output tracker epoch and set it on all tasks
val epoch = sched.mapOutputTracker.getEpoch
logDebug("Epoch for " + taskSet + ": " + epoch)
for (t <- tasks) {
t.epoch = epoch
}
// Add all our tasks to the pending lists. We do this in reverse order
// of task index so that tasks with low indices get launched first.
for (i <- (0 until numTasks).reverse) {
addPendingTask(i)
}
/**
* Track the set of locality levels which are valid given the tasks locality preferences and
* the set of currently available executors. This is updated as executors are added and removed.
* This allows a performance optimization, of skipping levels that aren't relevant (eg., skip
* PROCESS_LOCAL if no tasks could be run PROCESS_LOCAL for the current set of executors).
*/
private[scheduler] var myLocalityLevels = computeValidLocalityLevels()
// Time to wait at each level
private[scheduler] var localityWaits = myLocalityLevels.map(getLocalityWait)
// Delay scheduling variables: we keep track of our current locality level and the time we
// last launched a task at that level, and move up a level when localityWaits[curLevel] expires.
// We then move down if we manage to launch a "more local" task.
private var currentLocalityIndex = 0 // Index of our current locality level in validLocalityLevels
private var lastLaunchTime = clock.getTimeMillis() // Time we last launched a task at this level
override def schedulableQueue: ConcurrentLinkedQueue[Schedulable] = null
override def schedulingMode: SchedulingMode = SchedulingMode.NONE
private[scheduler] var emittedTaskSizeWarning = false
/** Add a task to all the pending-task lists that it should be on. */
private[spark] def addPendingTask(index: Int) {
for (loc <- tasks(index).preferredLocations) {
loc match {
case e: ExecutorCacheTaskLocation =>
pendingTasksForExecutor.getOrElseUpdate(e.executorId, new ArrayBuffer) += index
case e: HDFSCacheTaskLocation =>
val exe = sched.getExecutorsAliveOnHost(loc.host)
exe match {
case Some(set) =>
for (e <- set) {
pendingTasksForExecutor.getOrElseUpdate(e, new ArrayBuffer) += index
}
logInfo(s"Pending task $index has a cached location at ${e.host} " +
", where there are executors " + set.mkString(","))
case None => logDebug(s"Pending task $index has a cached location at ${e.host} " +
", but there are no executors alive there.")
}
case _ =>
}
pendingTasksForHost.getOrElseUpdate(loc.host, new ArrayBuffer) += index
for (rack <- sched.getRackForHost(loc.host)) {
pendingTasksForRack.getOrElseUpdate(rack, new ArrayBuffer) += index
}
}
if (tasks(index).preferredLocations == Nil) {
pendingTasksWithNoPrefs += index
}
allPendingTasks += index // No point scanning this whole list to find the old task there
}
/**
* Return the pending tasks list for a given executor ID, or an empty list if
* there is no map entry for that host
*/
private def getPendingTasksForExecutor(executorId: String): ArrayBuffer[Int] = {
pendingTasksForExecutor.getOrElse(executorId, ArrayBuffer())
}
/**
* Return the pending tasks list for a given host, or an empty list if
* there is no map entry for that host
*/
private def getPendingTasksForHost(host: String): ArrayBuffer[Int] = {
pendingTasksForHost.getOrElse(host, ArrayBuffer())
}
/**
* Return the pending rack-local task list for a given rack, or an empty list if
* there is no map entry for that rack
*/
private def getPendingTasksForRack(rack: String): ArrayBuffer[Int] = {
pendingTasksForRack.getOrElse(rack, ArrayBuffer())
}
/**
* Dequeue a pending task from the given list and return its index.
* Return None if the list is empty.
* This method also cleans up any tasks in the list that have already
* been launched, since we want that to happen lazily.
*/
private def dequeueTaskFromList(
execId: String,
host: String,
list: ArrayBuffer[Int]): Option[Int] = {
var indexOffset = list.size
while (indexOffset > 0) {
indexOffset -= 1
val index = list(indexOffset)
if (!isTaskBlacklistedOnExecOrNode(index, execId, host)) {
// This should almost always be list.trimEnd(1) to remove tail
list.remove(indexOffset)
if (copiesRunning(index) == 0 && !successful(index)) {
return Some(index)
}
}
}
None
}
/** Check whether a task once ran an attempt on a given host */
private def hasAttemptOnHost(taskIndex: Int, host: String): Boolean = {
taskAttempts(taskIndex).exists(_.host == host)
}
private def isTaskBlacklistedOnExecOrNode(index: Int, execId: String, host: String): Boolean = {
taskSetBlacklistHelperOpt.exists { blacklist =>
blacklist.isNodeBlacklistedForTask(host, index) ||
blacklist.isExecutorBlacklistedForTask(execId, index)
}
}
/**
* Return a speculative task for a given executor if any are available. The task should not have
* an attempt running on this host, in case the host is slow. In addition, the task should meet
* the given locality constraint.
*/
// Labeled as protected to allow tests to override providing speculative tasks if necessary
protected def dequeueSpeculativeTask(execId: String, host: String, locality: TaskLocality.Value)
: Option[(Int, TaskLocality.Value)] =
{
speculatableTasks.retain(index => !successful(index)) // Remove finished tasks from set
def canRunOnHost(index: Int): Boolean = {
!hasAttemptOnHost(index, host) &&
!isTaskBlacklistedOnExecOrNode(index, execId, host)
}
if (!speculatableTasks.isEmpty) {
// Check for process-local tasks; note that tasks can be process-local
// on multiple nodes when we replicate cached blocks, as in Spark Streaming
for (index <- speculatableTasks if canRunOnHost(index)) {
val prefs = tasks(index).preferredLocations
val executors = prefs.flatMap(_ match {
case e: ExecutorCacheTaskLocation => Some(e.executorId)
case _ => None
});
if (executors.contains(execId)) {
speculatableTasks -= index
return Some((index, TaskLocality.PROCESS_LOCAL))
}
}
// Check for node-local tasks
if (TaskLocality.isAllowed(locality, TaskLocality.NODE_LOCAL)) {
for (index <- speculatableTasks if canRunOnHost(index)) {
val locations = tasks(index).preferredLocations.map(_.host)
if (locations.contains(host)) {
speculatableTasks -= index
return Some((index, TaskLocality.NODE_LOCAL))
}
}
}
// Check for no-preference tasks
if (TaskLocality.isAllowed(locality, TaskLocality.NO_PREF)) {
for (index <- speculatableTasks if canRunOnHost(index)) {
val locations = tasks(index).preferredLocations
if (locations.size == 0) {
speculatableTasks -= index
return Some((index, TaskLocality.PROCESS_LOCAL))
}
}
}
// Check for rack-local tasks
if (TaskLocality.isAllowed(locality, TaskLocality.RACK_LOCAL)) {
for (rack <- sched.getRackForHost(host)) {
for (index <- speculatableTasks if canRunOnHost(index)) {
val racks = tasks(index).preferredLocations.map(_.host).flatMap(sched.getRackForHost)
if (racks.contains(rack)) {
speculatableTasks -= index
return Some((index, TaskLocality.RACK_LOCAL))
}
}
}
}
// Check for non-local tasks
if (TaskLocality.isAllowed(locality, TaskLocality.ANY)) {
for (index <- speculatableTasks if canRunOnHost(index)) {
speculatableTasks -= index
return Some((index, TaskLocality.ANY))
}
}
}
None
}
/**
* Dequeue a pending task for a given node and return its index and locality level.
* Only search for tasks matching the given locality constraint.
*
* @return An option containing (task index within the task set, locality, is speculative?)
*/
private def dequeueTask(execId: String, host: String, maxLocality: TaskLocality.Value)
: Option[(Int, TaskLocality.Value, Boolean)] =
{
for (index <- dequeueTaskFromList(execId, host, getPendingTasksForExecutor(execId))) {
return Some((index, TaskLocality.PROCESS_LOCAL, false))
}
if (TaskLocality.isAllowed(maxLocality, TaskLocality.NODE_LOCAL)) {
for (index <- dequeueTaskFromList(execId, host, getPendingTasksForHost(host))) {
return Some((index, TaskLocality.NODE_LOCAL, false))
}
}
if (TaskLocality.isAllowed(maxLocality, TaskLocality.NO_PREF)) {
      // Look for noPref tasks after NODE_LOCAL to minimize cross-rack traffic
for (index <- dequeueTaskFromList(execId, host, pendingTasksWithNoPrefs)) {
return Some((index, TaskLocality.PROCESS_LOCAL, false))
}
}
if (TaskLocality.isAllowed(maxLocality, TaskLocality.RACK_LOCAL)) {
for {
rack <- sched.getRackForHost(host)
index <- dequeueTaskFromList(execId, host, getPendingTasksForRack(rack))
} {
return Some((index, TaskLocality.RACK_LOCAL, false))
}
}
if (TaskLocality.isAllowed(maxLocality, TaskLocality.ANY)) {
for (index <- dequeueTaskFromList(execId, host, allPendingTasks)) {
return Some((index, TaskLocality.ANY, false))
}
}
    // find a speculative task if all other tasks have been scheduled
dequeueSpeculativeTask(execId, host, maxLocality).map {
case (taskIndex, allowedLocality) => (taskIndex, allowedLocality, true)}
}
/**
* Respond to an offer of a single executor from the scheduler by finding a task
*
   * NOTE: this function is called either with a maxLocality that will be adjusted by
   * the delay scheduling algorithm, or with the special NO_PREF locality, which is
   * never modified
*
* @param execId the executor Id of the offered resource
* @param host the host Id of the offered resource
* @param maxLocality the maximum locality we want to schedule the tasks at
*/
@throws[TaskNotSerializableException]
def resourceOffer(
execId: String,
host: String,
maxLocality: TaskLocality.TaskLocality)
: Option[TaskDescription] =
{
val offerBlacklisted = taskSetBlacklistHelperOpt.exists { blacklist =>
blacklist.isNodeBlacklistedForTaskSet(host) ||
blacklist.isExecutorBlacklistedForTaskSet(execId)
}
if (!isZombie && !offerBlacklisted) {
val curTime = clock.getTimeMillis()
var allowedLocality = maxLocality
if (maxLocality != TaskLocality.NO_PREF) {
allowedLocality = getAllowedLocalityLevel(curTime)
if (allowedLocality > maxLocality) {
// We're not allowed to search for farther-away tasks
allowedLocality = maxLocality
}
}
dequeueTask(execId, host, allowedLocality).map { case ((index, taskLocality, speculative)) =>
// Found a task; do some bookkeeping and return a task description
val task = tasks(index)
val taskId = sched.newTaskId()
// Do various bookkeeping
copiesRunning(index) += 1
val attemptNum = taskAttempts(index).size
val info = new TaskInfo(taskId, index, attemptNum, curTime,
execId, host, taskLocality, speculative)
taskInfos(taskId) = info
taskAttempts(index) = info :: taskAttempts(index)
// Update our locality level for delay scheduling
// NO_PREF will not affect the variables related to delay scheduling
if (maxLocality != TaskLocality.NO_PREF) {
currentLocalityIndex = getLocalityIndex(taskLocality)
lastLaunchTime = curTime
}
// Serialize and return the task
val serializedTask: ByteBuffer = try {
ser.serialize(task)
} catch {
// If the task cannot be serialized, then there's no point to re-attempt the task,
// as it will always fail. So just abort the whole task-set.
case NonFatal(e) =>
val msg = s"Failed to serialize task $taskId, not attempting to retry it."
logError(msg, e)
abort(s"$msg Exception during serialization: $e")
throw new TaskNotSerializableException(e)
}
if (serializedTask.limit() > TaskSetManager.TASK_SIZE_TO_WARN_KB * 1024 &&
!emittedTaskSizeWarning) {
emittedTaskSizeWarning = true
logWarning(s"Stage ${task.stageId} contains a task of very large size " +
s"(${serializedTask.limit() / 1024} KB). The maximum recommended task size is " +
s"${TaskSetManager.TASK_SIZE_TO_WARN_KB} KB.")
}
addRunningTask(taskId)
// We used to log the time it takes to serialize the task, but task size is already
// a good proxy to task serialization time.
// val timeTaken = clock.getTime() - startTime
val taskName = s"task ${info.id} in stage ${taskSet.id}"
logInfo(s"Starting $taskName (TID $taskId, $host, executor ${info.executorId}, " +
s"partition ${task.partitionId}, $taskLocality, ${serializedTask.limit()} bytes)")
sched.dagScheduler.taskStarted(task, info)
new TaskDescription(
taskId,
attemptNum,
execId,
taskName,
index,
task.partitionId,
addedFiles,
addedJars,
task.localProperties,
serializedTask)
}
} else {
None
}
}
private def maybeFinishTaskSet() {
if (isZombie && runningTasks == 0) {
sched.taskSetFinished(this)
if (tasksSuccessful == numTasks) {
blacklistTracker.foreach(_.updateBlacklistForSuccessfulTaskSet(
taskSet.stageId,
taskSet.stageAttemptId,
taskSetBlacklistHelperOpt.get.execToFailures))
}
}
}
/**
* Get the level we can launch tasks according to delay scheduling, based on current wait time.
*/
private def getAllowedLocalityLevel(curTime: Long): TaskLocality.TaskLocality = {
// Remove the scheduled or finished tasks lazily
def tasksNeedToBeScheduledFrom(pendingTaskIds: ArrayBuffer[Int]): Boolean = {
var indexOffset = pendingTaskIds.size
while (indexOffset > 0) {
indexOffset -= 1
val index = pendingTaskIds(indexOffset)
if (copiesRunning(index) == 0 && !successful(index)) {
return true
} else {
pendingTaskIds.remove(indexOffset)
}
}
false
}
// Walk through the list of tasks that can be scheduled at each location and returns true
// if there are any tasks that still need to be scheduled. Lazily cleans up tasks that have
// already been scheduled.
def moreTasksToRunIn(pendingTasks: HashMap[String, ArrayBuffer[Int]]): Boolean = {
val emptyKeys = new ArrayBuffer[String]
val hasTasks = pendingTasks.exists {
case (id: String, tasks: ArrayBuffer[Int]) =>
if (tasksNeedToBeScheduledFrom(tasks)) {
true
} else {
emptyKeys += id
false
}
}
// The key could be executorId, host or rackId
emptyKeys.foreach(id => pendingTasks.remove(id))
hasTasks
}
while (currentLocalityIndex < myLocalityLevels.length - 1) {
val moreTasks = myLocalityLevels(currentLocalityIndex) match {
case TaskLocality.PROCESS_LOCAL => moreTasksToRunIn(pendingTasksForExecutor)
case TaskLocality.NODE_LOCAL => moreTasksToRunIn(pendingTasksForHost)
case TaskLocality.NO_PREF => pendingTasksWithNoPrefs.nonEmpty
case TaskLocality.RACK_LOCAL => moreTasksToRunIn(pendingTasksForRack)
}
if (!moreTasks) {
// This is a performance optimization: if there are no more tasks that can
// be scheduled at a particular locality level, there is no point in waiting
// for the locality wait timeout (SPARK-4939).
lastLaunchTime = curTime
logDebug(s"No tasks for locality level ${myLocalityLevels(currentLocalityIndex)}, " +
s"so moving to locality level ${myLocalityLevels(currentLocalityIndex + 1)}")
currentLocalityIndex += 1
} else if (curTime - lastLaunchTime >= localityWaits(currentLocalityIndex)) {
// Jump to the next locality level, and reset lastLaunchTime so that the next locality
// wait timer doesn't immediately expire
lastLaunchTime += localityWaits(currentLocalityIndex)
logDebug(s"Moving to ${myLocalityLevels(currentLocalityIndex + 1)} after waiting for " +
s"${localityWaits(currentLocalityIndex)}ms")
currentLocalityIndex += 1
} else {
return myLocalityLevels(currentLocalityIndex)
}
}
myLocalityLevels(currentLocalityIndex)
}
/**
* Find the index in myLocalityLevels for a given locality. This is also designed to work with
* localities that are not in myLocalityLevels (in case we somehow get those) by returning the
* next-biggest level we have. Uses the fact that the last value in myLocalityLevels is ANY.
*/
def getLocalityIndex(locality: TaskLocality.TaskLocality): Int = {
var index = 0
while (locality > myLocalityLevels(index)) {
index += 1
}
index
}
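  // Worked example (hypothetical level set): if myLocalityLevels is
  // [PROCESS_LOCAL, NODE_LOCAL, ANY], then getLocalityIndex(NODE_LOCAL) == 1, and
  // getLocalityIndex(RACK_LOCAL) == 2 because RACK_LOCAL is not tracked and ANY is the
  // next-biggest level we have.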
/**
* Check whether the given task set has been blacklisted to the point that it can't run anywhere.
*
* It is possible that this taskset has become impossible to schedule *anywhere* due to the
* blacklist. The most common scenario would be if there are fewer executors than
   * spark.task.maxFailures. We need to detect this so that the job does not hang.
* We try to acquire new executor/s by killing an existing idle blacklisted executor.
*
* There's a tradeoff here: we could make sure all tasks in the task set are schedulable, but that
* would add extra time to each iteration of the scheduling loop. Here, we take the approach of
* making sure at least one of the unscheduled tasks is schedulable. This means we may not detect
* the hang as quickly as we could have, but we'll always detect the hang eventually, and the
* method is faster in the typical case. In the worst case, this method can take
* O(maxTaskFailures + numTasks) time, but it will be faster when there haven't been any task
* failures (this is because the method picks one unscheduled task, and then iterates through each
* executor until it finds one that the task isn't blacklisted on).
*/
private[scheduler] def getCompletelyBlacklistedTaskIfAny(
hostToExecutors: HashMap[String, HashSet[String]]): Option[Int] = {
taskSetBlacklistHelperOpt.flatMap { taskSetBlacklist =>
val appBlacklist = blacklistTracker.get
// Only look for unschedulable tasks when at least one executor has registered. Otherwise,
// task sets will be (unnecessarily) aborted in cases when no executors have registered yet.
if (hostToExecutors.nonEmpty) {
// find any task that needs to be scheduled
val pendingTask: Option[Int] = {
// usually this will just take the last pending task, but because of the lazy removal
// from each list, we may need to go deeper in the list. We poll from the end because
// failed tasks are put back at the end of allPendingTasks, so we're more likely to find
// an unschedulable task this way.
val indexOffset = allPendingTasks.lastIndexWhere { indexInTaskSet =>
copiesRunning(indexInTaskSet) == 0 && !successful(indexInTaskSet)
}
if (indexOffset == -1) {
None
} else {
Some(allPendingTasks(indexOffset))
}
}
pendingTask.find { indexInTaskSet =>
        // try to find some executor this task can run on. It's possible that some *other*
// task isn't schedulable anywhere, but we will discover that in some later call,
// when that unschedulable task is the last task remaining.
hostToExecutors.forall { case (host, execsOnHost) =>
// Check if the task can run on the node
val nodeBlacklisted =
appBlacklist.isNodeBlacklisted(host) ||
taskSetBlacklist.isNodeBlacklistedForTaskSet(host) ||
taskSetBlacklist.isNodeBlacklistedForTask(host, indexInTaskSet)
if (nodeBlacklisted) {
true
} else {
// Check if the task can run on any of the executors
execsOnHost.forall { exec =>
appBlacklist.isExecutorBlacklisted(exec) ||
taskSetBlacklist.isExecutorBlacklistedForTaskSet(exec) ||
taskSetBlacklist.isExecutorBlacklistedForTask(exec, indexInTaskSet)
}
}
}
}
} else {
None
}
}
}
private[scheduler] def abortSinceCompletelyBlacklisted(indexInTaskSet: Int): Unit = {
taskSetBlacklistHelperOpt.foreach { taskSetBlacklist =>
val partition = tasks(indexInTaskSet).partitionId
abort(s"""
|Aborting $taskSet because task $indexInTaskSet (partition $partition)
|cannot run anywhere due to node and executor blacklist.
|Most recent failure:
|${taskSetBlacklist.getLatestFailureReason}
|
|Blacklisting behavior can be configured via spark.blacklist.*.
|""".stripMargin)
}
}
/**
* Marks the task as getting result and notifies the DAG Scheduler
*/
def handleTaskGettingResult(tid: Long): Unit = {
val info = taskInfos(tid)
info.markGettingResult(clock.getTimeMillis())
sched.dagScheduler.taskGettingResult(info)
}
/**
   * Check whether there is enough quota to fetch the result with `size` bytes
*/
def canFetchMoreResults(size: Long): Boolean = sched.synchronized {
totalResultSize += size
calculatedTasks += 1
if (maxResultSize > 0 && totalResultSize > maxResultSize) {
val msg = s"Total size of serialized results of ${calculatedTasks} tasks " +
s"(${Utils.bytesToString(totalResultSize)}) is bigger than ${config.MAX_RESULT_SIZE.key} " +
s"(${Utils.bytesToString(maxResultSize)})"
logError(msg)
abort(msg)
false
} else {
true
}
}
/**
* Marks a task as successful and notifies the DAGScheduler that the task has ended.
*/
def handleSuccessfulTask(tid: Long, result: DirectTaskResult[_]): Unit = {
val info = taskInfos(tid)
val index = info.index
// Check if any other attempt succeeded before this and this attempt has not been handled
if (successful(index) && killedByOtherAttempt.contains(tid)) {
// Undo the effect on calculatedTasks and totalResultSize made earlier when
// checking if can fetch more results
calculatedTasks -= 1
val resultSizeAcc = result.accumUpdates.find(a =>
a.name == Some(InternalAccumulator.RESULT_SIZE))
if (resultSizeAcc.isDefined) {
totalResultSize -= resultSizeAcc.get.asInstanceOf[LongAccumulator].value
}
// Handle this task as a killed task
handleFailedTask(tid, TaskState.KILLED,
TaskKilled("Finish but did not commit due to another attempt succeeded"))
return
}
info.markFinished(TaskState.FINISHED, clock.getTimeMillis())
if (speculationEnabled) {
successfulTaskDurations.insert(info.duration)
}
removeRunningTask(tid)
// Kill any other attempts for the same task (since those are unnecessary now that one
// attempt completed successfully).
for (attemptInfo <- taskAttempts(index) if attemptInfo.running) {
logInfo(s"Killing attempt ${attemptInfo.attemptNumber} for task ${attemptInfo.id} " +
s"in stage ${taskSet.id} (TID ${attemptInfo.taskId}) on ${attemptInfo.host} " +
s"as the attempt ${info.attemptNumber} succeeded on ${info.host}")
killedByOtherAttempt += attemptInfo.taskId
sched.backend.killTask(
attemptInfo.taskId,
attemptInfo.executorId,
interruptThread = true,
reason = "another attempt succeeded")
}
if (!successful(index)) {
tasksSuccessful += 1
logInfo(s"Finished task ${info.id} in stage ${taskSet.id} (TID ${info.taskId}) in" +
s" ${info.duration} ms on ${info.host} (executor ${info.executorId})" +
s" ($tasksSuccessful/$numTasks)")
// Mark successful and stop if all the tasks have succeeded.
successful(index) = true
if (tasksSuccessful == numTasks) {
isZombie = true
}
} else {
logInfo("Ignoring task-finished event for " + info.id + " in stage " + taskSet.id +
" because task " + index + " has already completed successfully")
}
// There may be multiple tasksets for this stage -- we let all of them know that the partition
// was completed. This may result in some of the tasksets getting completed.
sched.markPartitionCompletedInAllTaskSets(stageId, tasks(index).partitionId, info)
// This method is called by "TaskSchedulerImpl.handleSuccessfulTask" which holds the
// "TaskSchedulerImpl" lock until exiting. To avoid the SPARK-7655 issue, we should not
// "deserialize" the value when holding a lock to avoid blocking other threads. So we call
// "result.value()" in "TaskResultGetter.enqueueSuccessfulTask" before reaching here.
// Note: "result.value()" only deserializes the value when it's called at the first time, so
// here "result.value()" just returns the value and won't block other threads.
sched.dagScheduler.taskEnded(tasks(index), Success, result.value(), result.accumUpdates, info)
maybeFinishTaskSet()
}
private[scheduler] def markPartitionCompleted(partitionId: Int, taskInfo: TaskInfo): Unit = {
partitionToIndex.get(partitionId).foreach { index =>
if (!successful(index)) {
if (speculationEnabled && !isZombie) {
successfulTaskDurations.insert(taskInfo.duration)
}
tasksSuccessful += 1
successful(index) = true
if (tasksSuccessful == numTasks) {
isZombie = true
}
maybeFinishTaskSet()
}
}
}
/**
* Marks the task as failed, re-adds it to the list of pending tasks, and notifies the
* DAG Scheduler.
*/
def handleFailedTask(tid: Long, state: TaskState, reason: TaskFailedReason) {
val info = taskInfos(tid)
if (info.failed || info.killed) {
return
}
removeRunningTask(tid)
info.markFinished(state, clock.getTimeMillis())
val index = info.index
copiesRunning(index) -= 1
var accumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty
val failureReason = s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid, ${info.host}," +
s" executor ${info.executorId}): ${reason.toErrorString}"
val failureException: Option[Throwable] = reason match {
case fetchFailed: FetchFailed =>
logWarning(failureReason)
if (!successful(index)) {
successful(index) = true
tasksSuccessful += 1
}
isZombie = true
if (fetchFailed.bmAddress != null) {
blacklistTracker.foreach(_.updateBlacklistForFetchFailure(
fetchFailed.bmAddress.host, fetchFailed.bmAddress.executorId))
}
None
case ef: ExceptionFailure =>
// ExceptionFailure's might have accumulator updates
accumUpdates = ef.accums
if (ef.className == classOf[NotSerializableException].getName) {
// If the task result wasn't serializable, there's no point in trying to re-execute it.
logError("Task %s in stage %s (TID %d) had a not serializable result: %s; not retrying"
.format(info.id, taskSet.id, tid, ef.description))
abort("Task %s in stage %s (TID %d) had a not serializable result: %s".format(
info.id, taskSet.id, tid, ef.description))
return
}
val key = ef.description
val now = clock.getTimeMillis()
val (printFull, dupCount) = {
if (recentExceptions.contains(key)) {
val (dupCount, printTime) = recentExceptions(key)
if (now - printTime > EXCEPTION_PRINT_INTERVAL) {
recentExceptions(key) = (0, now)
(true, 0)
} else {
recentExceptions(key) = (dupCount + 1, printTime)
(false, dupCount + 1)
}
} else {
recentExceptions(key) = (0, now)
(true, 0)
}
}
if (printFull) {
logWarning(failureReason)
} else {
logInfo(
s"Lost task ${info.id} in stage ${taskSet.id} (TID $tid) on ${info.host}, executor" +
s" ${info.executorId}: ${ef.className} (${ef.description}) [duplicate $dupCount]")
}
ef.exception
case tk: TaskKilled =>
// TaskKilled might have accumulator updates
accumUpdates = tk.accums
logWarning(failureReason)
None
case e: ExecutorLostFailure if !e.exitCausedByApp =>
logInfo(s"Task $tid failed because while it was being computed, its executor " +
"exited for a reason unrelated to the task. Not counting this failure towards the " +
"maximum number of failures for the task.")
None
case e: TaskFailedReason => // TaskResultLost and others
logWarning(failureReason)
None
}
if (tasks(index).isBarrier) {
isZombie = true
}
sched.dagScheduler.taskEnded(tasks(index), reason, null, accumUpdates, info)
if (!isZombie && reason.countTowardsTaskFailures) {
assert (null != failureReason)
taskSetBlacklistHelperOpt.foreach(_.updateBlacklistForFailedTask(
info.host, info.executorId, index, failureReason))
numFailures(index) += 1
if (numFailures(index) >= maxTaskFailures) {
logError("Task %d in stage %s failed %d times; aborting job".format(
index, taskSet.id, maxTaskFailures))
abort("Task %d in stage %s failed %d times, most recent failure: %s\\nDriver stacktrace:"
.format(index, taskSet.id, maxTaskFailures, failureReason), failureException)
return
}
}
if (successful(index)) {
logInfo(s"Task ${info.id} in stage ${taskSet.id} (TID $tid) failed, but the task will not" +
s" be re-executed (either because the task failed with a shuffle data fetch failure," +
s" so the previous stage needs to be re-run, or because a different copy of the task" +
s" has already succeeded).")
} else {
addPendingTask(index)
}
maybeFinishTaskSet()
}
def abort(message: String, exception: Option[Throwable] = None): Unit = sched.synchronized {
// TODO: Kill running tasks if we were not terminated due to a Mesos error
sched.dagScheduler.taskSetFailed(taskSet, message, exception)
isZombie = true
maybeFinishTaskSet()
}
/** If the given task ID is not in the set of running tasks, adds it.
*
* Used to keep track of the number of running tasks, for enforcing scheduling policies.
*/
def addRunningTask(tid: Long) {
if (runningTasksSet.add(tid) && parent != null) {
parent.increaseRunningTasks(1)
}
}
/** If the given task ID is in the set of running tasks, removes it. */
def removeRunningTask(tid: Long) {
if (runningTasksSet.remove(tid) && parent != null) {
parent.decreaseRunningTasks(1)
}
}
override def getSchedulableByName(name: String): Schedulable = {
null
}
override def addSchedulable(schedulable: Schedulable) {}
override def removeSchedulable(schedulable: Schedulable) {}
override def getSortedTaskSetQueue(): ArrayBuffer[TaskSetManager] = {
val sortedTaskSetQueue = new ArrayBuffer[TaskSetManager]()
sortedTaskSetQueue += this
sortedTaskSetQueue
}
/** Called by TaskScheduler when an executor is lost so we can re-enqueue our tasks */
override def executorLost(execId: String, host: String, reason: ExecutorLossReason) {
// Re-enqueue any tasks that ran on the failed executor if this is a shuffle map stage,
// and we are not using an external shuffle server which could serve the shuffle outputs.
// The reason is the next stage wouldn't be able to fetch the data from this dead executor
// so we would need to rerun these tasks on other executors.
if (tasks(0).isInstanceOf[ShuffleMapTask] && !env.blockManager.externalShuffleServiceEnabled
&& !isZombie) {
for ((tid, info) <- taskInfos if info.executorId == execId) {
val index = taskInfos(tid).index
if (successful(index) && !killedByOtherAttempt.contains(tid)) {
successful(index) = false
copiesRunning(index) -= 1
tasksSuccessful -= 1
addPendingTask(index)
// Tell the DAGScheduler that this task was resubmitted so that it doesn't think our
// stage finishes when a total of tasks.size tasks finish.
sched.dagScheduler.taskEnded(
tasks(index), Resubmitted, null, Seq.empty, info)
}
}
}
for ((tid, info) <- taskInfos if info.running && info.executorId == execId) {
val exitCausedByApp: Boolean = reason match {
case exited: ExecutorExited => exited.exitCausedByApp
case ExecutorKilled => false
case _ => true
}
handleFailedTask(tid, TaskState.FAILED, ExecutorLostFailure(info.executorId, exitCausedByApp,
Some(reason.toString)))
}
// recalculate valid locality levels and waits when executor is lost
recomputeLocality()
}
/**
* Check for tasks to be speculated and return true if there are any. This is called periodically
* by the TaskScheduler.
*
*/
override def checkSpeculatableTasks(minTimeToSpeculation: Int): Boolean = {
// Can't speculate if we only have one task, and no need to speculate if the task set is a
// zombie or is from a barrier stage.
if (isZombie || isBarrier || numTasks == 1) {
return false
}
var foundTasks = false
val minFinishedForSpeculation = (SPECULATION_QUANTILE * numTasks).floor.toInt
logDebug("Checking for speculative tasks: minFinished = " + minFinishedForSpeculation)
if (tasksSuccessful >= minFinishedForSpeculation && tasksSuccessful > 0) {
val time = clock.getTimeMillis()
val medianDuration = successfulTaskDurations.median
val threshold = max(SPECULATION_MULTIPLIER * medianDuration, minTimeToSpeculation)
// TODO: Threshold should also look at standard deviation of task durations and have a lower
// bound based on that.
logDebug("Task length threshold for speculation: " + threshold)
for (tid <- runningTasksSet) {
val info = taskInfos(tid)
val index = info.index
if (!successful(index) && copiesRunning(index) == 1 && info.timeRunning(time) > threshold &&
!speculatableTasks.contains(index)) {
logInfo(
"Marking task %d in stage %s (on %s) as speculatable because it ran more than %.0f ms"
.format(index, taskSet.id, info.host, threshold))
speculatableTasks += index
sched.dagScheduler.speculativeTaskSubmitted(tasks(index))
foundTasks = true
}
}
}
foundTasks
}
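  // Worked example (hypothetical numbers): with numTasks = 10 and the default
  // SPECULATION_QUANTILE of 0.75, speculation is considered only after
  // floor(0.75 * 10) = 7 tasks have succeeded; if the median successful duration is
  // 100 ms and SPECULATION_MULTIPLIER is 1.5, a running task becomes a candidate once
  // it has run longer than max(150, minTimeToSpeculation) ms.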
private def getLocalityWait(level: TaskLocality.TaskLocality): Long = {
val defaultWait = conf.get(config.LOCALITY_WAIT)
val localityWaitKey = level match {
case TaskLocality.PROCESS_LOCAL => "spark.locality.wait.process"
case TaskLocality.NODE_LOCAL => "spark.locality.wait.node"
case TaskLocality.RACK_LOCAL => "spark.locality.wait.rack"
case _ => null
}
if (localityWaitKey != null) {
conf.getTimeAsMs(localityWaitKey, defaultWait.toString)
} else {
0L
}
}
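  // Example settings (assumed values, not defaults): "spark.locality.wait=3s" sets the
  // fallback wait for every level, while "spark.locality.wait.node=0s" overrides only
  // NODE_LOCAL, letting the scheduler skip straight past node-local waiting.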
/**
* Compute the locality levels used in this TaskSet. Assumes that all tasks have already been
* added to queues using addPendingTask.
*
*/
private def computeValidLocalityLevels(): Array[TaskLocality.TaskLocality] = {
import TaskLocality.{PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY}
val levels = new ArrayBuffer[TaskLocality.TaskLocality]
if (!pendingTasksForExecutor.isEmpty &&
pendingTasksForExecutor.keySet.exists(sched.isExecutorAlive(_))) {
levels += PROCESS_LOCAL
}
if (!pendingTasksForHost.isEmpty &&
pendingTasksForHost.keySet.exists(sched.hasExecutorsAliveOnHost(_))) {
levels += NODE_LOCAL
}
if (!pendingTasksWithNoPrefs.isEmpty) {
levels += NO_PREF
}
if (!pendingTasksForRack.isEmpty &&
pendingTasksForRack.keySet.exists(sched.hasHostAliveOnRack(_))) {
levels += RACK_LOCAL
}
levels += ANY
logDebug("Valid locality levels for " + taskSet + ": " + levels.mkString(", "))
levels.toArray
}
def recomputeLocality() {
val previousLocalityLevel = myLocalityLevels(currentLocalityIndex)
myLocalityLevels = computeValidLocalityLevels()
localityWaits = myLocalityLevels.map(getLocalityWait)
currentLocalityIndex = getLocalityIndex(previousLocalityLevel)
}
def executorAdded() {
recomputeLocality()
}
}
private[spark] object TaskSetManager {
// The user will be warned if any stages contain a task that has a serialized size greater than
// this.
val TASK_SIZE_TO_WARN_KB = 100
}
| guoxiaolongzte/spark | core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala | Scala | apache-2.0 | 46,212 |
package com.twitter.finagle.tracing
import com.twitter.finagle.util.ByteArrays
import com.twitter.util.RichU64String
import com.twitter.util.{Try, Return, Throw}
import java.lang.{Boolean => JBool}
import scala.util.control.NonFatal
/**
* Defines trace identifiers. Span IDs name a particular (unique)
* span, while TraceIds contain a span ID as well as context (parentId
* and traceId).
*/
final class SpanId(val self: Long) extends Proxy {
def toLong = self
override def toString: String = SpanId.toString(self)
}
object SpanId {
// StringBuilder.appendAll(char..) seems to be faster than
// StringBuilder.append(string..)
private val lut: Array[Array[Char]] = (
for (b <- Byte.MinValue to Byte.MaxValue) yield {
val bb = if (b < 0) b + 256 else b
val s = "%02x".format(bb)
Array(s(0), s(1))
}
).toArray
private def byteToChars(b: Byte): Array[Char] = lut(b+128)
  // This is invoked a lot, so it needs to be fast.
def toString(l: Long): String = {
val b = new StringBuilder(16)
b.appendAll(byteToChars((l>>56 & 0xff).toByte))
b.appendAll(byteToChars((l>>48 & 0xff).toByte))
b.appendAll(byteToChars((l>>40 & 0xff).toByte))
b.appendAll(byteToChars((l>>32 & 0xff).toByte))
b.appendAll(byteToChars((l>>24 & 0xff).toByte))
b.appendAll(byteToChars((l>>16 & 0xff).toByte))
b.appendAll(byteToChars((l>>8 & 0xff).toByte))
b.appendAll(byteToChars((l & 0xff).toByte))
b.toString
}
def apply(spanId: Long): SpanId = new SpanId(spanId)
def fromString(spanId: String): Option[SpanId] =
try {
// Tolerates 128 bit X-B3-TraceId by reading the right-most 16 hex
// characters (as opposed to overflowing a U64 and starting a new trace).
val length = spanId.length()
val lower64Bits = if (length <= 16) spanId else spanId.substring(length - 16)
Some(SpanId(new RichU64String(lower64Bits).toU64Long))
} catch {
case NonFatal(_) => None
}
}
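// Illustrative usage (hypothetical ids):
//   SpanId.fromString("48485a3953bb6124").map(_.toString) == Some("48485a3953bb6124")
// A 128-bit X-B3-TraceId keeps only its lower 64 bits:
//   SpanId.fromString("463ac35c9f6413ad48485a3953bb6124").map(_.toString) ==
//     Some("48485a3953bb6124")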
object TraceId {
/**
* Creates a TraceId with no flags set. See case class for more info.
*/
def apply(
traceId: Option[SpanId],
parentId: Option[SpanId],
spanId: SpanId,
sampled: Option[Boolean]
): TraceId =
TraceId(traceId, parentId, spanId, sampled, Flags())
/**
* Serialize a TraceId into an array of bytes.
*/
def serialize(traceId: TraceId): Array[Byte] = {
val flags = traceId._sampled match {
case None =>
traceId.flags
case Some(true) =>
traceId.flags.setFlag(Flags.SamplingKnown | Flags.Sampled)
case Some(false) =>
traceId.flags.setFlag(Flags.SamplingKnown)
}
val bytes = new Array[Byte](32)
ByteArrays.put64be(bytes, 0, traceId.spanId.toLong)
ByteArrays.put64be(bytes, 8, traceId.parentId.toLong)
ByteArrays.put64be(bytes, 16, traceId.traceId.toLong)
ByteArrays.put64be(bytes, 24, flags.toLong)
bytes
}
/**
* Deserialize a TraceId from an array of bytes.
*/
def deserialize(bytes: Array[Byte]): Try[TraceId] = {
if (bytes.length != 32) {
Throw(new IllegalArgumentException("Expected 32 bytes"))
} else {
val span64 = ByteArrays.get64be(bytes, 0)
val parent64 = ByteArrays.get64be(bytes, 8)
val trace64 = ByteArrays.get64be(bytes, 16)
val flags64 = ByteArrays.get64be(bytes, 24)
val flags = Flags(flags64)
val sampled = if (flags.isFlagSet(Flags.SamplingKnown)) {
Some(flags.isFlagSet(Flags.Sampled))
} else None
val traceId = TraceId(
if (trace64 == parent64) None else Some(SpanId(trace64)),
if (parent64 == span64) None else Some(SpanId(parent64)),
SpanId(span64),
sampled,
flags)
Return(traceId)
}
}
}
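// Round-trip sketch (hypothetical id): serialize packs span, parent, trace and flags as
// four big-endian longs (32 bytes), and deserialize reverses it:
//   val id = TraceId(None, None, SpanId(1L), Some(true), Flags())
//   TraceId.deserialize(TraceId.serialize(id)) == Return(id)
// The sampling decision travels in the flags word via Flags.SamplingKnown | Flags.Sampled.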
/**
* A trace id represents one particular trace for one request.
*
* A request is composed of one or more spans, which are generally RPCs but
* may be other in-process activity. The TraceId for each span is a tuple of
* three ids:
*
* 1. a shared id common to all spans in an overall request (trace id)
* 2. an id unique to this part of the request (span id)
* 3. an id for the parent request that caused this span (parent id)
*
* For example, when service M calls service N, they may have respective
* TraceIds like these:
*
* {{{
* TRACE ID SPAN ID PARENT ID
* SERVICE M e4bbb7c0f6a2ff07.a5f47e9fced314a2<:694eb2f05b8fd7d1
* | |
* | +-----------------+
* | |
* v v
* SERVICE N e4bbb7c0f6a2ff07.263edc9b65773b08<:a5f47e9fced314a2
* }}}
*
* Parent id and trace id are optional when constructing a TraceId because
* they are not present for the very first span in a request. In this case all
* three ids in the resulting TraceId are the same:
*
* {{{
* TRACE ID SPAN ID PARENT ID
* SERVICE A 34429b04b6bbf478.34429b04b6bbf478<:34429b04b6bbf478
* }}}
*
* @param _traceId The id for this request.
* @param _parentId The id for the request one step up the service stack.
* @param spanId The id for this particular request
* @param _sampled Should we sample this request or not? True means sample, false means don't, none means we defer
* decision to someone further down in the stack.
* @param flags Flags relevant to this request. Could be things like debug mode on/off. The sampled flag could eventually
* be moved in here.
*/
final case class TraceId(
_traceId: Option[SpanId],
_parentId: Option[SpanId],
spanId: SpanId,
_sampled: Option[Boolean],
flags: Flags)
{
def traceId: SpanId = _traceId match {
case None => parentId
case Some(id) => id
}
def parentId: SpanId = _parentId match {
case None => spanId
case Some(id) => id
}
/**
* Override [[_sampled]] to Some(true) if the debug flag is set.
* @see [[getSampled]] for a Java-friendly API.
*/
lazy val sampled: Option[Boolean] = if (flags.isDebug) Some(true) else _sampled
/**
   * Java-friendly API to convert [[sampled]] to an [[Option]] of [[java.lang.Boolean]].
   * @note Java generics require objects, so using [[sampled]] from
   *       Java would give an Option<Object> instead of Option<Boolean>.
*/
def getSampled(): Option[JBool] = sampled match {
case Some(b) => Some(Boolean.box(b))
case None => None
}
private[TraceId] def ids = (traceId, parentId, spanId)
override def equals(other: Any) = other match {
case other: TraceId => this.ids equals other.ids
case _ => false
}
override def hashCode(): Int =
ids.hashCode()
override def toString = s"$traceId.$spanId<:$parentId"
}
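// Construction sketch mirroring the scaladoc above (hypothetical id): the root span of a
// request has no parent and no pre-existing trace id, so all three ids collapse to the
// span id:
//   val root = TraceId(None, None, SpanId(0x34429b04b6bbf478L), None, Flags())
//   root.toString == "34429b04b6bbf478.34429b04b6bbf478<:34429b04b6bbf478"
// A downstream span keeps the caller's trace id and sets its parent id to the caller's
// span id.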
| spockz/finagle | finagle-core/src/main/scala/com/twitter/finagle/tracing/Id.scala | Scala | apache-2.0 | 6,786 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import slamdata.Predef._
import quasar.contrib.cats.hash.toHashing
import quasar.contrib.cats.eqv.toEquiv
import java.util.concurrent.TimeUnit
import scala.collection.concurrent.TrieMap
import scala.concurrent.duration._
import cats.effect.{Sync, Timer}
import cats.effect.concurrent.Ref
import cats.kernel.Hash
import cats.implicits._
final class RateLimiter[F[_]: Sync: Timer, A: Hash] private (
caution: Double,
updater: RateLimitUpdater[F, A]) {
// TODO make this clustering-aware
private val configs: TrieMap[A, RateLimiterConfig] =
new TrieMap[A, RateLimiterConfig](toHashing[A], toEquiv[A])
// TODO make this clustering-aware
private val states: TrieMap[A, Ref[F, State]] =
new TrieMap[A, Ref[F, State]](toHashing[A], toEquiv[A])
def configure(key: A, config: RateLimiterConfig)
: F[Option[RateLimiterConfig]] =
Sync[F].delay(configs.putIfAbsent(key, config))
// TODO implement with TrieMap#updateWith when we're on Scala 2.13
def plusOne(key: A): F[Unit] =
for {
ref <- Sync[F].delay(states.get(key))
now <- nowF
_ <- ref match {
case Some(r) =>
r.modify(s => (s.copy(count = s.count + 1), ()))
case None =>
for {
now <- nowF
ref <- Ref.of[F, State](State(1, now))
put <- Sync[F].delay(states.putIfAbsent(key, ref))
_ <- put match {
case Some(_) => plusOne(key) // retry
case None => ().pure[F]
}
} yield ()
}
} yield ()
// TODO implement with TrieMap#updateWith when we're on Scala 2.13
def wait(key: A, duration: FiniteDuration): F[Unit] =
for {
ref <- Sync[F].delay(states.get(key))
now <- nowF
_ <- ref match {
case Some(r) =>
r.update(_ => State(0, now + duration))
case None =>
for {
ref <- Ref.of[F, State](State(0, now + duration))
put <- Sync[F].delay(states.putIfAbsent(key, ref))
_ <- put match {
case Some(_) => wait(key, duration) // retry
case None => ().pure[F]
}
} yield ()
}
} yield ()
def apply(key: A, max: Int, window: FiniteDuration)
: F[RateLimiterEffects[F]] =
for {
config <- Sync[F] delay {
val c = RateLimiterConfig(max, window)
configs.putIfAbsent(key, c).getOrElse(c)
}
_ <- updater.config(key, config)
now <- nowF
maybeR <- Ref.of[F, State](State(0, now))
stateRef <- Sync[F] delay {
states.putIfAbsent(key, maybeR).getOrElse(maybeR)
}
} yield {
RateLimiterEffects[F](
limit(key, config, stateRef),
backoff(key, config, stateRef))
}
// TODO wait smarter (i.e. not for an entire window)
// the server's window falls in our previous window between
// max and max+1 requests prior to the server-throttled request
private def backoff(key: A, config: RateLimiterConfig, stateRef: Ref[F, State])
: F[Unit] =
nowF.flatMap(now =>
stateRef.update(_ => State(0, now + config.window)) >>
updater.wait(key, config.window))
private def limit(key: A, config: RateLimiterConfig, stateRef: Ref[F, State])
: F[Unit] = {
import config._
for {
now <- nowF
state <- stateRef.get
back <-
if (state.start > now) { // waiting
Timer[F].sleep(state.start - now) >>
limit(key, config, stateRef)
} else if (state.start + window < now) { // in the next window
stateRef.update(_ => State(0, state.start + window)) >>
limit(key, config, stateRef)
} else { // in the current window
stateRef.modify(s => (s.copy(count = s.count + 1), s.count)) flatMap { count =>
if (count >= max * caution) { // max exceeded
val duration = (state.start + window) - now
updater.wait(key, duration) >>
Timer[F].sleep(duration) >>
stateRef.update(_ => State(0, state.start + window)) >>
limit(key, config, stateRef)
} else { // continue
updater.plusOne(key)
}
}
}
} yield back
}
private val nowF: F[FiniteDuration] =
Timer[F].clock.realTime(TimeUnit.MILLISECONDS).map(_.millis)
private case class State(count: Int, start: FiniteDuration)
}
object RateLimiter {
def apply[F[_]: Sync: Timer, A: Hash](
caution: Double,
freshKey: F[A],
updater: RateLimitUpdater[F, A])
: F[RateLimiting[F, A]] =
Sync[F].delay(RateLimiting[F, A](
new RateLimiter[F, A](caution, updater),
freshKey))
}
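// Hedged usage sketch (added; not part of the original file). It only assumes what the
// call sites above imply: RateLimitUpdater[F, A] exposes config/plusOne/wait,
// RateLimiter.apply returns a RateLimiting value pairing the limiter with freshKey, and
// RateLimiterEffects carries the limit/backoff actions built in RateLimiter#apply.
// Field names used below (e.g. `limiter`) are assumptions, not confirmed API.
//
//   for {
//     limiting <- RateLimiter[IO, String](caution = 0.75, freshKey = IO("key-0"), updater)
//     effects  <- limiting.limiter("api-token", max = 100, window = 1.minute)
//     _        <- effects.limit    // sleeps once more than max * caution requests hit the window
//     _        <- effects.backoff  // call when the upstream service itself signals throttling
//   } yield ()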
| djspiewak/quasar | foundation/src/main/scala/quasar/RateLimiter.scala | Scala | apache-2.0 | 5,321 |
package com.nefariouszhen.khronos.websocket
import com.google.inject.{ConfigurationException, Inject, Injector}
import org.atmosphere.cpr.{AtmosphereFramework, AtmosphereObjectFactory}
class GuiceObjectFactory @Inject()(injector: Injector) extends AtmosphereObjectFactory {
override def newClassInstance[T, U <: T](framework: AtmosphereFramework, classType: Class[T], typ: Class[U]): T = {
// Try to pull from Guice; if non-existent, then create directly.
try {
injector.getInstance(typ)
} catch {
case e: ConfigurationException => typ.newInstance()
}
}
}
| khronos-metrics/khronos | khronos-websocket-core/src/main/scala/com/nefariouszhen/khronos/websocket/GuiceObjectFactory.scala | Scala | apache-2.0 | 590 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.engine.core.output.types.es
import java.util.Base64
import com.bwsw.sj.common.engine.core.output.BasicType
import org.apache.commons.lang3.StringEscapeUtils
import scala.util.parsing.json._
import com.bwsw.sj.engine.core.output.IncompatibleTypeException
abstract class ElasticsearchField[T](name: String, default: T) extends BasicType[String, T](name, default)
/**
* Created by Ivan Kudryavtsev on 03.03.2017.
*/
class IntegerField(name: String, default: java.lang.Integer = 0) extends ElasticsearchField[java.lang.Integer](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(i: java.lang.Integer) => i.toString
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be java.lang.Integer.")
}
}
class LongField(name: String, default: java.lang.Long = 0L) extends ElasticsearchField[java.lang.Long](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(l: java.lang.Long) => l.toString
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be java.lang.Long.")
}
}
class FloatField(name: String, default: java.lang.Float = 0.0f) extends ElasticsearchField[java.lang.Float](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(f: java.lang.Float) => f.toString
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be java.lang.Float.")
}
}
class DoubleField(name: String, default: java.lang.Double = 0.0) extends ElasticsearchField[java.lang.Double](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(d: java.lang.Double) => d.toString
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be java.lang.Double.")
}
}
class ByteField(name: String, default: java.lang.Byte = 0.toByte) extends ElasticsearchField[java.lang.Byte](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(b: java.lang.Byte) => b.toString
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be java.lang.Byte.")
}
}
class CharField(name: String, default: java.lang.Character = 0.toChar) extends ElasticsearchField[java.lang.Character](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(c: java.lang.Character) => c.toString
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be java.lang.Character.")
}
}
class ShortField(name: String, default: java.lang.Short = 0.toShort) extends ElasticsearchField[java.lang.Short](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(s: java.lang.Short) => s.toString
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be java.lang.Short.")
}
}
class BooleanField(name: String, default: java.lang.Boolean = true) extends ElasticsearchField[java.lang.Boolean](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(b: java.lang.Boolean) => b.toString
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be java.lang.Boolean.")
}
}
class DateField(name: String, default: java.lang.String = "0000-00-00") extends ElasticsearchField[String](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(s: java.lang.String) => "\\"" + s + "\\""
case Some(l: java.lang.Long) => l.toString
case Some(d: java.util.Date) => d.getTime.toString
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be java.lang.String or Long.")
}
}
class BinaryField(name: String, default: Array[Byte] = new Array[Byte](0)) extends ElasticsearchField[Array[Byte]](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(ab: Array[Byte]) => Base64.getEncoder.encodeToString(ab)
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be Array[Byte]")
}
}
class JavaStringField(name: String, default: String = "") extends ElasticsearchField[String](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(s: String) => "\\"" + StringEscapeUtils.escapeJava(s) + "\\""
case _ => "\\"" + StringEscapeUtils.escapeJava(fieldValue.toString) + "\\""
}
}
class HTMLStringField(name: String, default: String = "") extends ElasticsearchField[String](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(s: String) => "\\"" + StringEscapeUtils.escapeHtml4(s) + "\\""
case _ => "\\"" + StringEscapeUtils.escapeHtml4(fieldValue.toString) + "\\""
}
}
class RangeField[T](name: String, default: String = "") extends ElasticsearchField[String](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some((from: java.lang.Integer, to: java.lang.Integer)) =>
s"""{"gte": $from, "lte": $to}"""
case Some((from: java.lang.Long, to: java.lang.Long)) =>
s"""{"gte": $from, "lte": $to}"""
case Some((from: java.lang.Float, to: java.lang.Float)) =>
s"""{"gte": $from, "lte": $to}"""
case Some((from: java.lang.Double, to: java.lang.Double)) =>
s"""{"gte": $from, "lte": $to}"""
case Some((from: String, to: String)) =>
s"""{"gte": "${StringEscapeUtils.escapeJava(from)}", "lte": "${StringEscapeUtils.escapeJava(to)}"}"""
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be (Integer, Integer), (Long, Long), (Float, Float) or (String, String).")
}
}
class ArrayField(name: String, default: String = "[]") extends ElasticsearchField[String](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(s: String) =>
JSON.parseRaw(s).orNull match {
case _: JSONArray => s
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be JSON Array in String form.")
}
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be JSON Array in String form.")
}
}
class ObjectField(name: String, default: String = "{}") extends ElasticsearchField[String](name, default) {
override def transform(fieldValue: Any): String = Option(fieldValue) match {
case None => "null"
case Some(s: String) =>
JSON.parseRaw(s).orNull match {
case _: JSONObject => s
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be JSON Object in String form.")
}
case _ => throw new IncompatibleTypeException(s"Field '$name' has an incompatible type ${fieldValue.getClass.getName}. Must be JSON Object in String form.")
}
}
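// Illustrative sketch (added; not part of the original sources): how the transformers above
// render values into the JSON fragments that make up an Elasticsearch document. Only
// constructors defined in this file are used.
object FieldTransformExamples {
  def main(args: Array[String]): Unit = {
    println(new IntegerField("age").transform(42))                     // 42
    println(new IntegerField("age").transform(null))                   // null
    println(new JavaStringField("name").transform("say \"hi\""))       // "say \"hi\"" (quoted, Java-escaped)
    println(new DateField("seen").transform(new java.util.Date(0L)))   // 0 (epoch millis)
    println(new RangeField[Int]("score").transform((1, 10)))           // {"gte": 1, "lte": 10}
    println(new ArrayField("tags").transform("""["a", "b"]"""))        // ["a", "b"] (valid JSON array passes through)
  }
}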
| bwsw/sj-platform | core/sj-engine-core/src/main/scala/com/bwsw/sj/engine/core/output/types/es/ElasticSearchTypes.scala | Scala | apache-2.0 | 8,789 |
package com.twitter.finagle.thriftmux
import com.twitter.finagle.{Path, Failure, Dtab, ThriftMuxUtil}
import com.twitter.finagle.mux.transport.{BadMessageException, Message, Netty3Framer}
import com.twitter.finagle.netty3.BufChannelBuffer
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.thrift._
import com.twitter.finagle.thrift.thrift.{
RequestContext, RequestHeader, ResponseHeader, UpgradeReply}
import com.twitter.finagle.tracing.Trace
import com.twitter.logging.Level
import com.twitter.util.{Try, Return, Throw, NonFatal}
import java.util.concurrent.LinkedBlockingDeque
import java.util.concurrent.atomic.AtomicInteger
import org.apache.thrift.protocol.{TProtocolFactory, TMessage, TMessageType}
import org.jboss.netty.buffer.{ChannelBuffers, ChannelBuffer}
import org.jboss.netty.channel._
import scala.collection.mutable
/**
* A [[org.jboss.netty.channel.ChannelPipelineFactory]] that manages the downgrading
* of mux server sessions to plain thrift or twitter thrift. Because this is used in the
* context of the mux server dispatcher it's important that when we downgrade, we faithfully
* emulate the mux protocol. Additionally, the pipeline records the number of open ThriftMux
* and non-Mux downgraded sessions in a pair of [[java.util.concurrent.atomic.AtomicInteger AtomicIntegers]].
*/
// Note: this lives in a file that doesn't match the class name in order
// to decouple Netty from finagle and isolate everything related to Netty3 into a single file.
private[finagle] class PipelineFactory(
statsReceiver: StatsReceiver = NullStatsReceiver,
protocolFactory: TProtocolFactory = Protocols.binaryFactory())
extends ChannelPipelineFactory
{
def newUnexpectedRequestException(err: String): Failure =
Failure(err).withLogLevel(Level.DEBUG)
private object TTwitterToMux {
private val responseHeader = ChannelBuffers.wrappedBuffer(
OutputBuffer.messageToArray(new ResponseHeader, protocolFactory))
}
private class TTwitterToMux extends SimpleChannelHandler {
import TTwitterToMux._
private[this] def contextStructToKVTuple(c: RequestContext): (ChannelBuffer, ChannelBuffer) =
(ChannelBuffers.wrappedBuffer(c.getKey), ChannelBuffers.wrappedBuffer(c.getValue))
private[this] def thriftToMux(req: ChannelBuffer): Message.Tdispatch = {
val header = new RequestHeader
val request_ = InputBuffer.peelMessage(
ThriftMuxUtil.bufferToArray(req),
header,
protocolFactory
)
val richHeader = new RichRequestHeader(header)
val contextBuf =
new mutable.ArrayBuffer[(ChannelBuffer, ChannelBuffer)](
2 + (if (header.contexts == null) 0 else header.contexts.size))
contextBuf += (
BufChannelBuffer(Trace.idCtx.marshalId) ->
BufChannelBuffer(Trace.idCtx.marshal(richHeader.traceId)))
richHeader.clientId match {
case Some(clientId) =>
val clientIdBuf = ClientId.clientIdCtx.marshal(Some(clientId))
contextBuf += (
BufChannelBuffer(ClientId.clientIdCtx.marshalId) ->
BufChannelBuffer(clientIdBuf))
case None =>
}
if (header.contexts != null) {
val iter = header.contexts.iterator()
while (iter.hasNext) {
contextBuf += contextStructToKVTuple(iter.next())
}
}
val requestBuf = ChannelBuffers.wrappedBuffer(request_)
Message.Tdispatch(
Message.Tags.MinTag, contextBuf.toSeq, richHeader.dest, richHeader.dtab, requestBuf)
}
override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent): Unit = {
val buf = e.getMessage.asInstanceOf[ChannelBuffer]
super.messageReceived(ctx, new UpstreamMessageEvent(
e.getChannel, Message.encode(thriftToMux(buf)), e.getRemoteAddress))
}
override def writeRequested(ctx: ChannelHandlerContext, e: MessageEvent): Unit = {
Message.decode(e.getMessage.asInstanceOf[ChannelBuffer]) match {
case Message.RdispatchOk(_, _, rep) =>
super.writeRequested(ctx,
new DownstreamMessageEvent(e.getChannel, e.getFuture,
ChannelBuffers.wrappedBuffer(responseHeader, rep), e.getRemoteAddress))
case Message.RdispatchNack(_, _) =>
// The only mechanism for negative acknowledgement afforded by non-Mux
// clients is to tear down the connection.
Channels.close(e.getChannel)
case Message.Tdrain(tag) =>
// Terminate the write here with a "success" and synthesize a Rdrain response.
// Although downgraded connections don't understand Tdrains, we synthesize a Rdrain
// so the server dispatcher enters draining mode.
e.getFuture.setSuccess()
super.messageReceived(ctx,
new UpstreamMessageEvent(
e.getChannel,
Message.encode(Message.Rdrain(tag)),
e.getRemoteAddress))
case unexpected =>
throw newUnexpectedRequestException(
s"unable to send ${unexpected.getClass.getName} to non-mux client")
}
}
}
private class TFramedToMux extends SimpleChannelHandler {
override def writeRequested(ctx: ChannelHandlerContext, e: MessageEvent): Unit = {
Message.decode(e.getMessage.asInstanceOf[ChannelBuffer]) match {
case Message.RdispatchOk(_, _, rep) =>
super.writeRequested(ctx,
new DownstreamMessageEvent(e.getChannel, e.getFuture, rep, e.getRemoteAddress))
case Message.RdispatchNack(_, _) =>
// The only mechanism for negative acknowledgement afforded by non-Mux
// clients is to tear down the connection.
Channels.close(e.getChannel)
case Message.Tdrain(tag) =>
// Terminate the write here with a "success" and synthesize a Rdrain response.
// Although downgraded connections don't understand Tdrains, we synthesize a Rdrain
// so the server dispatcher enters draining mode.
e.getFuture.setSuccess()
super.messageReceived(ctx,
new UpstreamMessageEvent(
e.getChannel,
Message.encode(Message.Rdrain(tag)),
e.getRemoteAddress))
case unexpected =>
throw newUnexpectedRequestException(
s"unable to send ${unexpected.getClass.getName} to non-mux client")
}
}
override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent): Unit = {
val buf = e.getMessage.asInstanceOf[ChannelBuffer]
super.messageReceived(ctx,
new UpstreamMessageEvent(
e.getChannel,
Message.encode(Message.Tdispatch(Message.Tags.MinTag, Nil, Path.empty, Dtab.empty, buf)),
e.getRemoteAddress))
}
}
class RequestSerializer(pendingReqs: Int = 0) extends SimpleChannelHandler {
    // Note: since there can be at most one pending request at any time, the only
    // race condition that needs to be handled is one thread (a Netty worker thread)
    // executing messageReceived while another thread satisfies the request by
    // executing writeRequested.
private[this] val q = new LinkedBlockingDeque[MessageEvent]
private[this] val n = new AtomicInteger(pendingReqs)
override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent): Unit = {
if (n.incrementAndGet() > 1) q.offer(e)
else super.messageReceived(ctx, e)
}
override def writeRequested(ctx: ChannelHandlerContext, e: MessageEvent): Unit = {
super.writeRequested(ctx, e)
if (n.decrementAndGet() > 0) {
        // Need to call q.take() since incrementing n and enqueueing the
        // request are not atomic; n > 0 guarantees q.take() does not block forever.
super.messageReceived(ctx, q.take())
}
}
}
private object Upgrader {
val upNegotiationAck = {
val buffer = new OutputBuffer(protocolFactory)
buffer().writeMessageBegin(
new TMessage(ThriftTracing.CanTraceMethodName, TMessageType.REPLY, 0))
val upgradeReply = new UpgradeReply
upgradeReply.write(buffer())
buffer().writeMessageEnd()
ChannelBuffers.copiedBuffer(buffer.toArray)
}
}
private class DrainQueue[T] {
private[this] var q = new mutable.Queue[T]
def offer(e: T): Boolean = synchronized {
if (q != null)
q.enqueue(e)
q != null
}
def drain(): Iterable[T] = {
synchronized {
assert(q != null, "Can't drain queue more than once")
val q1 = q
q = null
q1
}
}
}
private class Upgrader extends SimpleChannelHandler {
import Upgrader._
// Queue writes until we know what protocol we are speaking.
private[this] val writeq = new DrainQueue[MessageEvent]
private[this] def isTTwitterUpNegotiation(req: ChannelBuffer): Boolean = {
try {
val buffer = new InputBuffer(ThriftMuxUtil.bufferToArray(req), protocolFactory)
val msg = buffer().readMessageBegin()
msg.`type` == TMessageType.CALL &&
msg.name == ThriftTracing.CanTraceMethodName
} catch {
case NonFatal(_) => false
}
}
override def writeRequested(ctx: ChannelHandlerContext, e: MessageEvent): Unit = {
if (!writeq.offer(e))
super.writeRequested(ctx, e)
}
override def messageReceived(ctx: ChannelHandlerContext, e: MessageEvent): Unit = {
val buf = e.getMessage.asInstanceOf[ChannelBuffer]
val pipeline = ctx.getPipeline
Try { Message.decode(buf.duplicate()) } match {
// We assume that a bad message decode indicates an old-style
// session. Due to Mux message numbering, a binary-encoded
// thrift frame corresponds to an Rerr message with tag
// 65537. Note that in this context, an R-message is never
// valid.
//
// Binary-encoded thrift messages have the format
//
// header:4 n:4 method:n seqid:4
//
// The header is
//
// 0x80010000 | type
//
// where the type of CALL is 1; the type of ONEWAY is 4. This makes
// the first four bytes of a CALL message 0x80010001.
//
// Mux messages begin with
//
// Type:1 tag:3
//
          // Rerr is type 0x80, so the thrift CALL header above is decoded as
          // an Rerr whose tag is 0x010001.
//
// The hazards of protocol multiplexing.
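          //
          // Worked example (added for clarity): the first four bytes of a thrift CALL
          // frame are 0x80 0x01 0x00 0x01; read as a mux message that is type 0x80 (Rerr)
          // with tag 0x010001 = 65537. A ONEWAY frame starts 0x80 0x01 0x00 0x04 and
          // decodes as tag 0x010004 = 65540 -- exactly the two Rerr tags matched below.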
case Throw(_: BadMessageException) |
Return(Message.Rerr(65537, _)) |
Return(Message.Rerr(65540, _)) =>
// Increment ThriftMux connection count stats and wire up a callback to
// decrement on channel closure.
downgradedConnects.incr()
downgradedConnectionCount.incrementAndGet()
ctx.getChannel.getCloseFuture.addListener(new ChannelFutureListener {
override def operationComplete(f: ChannelFuture): Unit =
if (!f.isCancelled) { // on success or failure
downgradedConnectionCount.decrementAndGet()
}
})
// Add a ChannelHandler to serialize the requests since we may
// deal with a client that pipelines requests
pipeline.addBefore(ctx.getName, "request_serializer", new RequestSerializer(1))
if (isTTwitterUpNegotiation(buf)) {
pipeline.replace(this, "twitter_thrift_to_mux", new TTwitterToMux)
Channels.write(ctx, e.getFuture, upNegotiationAck, e.getRemoteAddress)
} else {
pipeline.replace(this, "framed_thrift_to_mux", new TFramedToMux)
super.messageReceived(ctx,
new UpstreamMessageEvent(
e.getChannel,
Message.encode(
Message.Tdispatch(Message.Tags.MinTag, Nil, Path.empty, Dtab.empty, buf)),
e.getRemoteAddress))
}
case Return(_) =>
// Increment ThriftMux connection count stats and wire up a callback to
// decrement on channel closure.
thriftmuxConnects.incr()
thriftMuxConnectionCount.incrementAndGet()
ctx.getChannel.getCloseFuture.addListener(new ChannelFutureListener {
override def operationComplete(f: ChannelFuture): Unit =
if (!f.isCancelled) { // on success or failure
thriftMuxConnectionCount.decrementAndGet()
}
})
pipeline.remove(this)
super.messageReceived(ctx, e)
case Throw(exc) => throw exc
}
// On the surface, this seems bad, since messages may be
// reordered. In practice, the Transport interface on top of
// Netty's channel is responsible for serializing writes; the
// completion Future in the message event is satisfied only
// after it has been written to the actual socket by the
// Netty's NIO implementation.
//
// This leaves interdependence between data messages
// (writeRequested) and other messages (e.g. channel events).
// Here there are none.
for (e <- writeq.drain())
pipeline.sendDownstream(e)
}
}
private[this] val downgradedConnectionCount = new AtomicInteger
private[this] val thriftMuxConnectionCount = new AtomicInteger
private[this] val thriftmuxConnects = statsReceiver.counter("connects")
private[this] val downgradedConnects = statsReceiver.counter("downgraded_connects")
private[this] val downgradedConnectionGauge =
statsReceiver.addGauge("downgraded_connections") { downgradedConnectionCount.get() }
private[this] val thriftmuxConnectionGauge =
statsReceiver.addGauge("connections") { thriftMuxConnectionCount.get() }
def getPipeline(): ChannelPipeline = {
val pipeline = Netty3Framer.getPipeline()
pipeline.addAfter("framer", "upgrader", new Upgrader)
pipeline
}
}
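// Hedged usage sketch (added; not part of the original file): the factory's only public
// surface here is getPipeline(), and both constructor arguments have defaults, so wiring
// it into a Netty server bootstrap reduces to something like:
//
//   val pipelineFactory = new PipelineFactory(NullStatsReceiver, Protocols.binaryFactory())
//   val pipeline = pipelineFactory.getPipeline()   // mux framer + "upgrader" handler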
| lukiano/finagle | finagle-thriftmux/src/main/scala/com/twitter/finagle/thriftmux/Netty3.scala | Scala | apache-2.0 | 13,967 |
package com.lucius.shu.dao
import com.lucius.shu.base.{Constants, Contexts}
import com.lucius.shu.util.Utils
import org.apache.spark.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StringType, StructField, StructType}
import scala.language.implicitConversions
import scala.reflect.runtime.universe.TypeTag
trait Dao extends Logging {
private val sqlContext = Contexts.sqlContext
protected def getProps(inputFilePath: String, schema: String, tempTableName: String, sqlObj: SQL, separator: String = Constants.InputPath.SEPARATOR) = {
registerTempTableIfNotExist(inputFilePath, schema, tempTableName, separator)
combineSqlAndQuery(tempTableName, sqlObj)
}
protected def getProps[A <: Product : TypeTag](rdd: RDD[A], schema: String, tempTableName: String, sqlObj: SQL) = {
registerTempTableIfNotExist(rdd, schema, tempTableName)
combineSqlAndQuery(tempTableName, sqlObj)
}
private def combineSqlAndQuery(tempTableName: String, sqlObj: SQL) = {
var _sql = s"SELECT ${sqlObj.select} FROM $tempTableName"
if (Option(sqlObj.where).isDefined)
_sql += s" WHERE ${sqlObj.where}"
if (Option(sqlObj.orderBy).isDefined) {
      //When using ORDER BY, spark.sql.shuffle.partitions needs to be set to a small value
      sqlContext.sql(s"SET spark.sql.shuffle.partitions=10")
      logInfo(Utils.logWrapper("the order by operation requires: SET spark.sql.shuffle.partitions=10"))
_sql += s" ORDER BY ${sqlObj.orderBy} "
}
    //group by is not supported yet; use the corresponding RDD transformation operations instead
if (Option(sqlObj.limit).isDefined)
_sql += s" LIMIT ${sqlObj.limit}"
    logInfo(Utils.logWrapper(s"Executing SQL: ${_sql}"))
val resultRDD = sqlContext.sql(_sql).map(a => a.toSeq.toArray)
if (Option(sqlObj.orderBy).isDefined) {
      //After the order by operation completes, restore the default of 200
      logInfo(Utils.logWrapper("order by finished, restoring default: SET spark.sql.shuffle.partitions=200"))
sqlContext.sql("SET spark.sql.shuffle.partitions=200")
}
resultRDD
}
def registerTempTableIfNotExist[A <: Product : TypeTag](rdd: RDD[A], schema: String, tempTableName: String) {
if (notExistTempTable(tempTableName)) {
import sqlContext.implicits._
      rdd.toDF(schema.split(","): _*).registerTempTable(tempTableName)
      logInfo(Utils.logWrapper(s"Registered temp table: $tempTableName"))
}
}
private def registerTempTableIfNotExist(inputFilePath: String, schema: String, tempTableName: String, separator: String) {
    //Register the temp table only if it has not been registered yet
if (notExistTempTable(tempTableName)) {
val fieldsNum = schema.split(",").length
val tmpRDD = Contexts.sparkContext.textFile(inputFilePath)
.map(_.split(separator)).cache()
tmpRDD.map(t =>(t.length,1)).countByValue()
      logInfo(Utils.logWrapper(s"The schema has $fieldsNum fields; parsed field count -> row count: ${tmpRDD.map(_.length).countByValue()}"))
val rowRDD = tmpRDD.filter(x => x.length == fieldsNum)
.map(fields => for (field <- fields) yield field.trim)
.map(fields => Row(fields: _*))
sqlContext.createDataFrame(rowRDD, simpleSchema(schema)).registerTempTable(tempTableName)
      logInfo(Utils.logWrapper(s"Registered temp table: $tempTableName"))
}
}
private def notExistTempTable(tempTableName: String): Boolean = {
!sqlContext.tableNames().contains(tempTableName)
}
private def simpleSchema(schemaStr: String): StructType = {
StructType(schemaStr.toLowerCase.split(",").map(fieldName => StructField(fieldName.trim, StringType, nullable = true)))
}
}
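// Hedged usage sketch (added; not part of the original file). The SQL class lives elsewhere
// in this project; its field names (select/where/orderBy/limit) are taken from the accesses
// above, but its constructor and the input path below are assumptions for illustration only.
//
//   object OrderDao extends Dao {
//     def expensiveOrders: RDD[Array[Any]] =
//       getProps("/data/orders.csv", "id,name,price", "orders",
//         SQL(select = "id,price", where = "price > 100", orderBy = "price", limit = "10"))
//   }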
| zj-lingxin/spark_hbase_shu | src/main/scala/com/lucius/shu/dao/Dao.scala | Scala | mit | 3,659 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package statements
import lexer.ScalaTokenTypes
import stubs.ScFunctionStub
import com.intellij.lang.ASTNode
import com.intellij.psi._
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import com.intellij.psi.scope._
import types.{ScType, Unit}
import types.result.{TypingContext, Success, TypeResult}
import com.intellij.openapi.progress.ProgressManager
import api.base.types.ScTypeElement
import api.ScalaElementVisitor
import api.statements.params.ScParameter
import api.base.ScReferenceElement
import extensions._
import api.toplevel.templates.ScTemplateBody
import api.toplevel.typedef.{ScTypeDefinition, ScObject}
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
class ScFunctionDefinitionImpl extends ScFunctionImpl with ScFunctionDefinition {
def this(node: ASTNode) = {this (); setNode(node)}
def this(stub: ScFunctionStub) = {this (); setStub(stub); setNode(null)}
override def processDeclarations(processor: PsiScopeProcessor,
state: ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
//process function's parameters for dependent method types, and process type parameters
if (!super[ScFunctionImpl].processDeclarations(processor, state, lastParent, place)) return false
//do not process parameters for default parameters, only for function body
//processing parameters for default parameters in ScParameters
val parameterIncludingSynthetic: Seq[ScParameter] = effectiveParameterClauses.flatMap(_.parameters)
if (getStub == null) {
body match {
case Some(x)
if lastParent != null &&
(!needCheckProcessingDeclarationsForBody ||
x.getStartOffsetInParent == lastParent.getStartOffsetInParent) =>
for (p <- parameterIncludingSynthetic) {
ProgressManager.checkCanceled()
if (!processor.execute(p, state)) return false
}
case _ =>
}
} else {
if (lastParent != null && lastParent.getContext != lastParent.getParent) {
for (p <- parameterIncludingSynthetic) {
ProgressManager.checkCanceled()
if (!processor.execute(p, state)) return false
}
}
}
true
}
protected def needCheckProcessingDeclarationsForBody = true
override def toString: String = "ScFunctionDefinition: " + name
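  // Inference sketch (comment added for clarity, not original): `def f = 1` has no explicit
  // return-type element, so the body's type (Int) is used; `def p() { ... }` has no `=` at
  // all and is therefore Unit; `def g: String = ...` takes the declared type element.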
def returnTypeInner: TypeResult[ScType] = returnTypeElement match {
case None if !hasAssign => Success(Unit, Some(this))
case None => body match {
case Some(b) => b.getType(TypingContext.empty)
case _ => Success(Unit, Some(this))
}
case Some(rte: ScTypeElement) => rte.getType(TypingContext.empty)
}
def body: Option[ScExpression] = {
val stub = getStub
if (stub != null) stub.asInstanceOf[ScFunctionStub].getBodyExpression else findChild(classOf[ScExpression])
}
override def hasAssign: Boolean = {
val stub = getStub
if (stub != null) stub.asInstanceOf[ScFunctionStub].hasAssign else assignment.isDefined
}
def assignment = Option(findChildByType(ScalaTokenTypes.tASSIGN))
def removeAssignment() {
body match {
case Some(block: ScBlockExpr) => // do nothing
case Some(exp: ScExpression) =>
val block = ScalaPsiElementFactory.createBlockFromExpr(exp, exp.getManager)
exp.replace(block)
case _ =>
}
assignment.foreach(_.delete())
}
override def getBody: FakePsiCodeBlock = body match {
case Some(b) => new FakePsiCodeBlock(b) // Needed so that LineBreakpoint.canAddLineBreakpoint allows line breakpoints on one-line method definitions
case None => null
}
override def accept(visitor: ScalaElementVisitor) {
visitor.visitFunctionDefinition(this)
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case s: ScalaElementVisitor => s.visitFunctionDefinition(this)
case _ => super.accept(visitor)
}
}
override def importantOrderFunction(): Boolean = {
hasModifierProperty("implicit") && !hasExplicitType
}
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/statements/ScFunctionDefinitionImpl.scala | Scala | apache-2.0 | 4,259 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.common
import com.twitter.util.NonFatal
import com.twitter.zipkin.Constants
import scala.collection.breakOut
/**
* A span represents one RPC request. A trace is made up of many spans.
*
* A span can contain multiple annotations, some are always included such as
* Client send -> Server received -> Server send -> Client receive.
*
* Some are created by users, describing application specific information,
* such as cache hits/misses.
*/
object Span {
// TODO(jeff): what?!
def apply(span: Span): Span = span
def apply(
_traceId: Long,
_name: String,
_id: Long,
_parentId: Option[Long],
_annotations: List[Annotation],
_binaryAnnotations: Seq[BinaryAnnotation],
_debug: Boolean = false
): Span = new Span {
def traceId = _traceId
def name = _name
def id = _id
def parentId = _parentId
def annotations = _annotations.sorted
def binaryAnnotations = _binaryAnnotations
def debug = _debug
}
def unapply(span: Span): Option[(Long, String, Long, Option[Long], List[Annotation], Seq[BinaryAnnotation], Boolean)] =
try {
Some(
span.traceId,
span.name,
span.id,
span.parentId,
span.annotations,
span.binaryAnnotations,
span.debug
)
} catch {
case NonFatal(_) => None
}
}
/**
* @param traceId random long that identifies the trace, will be set in all spans in this trace
* @param name name of span, can be rpc method name for example
* @param id random long that identifies this span
* @param parentId reference to the parent span in the trace tree
* @param annotations annotations, containing a timestamp and some value. both user generated and
* some fixed ones from the tracing framework. Sorted ascending by timestamp
* @param binaryAnnotations binary annotations, can contain more detailed information such as
 *                          serialized objects. Sorted ascending by timestamp
* @param debug if this is set we will make sure this span is stored, no matter what the samplers want
*/
trait Span extends Ordered[Span] { self =>
def traceId: Long
def name: String
def id: Long
def parentId: Option[Long]
def annotations: List[Annotation]
def binaryAnnotations: Seq[BinaryAnnotation]
def debug: Boolean
// TODO: cache first timestamp when this is a normal case class as opposed to a trait
override def compare(that: Span) =
java.lang.Long.compare(firstTimestamp.getOrElse(0L), that.firstTimestamp.getOrElse(0L))
def copy(
traceId: Long = self.traceId,
name: String = self.name,
id: Long = self.id,
parentId: Option[Long] = self.parentId,
annotations: List[Annotation] = self.annotations,
binaryAnnotations: Seq[BinaryAnnotation] = self.binaryAnnotations,
debug: Boolean = self.debug
): Span = Span(traceId, name, id, parentId, annotations, binaryAnnotations, debug)
private def tuple = (traceId, name, id, parentId, annotations, binaryAnnotations, debug)
override def equals(other: Any): Boolean = other match {
case o: Span => o.tuple == self.tuple
case _ => false
}
override def hashCode: Int = tuple.hashCode
override def toString: String = s"Span${tuple}"
def serviceNames: Set[String] =
annotations.flatMap(a => a.host.map(h => h.serviceName.toLowerCase)).toSet
/**
* Tries to extract the best possible service name
*/
def serviceName: Option[String] = {
if (annotations.isEmpty) None else {
serverSideAnnotations.flatMap(_.host).headOption.map(_.serviceName) orElse {
clientSideAnnotations.flatMap(_.host).headOption.map(_.serviceName)
}
}
}
/**
* Iterate through list of annotations and return the one with the given value.
*/
def getAnnotation(value: String): Option[Annotation] =
annotations.find(_.value == value)
/**
* Iterate through list of binaryAnnotations and return the one with the given key.
*/
def getBinaryAnnotation(key: String): Option[BinaryAnnotation] =
binaryAnnotations.find(_.key == key)
/**
* Take two spans with the same span id and merge all data into one of them.
*/
def mergeSpan(mergeFrom: Span): Span = {
if (id != mergeFrom.id) {
throw new IllegalArgumentException("Span ids must match")
}
// ruby tracing can give us an empty name in one part of the span
val selectedName = name match {
case "" => mergeFrom.name
case "Unknown" => mergeFrom.name
case _ => name
}
new Span {
def traceId = self.traceId
def name = selectedName
def id = self.id
def parentId = self.parentId
def annotations = (self.annotations ++ mergeFrom.annotations).sorted
def binaryAnnotations = self.binaryAnnotations ++ mergeFrom.binaryAnnotations
def debug = self.debug | mergeFrom.debug
}
}
/**
* Get the first annotation by timestamp.
*/
def firstAnnotation: Option[Annotation] = annotations.headOption
/**
* Get the last annotation by timestamp.
*/
def lastAnnotation: Option[Annotation] = annotations.lastOption
/**
* Endpoints involved in this span
*/
def endpoints: Set[Endpoint] =
annotations.flatMap(_.host).toSet
/**
* Endpoint that is likely the owner of this span
*/
def clientSideEndpoint: Option[Endpoint] =
clientSideAnnotations.map(_.host).flatten.headOption
/**
* Pick out the core client side annotations
*/
def clientSideAnnotations: Seq[Annotation] =
annotations.filter(a => Constants.CoreClient.contains(a.value))
/**
* Pick out the core server side annotations
*/
def serverSideAnnotations: Seq[Annotation] =
annotations.filter(a => Constants.CoreServer.contains(a.value))
/**
* Duration of this span. May be None if we cannot find any annotations.
*/
def duration: Option[Long] =
for (first <- firstAnnotation; last <- lastAnnotation)
yield last.timestamp - first.timestamp
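
  // Worked example (comment added, not original): for a span annotated with the core values
  // cs@1000, sr@1005, ss@1095, cr@1100 (client send/server recv/server send/client recv),
  // firstAnnotation is cs and lastAnnotation is cr, so duration is Some(100).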
/**
* @return true if Span contains at most one of each core annotation
* false otherwise
*/
def isValid: Boolean = {
Constants.CoreAnnotations.map { c =>
annotations.filter(_.value == c).length > 1
}.count(b => b) == 0
}
/**
* Get the annotations as a map with value to annotation bindings.
*/
def getAnnotationsAsMap(): Map[String, Annotation] =
annotations.map(a => a.value -> a)(breakOut)
def lastTimestamp: Option[Long] = lastAnnotation.map(_.timestamp)
def firstTimestamp: Option[Long] = firstAnnotation.map(_.timestamp)
}
| coursera/zipkin | zipkin-common/src/main/scala/com/twitter/zipkin/common/Span.scala | Scala | apache-2.0 | 7,175 |
/**
* Original work: SecureSocial (https://github.com/jaliss/securesocial)
* Copyright 2013 Brian Porter (poornerd at gmail dot com) - twitter: @poornerd
*
* Derivative work: Silhouette (https://github.com/mohiva/play-silhouette)
* Modifications Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers.oauth2
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.api.util.HTTPLayer
import com.mohiva.play.silhouette.impl.exceptions.ProfileRetrievalException
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth2.InstagramProvider._
import play.api.libs.json.JsValue
import scala.concurrent.Future
/**
* Base Instagram OAuth2 provider.
*
* @see http://instagram.com/developer/authentication/
* @see http://instagram.com/developer/endpoints/
*/
trait BaseInstagramProvider extends OAuth2Provider {
/**
* The content type to parse a profile from.
*/
override type Content = JsValue
/**
* The provider ID.
*/
override val id = ID
/**
* Defines the URLs that are needed to retrieve the profile data.
*/
override protected val urls = Map("api" -> settings.apiURL.getOrElse(API))
/**
* Builds the social profile.
*
* @param authInfo The auth info received from the provider.
* @return On success the build social profile, otherwise a failure.
*/
override protected def buildProfile(authInfo: OAuth2Info): Future[Profile] = {
httpLayer.url(urls("api").format(authInfo.accessToken)).get().flatMap { response =>
val json = response.json
(json \ "meta" \ "code").asOpt[Int] match {
case Some(code) if code != 200 =>
val errorType = (json \ "meta" \ "error_type").asOpt[String]
val errorMsg = (json \ "meta" \ "error_message").asOpt[String]
throw new ProfileRetrievalException(SpecifiedProfileError.format(id, code, errorType, errorMsg))
case _ => profileParser.parse(json, authInfo)
}
}
}
}
/**
* The profile parser for the common social profile.
*/
class InstagramProfileParser extends SocialProfileParser[JsValue, CommonSocialProfile, OAuth2Info] {
/**
* Parses the social profile.
*
* @param json The content returned from the provider.
* @param authInfo The auth info to query the provider again for additional data.
* @return The social profile from given result.
*/
override def parse(json: JsValue, authInfo: OAuth2Info) = Future.successful {
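    // Expected payload shape (sketch inferred from the reads below, not original docs):
    //   { "data": { "id": "123", "full_name": "Jane Doe", "profile_picture": "https://..." } }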
val data = json \ "data"
val userID = (data \ "id").as[String]
val fullName = (data \ "full_name").asOpt[String]
val avatarURL = (data \ "profile_picture").asOpt[String]
CommonSocialProfile(
loginInfo = LoginInfo(ID, userID),
fullName = fullName,
avatarURL = avatarURL)
}
}
/**
* The Instagram OAuth2 Provider.
*
* @param httpLayer The HTTP layer implementation.
* @param stateHandler The state provider implementation.
* @param settings The provider settings.
*/
class InstagramProvider(
protected val httpLayer: HTTPLayer,
protected val stateHandler: SocialStateHandler,
val settings: OAuth2Settings)
extends BaseInstagramProvider with CommonSocialProfileBuilder {
/**
* The type of this class.
*/
override type Self = InstagramProvider
/**
* The profile parser implementation.
*/
override val profileParser = new InstagramProfileParser
/**
* Gets a provider initialized with a new settings object.
*
* @param f A function which gets the settings passed and returns different settings.
* @return An instance of the provider initialized with new settings.
*/
override def withSettings(f: (Settings) => Settings) = new InstagramProvider(httpLayer, stateHandler, f(settings))
}
/**
* The companion object.
*/
object InstagramProvider {
/**
* The error messages.
*/
val SpecifiedProfileError = "[Silhouette][%s] Error retrieving profile information. Error code: %s, type: %s, message: %s"
/**
* The Instagram constants.
*/
val ID = "instagram"
val API = "https://api.instagram.com/v1/users/self?access_token=%s"
}
| mohiva/play-silhouette | silhouette/app/com/mohiva/play/silhouette/impl/providers/oauth2/InstagramProvider.scala | Scala | apache-2.0 | 4,757 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.codegenerator.opencl.fragments
import cogx.platform.types.{VirtualFieldRegister, FieldType}
import cogx.platform.types.ElementTypes.Uint8Pixel
/** An input field on a kernel. This is a "slot" that initially is unconnected
* to a fragment on its input.
*
* @param fieldType The type of the input field.
* @param index Index of the input field.
* @param inputRegisters Input virtual field registers for the kernel that owns this fragment.
* @param clType The type of value produced by this fragment.
*
* @author Greg Snider
*/
private[cogx]
class InputFieldFragment(val fieldType: FieldType, index: Int,
inputRegisters: Array[VirtualFieldRegister],
clType: CLType)
extends FieldFragment(clType)
{
var constant = false
/** An input field fragment may be bound to a driving input that effectively
* short-circuits the input.
*/
private var drivingInput: Fragment = null
/** Name for the field. There's some subtlety here: during the merging
* process, inputs can accumulate a chain of old InputFieldFragments.
* If this InputFieldFragment is driven by such a chain of InputFieldFragments,
* all but the last are effectively ignored when calculating name(). If the
* last InputFieldFragment has a null input, then the _in_field_ name form is
* reported, else if the last is driven by a CodeFragment, then the
* CodeFragment's name is reported.
*/
def name = {
if (drivingInput == null) {
"_in_field_" + index
} else
drivingInput.name
}
/** Generate a string to read the "current" element of an input field. */
override def read(addressing: AddressingMode) =
if (drivingInput == null) {
super.read(addressing)
} else {
drivingInput.read(addressing)
}
/** Generate a string to read element (row, column) of an input field. */
override def readNonlocal(addressing: AddressingMode): String = {
if (drivingInput == null)
super.readNonlocal(addressing)
else {
drivingInput match {
case in: FieldFragment =>
in.readNonlocal(addressing)
case x =>
throw new RuntimeException("Indexed read on non-field fragment: " +
x.getClass)
}
}
}
/** Generates an OpenCL input parameter declaration for this field. */
def inDeclaration = {
require(drivingInput == null)
if (fieldType.elementType == Uint8Pixel) {
fieldType.dimensions match {
case 2 => " read_only image2d_t " + name
case 3 => " read_only image3d_t " + name
case x => throw new RuntimeException("Unsupported ColorField field dimension: " + x)
}
} else {
val constType = if (constant) " __constant " else " __global const "
constType + "float *" + name
}
}
/** Generates a string of #define statements describing field geometry. */
def inParameters: String = {
require(drivingInput == null)
FieldDefines(name, fieldType)
}
/** Generate #undef statements to clean up #defines. */
def cleanup: String = FieldDefines.cleanup(name, fieldType)
/** Bind "input" fragment as driving this. */
def bindDrivingInput(input: Fragment) {
require(input != null)
require(drivingInput == null)
drivingInput = input
// Copy 'constant' tag on this input to any InputFieldFragment that wraps it
drivingInput match {
case in: InputFieldFragment => in.constant = constant
case x =>
}
}
/** Inputs to this fragment, none if no driving input. */
def children =
if (drivingInput == null) Array[Fragment]()
else Array(drivingInput)
/** Code for this fragment. */
val code = ""
/** ID for this fragment. */
def id = inputRegisters(index).source.id
def registerDriver = inputRegisters(index)
override def toString = "InputFieldFragment(" + id + ")"
/** Generate a string to read a tensor of an input field. The driving input will be
* non-null for merged kernels. The chaining call may stop with a CodeFragment if the input
* is driven by the output of an embedded merged kernel, or it may terminate with a null
* if the input is a primary field input of the merged kernel.
*
* @param isLocal Does the read use the default row, column, layer values for the thread?
*/
def _readTensor(isLocal: Boolean) =
if (drivingInput == null) {
super.readTensor(isLocal)
} else {
drivingInput._readTensor(isLocal)
}
/** Generate a string to read a tensor element of an input field. The driving input will be
* non-null for merged kernels. The chaining call may stop with a CodeFragment if the input
* is driven by the output of an embedded merged kernel, or it may terminate with a null
* if the input is a primary field input of the merged kernel.
*
* @param isLocal Does the read use the default row, column, layer values for the thread?
*/
def _readTensorElement(isLocal: Boolean) =
if (drivingInput == null) {
super.readTensorElement(isLocal)
} else {
drivingInput._readTensorElement(isLocal)
}
} | hpe-cct/cct-core | src/main/scala/cogx/compiler/codegenerator/opencl/fragments/InputFieldFragment.scala | Scala | apache-2.0 | 5,802 |
package riftwarp.std
import scala.reflect.ClassTag
import scalaz._, Scalaz._
import scalaz.Validation.FlatMap._
import almhirt.common._
import almhirt.almvalidation.kit._
import riftwarp._
trait PackageBuilderFuns {
def toWarpPrimitive[A](what: A)(implicit conv: WarpPrimitiveConverter[A]): WarpPrimitive = conv.convertBack(what)
def toWarpPrimitivesCollection[A](what: Traversable[A])(implicit conv: WarpPrimitiveConverter[A]): WarpCollection =
WarpCollection(what.map(conv.convertBack(_)).toVector)
def toWarpCollectionWith[A](what: Traversable[A], packer: WarpPacker[A])(implicit packers: WarpPackers): AlmValidation[WarpCollection] =
    what.map(item ⇒ packer(item).toAgg).toVector.sequence.map(WarpCollection.apply)
def toWarpCollectionLookUp(what: Traversable[Any])(implicit packers: WarpPackers): AlmValidation[WarpCollection] =
    what.map(item ⇒
packers(WarpDescriptor(item.getClass)).flatMap(_.packBlind(item)).toAgg)
.toVector
.sequence
.map(WarpCollection.apply)
def E(label: String, what: WarpPackage): AlmValidation[WarpElement] =
WarpElement(label, Some(what)).success
def TypeClass[A](label: String, what: A)(implicit packer: WarpPacker[A], packers: WarpPackers): AlmValidation[WarpElement] =
With[A](label, what, packer)
def TypeClassOpt[A](label: String, what: Option[A])(implicit packer: WarpPacker[A], packers: WarpPackers): AlmValidation[WarpElement] =
WithOpt[A](label, what, packer)
def TypeClassFlatOpt[A](label: String, what: Option[A])(implicit packer: WarpPacker[A]): AlmValidation[WarpElement] =
WithOpt[A](label, what, packer)(WarpPackers.NoWarpPackers)
def P[A: WarpPrimitiveConverter](label: String, what: A): AlmValidation[WarpElement] =
WarpElement(label, Some(toWarpPrimitive(what))).success
def POpt[A: WarpPrimitiveConverter](label: String, what: Option[A]): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ P(label, v)
      case None ⇒ WarpElement(label).success
}
def With[T](label: String, what: T, packer: WarpPacker[T])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
    packer(what).map(x ⇒ WarpElement(label, Some(x)))
def WithOpt[T](label: String, what: Option[T], packer: WarpPacker[T])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ packer(v).map(x ⇒ WarpElement(label, Some(x)))
      case None ⇒ WarpElement(label).success
}
def With2[T](label: String, what: T)(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpElement] =
    packer(what).map(x ⇒ WarpElement(label, Some(x)))
def WithOpt2[T](label: String, what: Option[T])(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ packer(v).map(x ⇒ WarpElement(label, Some(x)))
      case None ⇒ WarpElement(label).success
}
def LookUp(label: String, what: Any)(implicit packers: WarpPackers): AlmValidation[WarpElement] =
    funs.pack(what).map(x ⇒ WarpElement(label, Some(x)))
def LookUpOpt[T](label: String, what: Option[Any])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ funs.pack(v).map(x ⇒ WarpElement(label, Some(x)))
      case None ⇒ WarpElement(label).success
}
def PTup2[A: WarpPrimitiveConverter, B: WarpPrimitiveConverter](label: String, what: (A, B)): AlmValidation[WarpElement] =
WarpElement(label, Some(WarpTuple2(toWarpPrimitive(what._1), toWarpPrimitive(what._2)))).success
def PTup2Opt[A: WarpPrimitiveConverter, B: WarpPrimitiveConverter](label: String, what: Option[(A, B)]): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ PTup2(label, v)
      case None ⇒ WarpElement(label).success
}
def PTup3[A: WarpPrimitiveConverter, B: WarpPrimitiveConverter, C: WarpPrimitiveConverter](label: String, what: (A, B, C)): AlmValidation[WarpElement] =
WarpElement(label, Some(WarpTuple3(toWarpPrimitive(what._1), toWarpPrimitive(what._2), toWarpPrimitive(what._3)))).success
def PTup3Opt[A: WarpPrimitiveConverter, B: WarpPrimitiveConverter, C: WarpPrimitiveConverter](label: String, what: Option[(A, B, C)]): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ PTup3(label, v)
      case None ⇒ WarpElement(label).success
}
def CP[A: WarpPrimitiveConverter](label: String, what: Traversable[A]): AlmValidation[WarpElement] =
WarpElement(label, Some(toWarpPrimitivesCollection(what))).success
def CPOpt[A: WarpPrimitiveConverter](label: String, what: Option[Traversable[A]]): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ WarpElement(label, Some(toWarpPrimitivesCollection(v))).success
      case None ⇒ WarpElement(label).success
}
def CWith[A](label: String, what: Traversable[A], packer: WarpPacker[A])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
    toWarpCollectionWith(what, packer).map(x ⇒ WarpElement(label, Some(x)))
def CWithOpt[A](label: String, what: Option[Traversable[A]], packer: WarpPacker[A])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ toWarpCollectionWith(v, packer).map(x ⇒ WarpElement(label, Some(x)))
      case None ⇒ WarpElement(label).success
}
def CWith2[A](label: String, what: Traversable[A])(implicit packer: WarpPacker[A], packers: WarpPackers): AlmValidation[WarpElement] =
    toWarpCollectionWith(what, packer).map(x ⇒ WarpElement(label, Some(x)))
def CWithOpt2[A](label: String, what: Option[Traversable[A]])(implicit packer: WarpPacker[A], packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ toWarpCollectionWith(v, packer).map(x ⇒ WarpElement(label, Some(x)))
      case None ⇒ WarpElement(label).success
}
def CLookUp[A](label: String, what: Traversable[A])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
    toWarpCollectionLookUp(what).map(x ⇒ WarpElement(label, Some(x)))
def CLookUpOpt[A](label: String, what: Option[Traversable[A]])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ toWarpCollectionLookUp(v).map(x ⇒ WarpElement(label, Some(x)))
      case None ⇒ WarpElement(label).success
}
def MP[A, B](label: String, what: Map[A, B])(implicit convA: WarpPrimitiveConverter[A], convB: WarpPrimitiveConverter[B]): AlmValidation[WarpElement] =
    WarpElement(label, Some(WarpAssociativeCollection(what.map { case (a, b) ⇒ (convA.convertBack(a), convB.convertBack(b)) }.toVector))).success
def MPOpt[A: WarpPrimitiveConverter, B: WarpPrimitiveConverter](label: String, what: Option[Map[A, B]]): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ MP[A, B](label, v)
      case None ⇒ WarpElement(label).success
}
def MWith[A, B](label: String, what: Map[A, B], packerB: WarpPacker[B])(implicit convA: WarpPrimitiveConverter[A], packers: WarpPackers): AlmValidation[WarpElement] =
    what.map { case (a, b) ⇒ packerB(b).map(wb ⇒ (convA.convertBack(a), wb)).toAgg }.toVector.sequence.map(items ⇒
WarpElement(label, Some(WarpAssociativeCollection(items))))
def MWithOpt[A: WarpPrimitiveConverter, B](label: String, what: Option[Map[A, B]], packerB: WarpPacker[B])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ MWith[A, B](label, v, packerB)
      case None ⇒ WarpElement(label).success
}
def MWith2[A, B](label: String, what: Map[A, B])(implicit convA: WarpPrimitiveConverter[A], packerB: WarpPacker[B], packers: WarpPackers): AlmValidation[WarpElement] =
    what.map { case (a, b) ⇒ packerB(b).map(wb ⇒ (convA.convertBack(a), wb)).toAgg }.toVector.sequence.map(items ⇒
WarpElement(label, Some(WarpAssociativeCollection(items))))
def MWithOpt2[A: WarpPrimitiveConverter, B](label: String, what: Option[Map[A, B]])(implicit packerB: WarpPacker[B], packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ MWith[A, B](label, v, packerB)
      case None ⇒ WarpElement(label).success
}
def MLookUp[A, B](label: String, what: Map[A, B])(implicit convA: WarpPrimitiveConverter[A], packers: WarpPackers): AlmValidation[WarpElement] =
what.map {
      case (a, b) ⇒
        packers(WarpDescriptor(b.getClass)).flatMap(packer ⇒
          packer.packBlind(b).map(wb ⇒
            (convA.convertBack(a), wb))).toAgg
    }.toVector.sequence.map(items ⇒
WarpElement(label, Some(WarpAssociativeCollection(items))))
def MLookUpOpt[A: WarpPrimitiveConverter, B](label: String, what: Option[Map[A, B]])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ MLookUp(label, v)
      case None ⇒ WarpElement(label).success
}
def MLookUpForgiving[A, B](label: String, what: Map[A, B])(implicit convA: WarpPrimitiveConverter[A], packers: WarpPackers): AlmValidation[WarpElement] =
what.map {
      case (a, b) ⇒
        packers(WarpDescriptor(b.getClass)).fold(
          fail ⇒
            None,
          packer ⇒
            packer.packBlind(b).map(wb ⇒
              (convA.convertBack(a), wb)).toAgg.some)
    }.flatten.toVector.sequence.map(items ⇒
WarpElement(label, Some(WarpAssociativeCollection(items))))
def MLookUpForgivingOpt[A: WarpPrimitiveConverter, B](label: String, what: Option[Map[A, B]])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ MLookUpForgiving(label, v)
      case None ⇒ WarpElement(label).success
}
def TP[A](label: String, what: Tree[A])(implicit conv: WarpPrimitiveConverter[A]): AlmValidation[WarpElement] =
WarpElement(label, Some(WarpTree(what.map(conv.convertBack(_))))).success
def TPOpt[A: WarpPrimitiveConverter](label: String, what: Option[Tree[A]]): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ TP(label, v)
      case None ⇒ WarpElement(label).success
}
def TWith[A](label: String, what: Tree[A], packer: WarpPacker[A])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
    what.map(x ⇒ packer(x).toAgg).sequence.map(x ⇒ WarpElement(label, Some(WarpTree(x))))
def TWithOpt[A](label: String, what: Option[Tree[A]], packer: WarpPacker[A])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ TWith(label, v, packer)
      case None ⇒ WarpElement(label).success
}
def TLookUp[A](label: String, what: Tree[A])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
    what.map(x ⇒
      packers(WarpDescriptor(x.getClass)).flatMap(packer ⇒
        packer.packBlind(x)).toAgg).sequence.map(x ⇒
WarpElement(label, Some(WarpTree(x))))
def TLookUpOpt[A](label: String, what: Option[Tree[A]])(implicit packers: WarpPackers): AlmValidation[WarpElement] =
what match {
      case Some(v) ⇒ TLookUp(label, v)
      case None ⇒ WarpElement(label).success
}
def VWith[T](label: String, what: AlmValidation[T], packer: WarpPacker[T])(implicit packers: WarpPackers): AlmValidation[WarpElement] = {
what match {
case scalaz.Success(v) =>
packer.pack(v).map(inner => WarpElement(label,
Some(WarpObject(
None,
Vector(WarpElement("success", Some(inner)))))))
case scalaz.Failure(p) =>
riftwarp.serialization.common.ProblemPackaging.pack(p).map(inner => WarpElement(label,
Some(WarpObject(
None,
Vector(WarpElement("problem", Some(inner)))))))
}
}
def VWith2[T](label: String, what: AlmValidation[T])(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpElement] =
VWith(label, what, packer)
def Bytes(label: String, bytes: IndexedSeq[Byte]): AlmValidation[WarpElement] = BytesOpt(label, Some(bytes))
def BytesOpt(label: String, bytes: Option[IndexedSeq[Byte]]): AlmValidation[WarpElement] = WarpElement(label, bytes.map(WarpBytes(_))).success
def Blob(label: String, bytes: IndexedSeq[Byte]): AlmValidation[WarpElement] = BlobOpt(label, Some(bytes))
def BlobOpt(label: String, bytes: Option[IndexedSeq[Byte]]): AlmValidation[WarpElement] = WarpElement(label, bytes.map(WarpBlob(_))).success
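
  // Illustrative sketch (added; not part of the original source): each builder above yields an
  // AlmValidation[WarpElement]. For example, a raw byte payload element can be built like this.
  private def exampleBytesElement: AlmValidation[WarpElement] =
    Bytes("payload", Vector[Byte](1, 2, 3))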
}
trait PackageBuilderOps {
import language.implicitConversions
implicit def tuple2WarpElement[T](tuple: (String, Option[T]))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpElement] =
tuple._2 match {
case None โ WarpElement(tuple._1, None).success
case Some(x) โ packer(x).map(x โ WarpElement(tuple._1, Some(x)))
}
implicit class Tuple2Ops[T](self: Tuple2[String, T]) {
def ~>(next: โ AlmValidation[WarpElement])(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] =
next.fold(
fail โ fail.failure,
succ โ packer(self._2).map(x โ WarpObject(None, Vector(WarpElement(self._1, Some(x)), succ))))
def โฟ(next: โ AlmValidation[WarpElement])(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] = ~>(next)
}
implicit class WarpElementOps(self: WarpElement) {
def ~>(next: โ AlmValidation[WarpElement]): AlmValidation[WarpObject] =
next.fold(
fail โ fail.failure,
succ โ WarpObject(None, Vector(self, succ)).success)
def ~+>(next: โ WarpElement): AlmValidation[WarpObject] =
WarpObject(None, Vector(self, next)).success
def ~~>(next: โ AlmValidation[Seq[WarpElement]]): AlmValidation[WarpObject] =
next.fold(
fail โ fail.failure,
succ โ WarpObject(None, (self +: succ.toVector)).success)
def ~~+>(next: โ Seq[WarpElement]): AlmValidation[WarpObject] =
WarpObject(None, (self +: next.toVector)).success
def ~?>[T](next: โ (String, Option[T]))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] =
~>(tuple2WarpElement(next))
def ~>[T](next: โ (String, T))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] =
~?>((next._1, Some(next._2)))
def โฟ(next: โ AlmValidation[WarpElement]): AlmValidation[WarpObject] = ~>(next)
def โฟ?[T](next: โ (String, Option[T]))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] = ~?>(next)
def โฟ[T](next: โ (String, T))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] = ~>[T](next)
}
implicit class WarpObjectOps(self: WarpObject) {
def ~>(next: โ AlmValidation[WarpElement]): AlmValidation[WarpObject] =
next.fold(
fail โ fail.failure,
succ โ WarpObject(self.warpDescriptor, self.elements :+ succ).success)
def ~+>(next: โ WarpElement): AlmValidation[WarpObject] =
WarpObject(self.warpDescriptor, self.elements :+ next).success
def ~~>(next: โ AlmValidation[Seq[WarpElement]]): AlmValidation[WarpObject] =
next.fold(
fail โ fail.failure,
succ โ WarpObject(self.warpDescriptor, self.elements ++ succ).success)
def ~~+>(next: โ Seq[WarpElement]): AlmValidation[WarpObject] =
WarpObject(self.warpDescriptor, self.elements ++ next).success
def ~?>[T](next: โ (String, Option[T]))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] =
~>(tuple2WarpElement(next))
def ~>[T](next: โ (String, T))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] =
~?>((next._1, Some(next._2)))
def โฟ(next: โ AlmValidation[WarpElement]): AlmValidation[WarpObject] = ~>(next)
def โฟ?[T](next: โ (String, Option[T]))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] = ~?>[T](next)
def โฟ[T](next: โ (String, T))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] = ~>[T](next)
}
implicit class WarpDescriptorOps(self: WarpDescriptor) {
def ~>(next: โ AlmValidation[WarpElement]): AlmValidation[WarpObject] =
next.fold(
fail โ fail.failure,
succ โ WarpObject(Some(self), Vector(succ)).success)
def ~~>(next: โ AlmValidation[Seq[WarpElement]]): AlmValidation[WarpObject] =
next.fold(
fail โ fail.failure,
succ โ WarpObject(Some(self), succ.toVector).success)
def ~?>[T](next: โ (String, Option[T]))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] =
~>(tuple2WarpElement(next))
def ~>[T](next: โ (String, T))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] =
~?>((next._1, Some(next._2)))
def โฟ(next: โ AlmValidation[WarpElement]): AlmValidation[WarpObject] = ~>(next)
def โฟ?[T](next: โ (String, Option[T]))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] = ~?>[T](next)
def โฟ[T](next: โ (String, T))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] = ~>[T](next)
}
implicit class WarpObjectVOps(self: AlmValidation[WarpObject]) {
def ~>(next: โ AlmValidation[WarpElement]): AlmValidation[WarpObject] =
self.fold(
fail โ fail.failure,
succObj โ
next.fold(
fail โ fail.failure,
succ โ WarpObject(succObj.warpDescriptor, succObj.elements :+ succ).success))
def ~+>(next: โ WarpElement): AlmValidation[WarpObject] =
self.fold(
fail โ fail.failure,
succObj โ WarpObject(succObj.warpDescriptor, succObj.elements :+ next).success)
def ~~>(next: โ AlmValidation[Seq[WarpElement]]): AlmValidation[WarpObject] =
self.fold(
fail โ fail.failure,
succObj โ
next.fold(
fail โ fail.failure,
succ โ WarpObject(succObj.warpDescriptor, succObj.elements ++ succ).success))
def ~~+>(next: โ Seq[WarpElement]): AlmValidation[WarpObject] =
self.fold(
fail โ fail.failure,
succObj โ WarpObject(succObj.warpDescriptor, succObj.elements ++ next).success)
def ~?>[T](next: โ (String, Option[T]))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] =
~>(tuple2WarpElement(next))
def ~>[T](next: โ (String, T))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] =
~?>((next._1, Some(next._2)))
def โฟ(next: โ AlmValidation[WarpElement]): AlmValidation[WarpObject] = ~>(next)
def โฟ?[T](next: โ (String, Option[T]))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] = ~?>[T](next)
def โฟ[T](next: โ (String, T))(implicit packer: WarpPacker[T], packers: WarpPackers): AlmValidation[WarpObject] = ~>[T](next)
}
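
  // Illustrative sketch (added; not part of the original source): the implicit operator classes
  // above let already-built WarpElements be chained into a WarpObject. `descriptor`, `nameElem`
  // and `ageElem` are assumed inputs.
  private def exampleElementChain(descriptor: WarpDescriptor, nameElem: WarpElement, ageElem: WarpElement): AlmValidation[WarpObject] =
    descriptor ~> nameElem.success ~+> ageElem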
} | chridou/almhirt | riftwarp/src/main/scala/riftwarp/std/PackageBuilder.scala | Scala | apache-2.0 | 19,402 |
package sangria.util
import org.scalatest.Matchers
import sangria.parser.QueryParser
import sangria.schema._
import sangria.validation.{AstNodeViolation, RuleBasedQueryValidator, ValidationRule}
import scala.util.Success
trait ValidationSupport extends Matchers {
type TestField = Field[Unit, Unit]
val Being = InterfaceType("Being", List[TestField](
Field("name", OptionType(StringType), resolve = _ => None)
))
val Pet = InterfaceType("Pet", List[TestField](
Field("name", OptionType(StringType), resolve = _ => None)
))
val DogCommand = EnumType("DogCommand", values = List(
EnumValue("SIT", value = 0),
EnumValue("HEEL", value = 1),
EnumValue("DOWN", value = 2)
))
val FurColor = EnumType("FurColor", values = List(
EnumValue("BROWN", value = 0),
EnumValue("BLACK", value = 1),
EnumValue("TAN", value = 2),
EnumValue("SPOTTED", value = 3)
))
val Dog = ObjectType("Dog", List[TestField](
Field("name", OptionType(StringType), resolve = _ => None),
Field("nickname", OptionType(StringType), resolve = _ => None),
Field("barks", OptionType(BooleanType), resolve = _ => None),
Field("barkVolume", OptionType(IntType), resolve = _ => None),
Field("doesKnowCommand", OptionType(BooleanType),
arguments = Argument("dogCommand", OptionInputType(DogCommand)) :: Nil,
resolve = _ => None),
Field("isHousetrained", OptionType(BooleanType),
arguments = Argument("atOtherHomes", OptionInputType(BooleanType), true) :: Nil,
resolve = _ => None),
Field("isAtLocation", OptionType(BooleanType),
arguments = Argument("x", OptionInputType(IntType)) :: Argument("y", OptionInputType(IntType)) :: Nil,
resolve = _ => None)),
interfaces[Unit, Unit](Being, Pet))
val Cat = ObjectType("Cat", List[TestField](
Field("name", OptionType(StringType), resolve = _ => None),
Field("nickname", OptionType(StringType), resolve = _ => None),
Field("meows", OptionType(BooleanType), resolve = _ => None),
Field("meowVolume", OptionType(IntType), resolve = _ => None),
Field("furColor", OptionType(FurColor), resolve = _ => None)),
interfaces[Unit, Unit](Being, Pet))
val CatOrDog = UnionType("CatOrDog", types = Dog :: Cat :: Nil)
val Intelligent = InterfaceType("Intelligent", List[TestField](
Field("iq", OptionType(IntType), resolve = _ => None)
))
val Human: ObjectType[Unit, Unit] = ObjectType("Human", () => List[TestField](
Field("name", OptionType(StringType),
arguments = Argument("surname", OptionInputType(BooleanType)) :: Nil,
resolve = _ => None),
Field("pets", OptionType(ListType(OptionType(Pet))), resolve = _ => None),
Field("relatives", OptionType(ListType(OptionType(Human))), resolve = _ => None)),
interfaces[Unit, Unit](Being, Intelligent))
val Alien = ObjectType("Alien", List[TestField](
Field("numEyes", OptionType(IntType), resolve = _ => None)),
interfaces[Unit, Unit](Being, Intelligent))
val DogOrHuman = UnionType("DogOrHuman", types = Dog :: Human :: Nil)
val HumanOrAlien = UnionType("HumanOrAlien", types = Human :: Alien :: Nil)
val ComplexInput = InputObjectType("ComplexInput", List(
InputField("requiredField", BooleanType),
InputField("intField", OptionInputType(IntType)),
InputField("stringField", OptionInputType(StringType)),
InputField("booleanField", OptionInputType(BooleanType)),
InputField("stringListField", OptionInputType(ListInputType(OptionInputType(StringType))))
))
val ComplicatedArgs = ObjectType("ComplicatedArgs", List[TestField](
Field("intArgField", OptionType(StringType),
arguments = Argument("intArg", OptionInputType(IntType)) :: Nil,
resolve = _ => None),
Field("bigIntArgField", OptionType(StringType),
arguments = Argument("bigIntArg", OptionInputType(BigIntType)) :: Nil,
resolve = _ => None),
Field("nonNullIntArgField", OptionType(StringType),
arguments = Argument("nonNullIntArg", IntType) :: Nil,
resolve = _ => None),
Field("stringArgField", OptionType(StringType),
arguments = Argument("stringArg", OptionInputType(StringType)) :: Nil,
resolve = _ => None),
Field("booleanArgField", OptionType(StringType),
arguments = Argument("booleanArg", OptionInputType(BooleanType)) :: Nil,
resolve = _ => None),
Field("enumArgField", OptionType(StringType),
arguments = Argument("enumArg", OptionInputType(FurColor)) :: Nil,
resolve = _ => None),
Field("floatArgField", OptionType(StringType),
arguments = Argument("floatArg", OptionInputType(FloatType)) :: Nil,
resolve = _ => None),
Field("bigDecimalArgField", OptionType(StringType),
arguments = Argument("bigDecimalArg", OptionInputType(BigDecimalType)) :: Nil,
resolve = _ => None),
Field("idArgField", OptionType(StringType),
arguments = Argument("idArg", OptionInputType(IDType)) :: Nil,
resolve = _ => None),
Field("stringListArgField", OptionType(StringType),
arguments = Argument("stringListArg", OptionInputType(ListInputType(OptionInputType(StringType)))) :: Nil,
resolve = _ => None),
Field("complexArgField", OptionType(StringType),
arguments = Argument("complexArg", OptionInputType(ComplexInput)) :: Nil,
resolve = _ => None),
Field("multipleReqs", OptionType(StringType),
arguments = Argument("req1", IntType) :: Argument("req2", IntType) :: Nil,
resolve = _ => None),
Field("multipleOpts", OptionType(StringType),
arguments = Argument("opt1", OptionInputType(IntType), 0) :: Argument("opt2", OptionInputType(IntType), 0) :: Nil,
resolve = _ => None),
Field("multipleOptAndReq", OptionType(StringType),
arguments =
Argument("req1", IntType) ::
Argument("req2", IntType) ::
Argument("opt1", OptionInputType(IntType), 0) ::
Argument("opt2", OptionInputType(IntType), 0) ::
Nil,
resolve = _ => None)
))
val QueryRoot = ObjectType("QueryRoot", List[TestField](
Field("human", OptionType(Human),
arguments = Argument("id", OptionInputType(IDType)) :: Nil,
resolve = _ => None),
Field("alien", OptionType(Alien), resolve = _ => None),
Field("dog", OptionType(Dog), resolve = _ => None),
Field("cat", OptionType(Cat), resolve = _ => None),
Field("pet", OptionType(Pet), resolve = _ => None),
Field("catOrDog", OptionType(CatOrDog), resolve = _ => None),
Field("dogOrHuman", OptionType(DogOrHuman), resolve = _ => None),
Field("humanOrAlien", OptionType(HumanOrAlien), resolve = _ => None),
Field("complicatedArgs", OptionType(ComplicatedArgs), resolve = _ => None)
))
val schema = Schema(QueryRoot)
def defaultRule: Option[ValidationRule] = None
def expectValid(s: Schema[_, _], rules: List[ValidationRule], query: String) = {
val Success(doc) = QueryParser.parse(query)
withClue("Should validate") {
validator(rules).validateQuery(s, doc) should have size 0
}
}
def expectInvalid(s: Schema[_, _], rules: List[ValidationRule], query: String, expectedErrors: List[(String, List[Pos])]) = {
val Success(doc) = QueryParser.parse(query)
withClue("Should not validate") {
val errors = validator(rules).validateQuery(s, doc)
errors should have size expectedErrors.size
expectedErrors foreach { case(expected, pos) =>
withClue(s"Expected error not found: $expected${pos map (p => s" (line ${p.line}, column ${p.col})") mkString "; "}. Actual:\n${errors map (_.errorMessage) mkString "\n"}") {
errors exists { error =>
error.errorMessage.contains(expected) && {
val errorPositions = error.asInstanceOf[AstNodeViolation].positions
errorPositions should have size pos.size
errorPositions zip pos forall { case (actualPos, expectedPos) =>
expectedPos.line == actualPos.line && expectedPos.col == actualPos.column
}
}
} should be (true)
}
}
}
}
def expectPassesRule(rule: ValidationRule, query: String) =
expectValid(schema, rule :: Nil, query)
def expectPasses(query: String) =
expectValid(schema, defaultRule.get :: Nil, query)
def expectFailsRule(rule: ValidationRule, query: String, expectedErrors: List[(String, Option[Pos])]) =
expectInvalid(schema, rule :: Nil, query, expectedErrors.map{case (msg, pos) => msg -> pos.toList})
def expectFails(query: String, expectedErrors: List[(String, Option[Pos])]) =
expectInvalid(schema, defaultRule.get :: Nil, query, expectedErrors.map{case (msg, pos) => msg -> pos.toList})
def expectFailsPosList(query: String, expectedErrors: List[(String, List[Pos])]) =
expectInvalid(schema, defaultRule.get :: Nil, query, expectedErrors)
def validator(rules: List[ValidationRule]) = new RuleBasedQueryValidator(rules)
}
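
// Illustrative sketch (added; not part of the original file): a concrete spec would mix the trait
// in, pin `defaultRule` to the rule under test, and drive the expect* helpers. The rule parameter
// and the queries below are assumptions, not actual sangria test cases.
abstract class ExampleValidationSpec(rule: ValidationRule) extends ValidationSupport {
  override def defaultRule: Option[ValidationRule] = Some(rule)

  def checkQueries(): Unit = {
    expectPasses("query { dog { name } }")
    expectFails("query { dog { unknownField } }", List("unknownField" -> None))
  }
}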
| narahari92/sangria | src/test/scala/sangria/util/ValidationSupport.scala | Scala | apache-2.0 | 8,969 |
package quizleague.web.site.season
import rxscalajs._
import rxscalajs.subjects._
import quizleague.web.site.ApplicationContextService
import quizleague.web.model.Season
trait SeasonWatchService {
private val seasonSubj:Subject[Season] = ReplaySubject()
ApplicationContextService.get().subscribe(_.currentSeason.subscribe(s => seasonSubj.next(s)))
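  // Note (added): ReplaySubject caches emissions, so components that subscribe after the
  // application context has already loaded still receive the current season.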
def season = seasonSubj
} | gumdrop/quizleague-maintain | js/src/main/scala/quizleague/web/site/season/SeasonWatchService.scala | Scala | mit | 390 |
inline def scaffolding(inline op: Unit): Unit =
val _ = op
def test = scaffolding { println("foo") }
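
// Added illustration: a second call site; each use inlines its own copy of `op`.
def test2 = scaffolding { println("bar") }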
| dotty-staging/dotty | tests/pos/i9626.scala | Scala | apache-2.0 | 104 |
/*
* LambdaExpressionLatexExporter.scala
*
*/
package at.logic.gapt.formats.latex
import at.logic.gapt.formats.{ HOLTermExporter, OutputExporter }
import at.logic.gapt.expr._
import at.logic.gapt.language.schema.indexedOmegaVar
trait HOLTermLatexExporter extends OutputExporter with HOLTermExporter {
  // it takes a LambdaExpression and uses require because of the stupid design choice not to have a common element for HOL
def exportTerm( t: LambdaExpression ): Unit = {
require( t.isInstanceOf[LambdaExpression] );
t match {
case indexedOmegaVar( name, index ) => getOutput.write( name + """_{""" + index + """}""" )
case Var( name, _ ) => getOutput.write( name.toString )
case Const( name, _ ) => getOutput.write( name.toString )
      case Neg( f ) => { getOutput.write( """\neg """ ); exportTerm_( f ); }
      case And( f1, f2 ) => { exportTerm_( f1 ); getOutput.write( """ \wedge """ ); exportTerm_( f2 ); }
      case Or( f1, f2 ) => { exportTerm_( f1 ); getOutput.write( """ \vee """ ); exportTerm_( f2 ); }
      case Imp( f1, f2 ) => { exportTerm_( f1 ); getOutput.write( """ \rightarrow """ ); exportTerm_( f2 ); }
      case Ex( v, f ) => { getOutput.write( """\exists """ ); exportTerm_( v.asInstanceOf[Var] ); getOutput.write( """.""" ); exportTerm_( f ); }
      case All( v, f ) => { getOutput.write( """\forall """ ); exportTerm_( v.asInstanceOf[Var] ); getOutput.write( """.""" ); exportTerm_( f ); }
      case Abs( v, t ) => { getOutput.write( """\lambda """ ); exportTerm_( v ); getOutput.write( """.""" ); exportTerm_( t ); }
case HOLAtom( name, args ) => exportFunction( t )
case HOLFunction( name, args ) => exportFunction( t )
}
}
private def exportTerm_( t: LambdaExpression ): Unit = {
require( t.isInstanceOf[LambdaExpression] ); t match {
case indexedOmegaVar( name, index ) => getOutput.write( name + """_{""" + index + """}""" )
case Var( name, _ ) => getOutput.write( name.toString )
case Const( name, _ ) => getOutput.write( name.toString )
      case Neg( f ) => { getOutput.write( "(" ); getOutput.write( """\neg """ ); exportTerm_( f ); getOutput.write( ")" ) }
      case And( f1, f2 ) => { getOutput.write( "(" ); exportTerm_( f1 ); getOutput.write( """ \wedge """ ); exportTerm_( f2 ); getOutput.write( ")" ) }
      case Or( f1, f2 ) => { getOutput.write( "(" ); exportTerm_( f1 ); getOutput.write( """ \vee """ ); exportTerm_( f2 ); getOutput.write( ")" ) }
      case Imp( f1, f2 ) => { getOutput.write( "(" ); exportTerm_( f1 ); getOutput.write( """ \rightarrow """ ); exportTerm_( f2 ); getOutput.write( ")" ) }
      case Ex( v, f ) => { getOutput.write( "(" ); getOutput.write( """\exists """ ); exportTerm_( v.asInstanceOf[Var] ); getOutput.write( """.""" ); exportTerm_( f ); getOutput.write( ")" ) }
      case All( v, f ) => { getOutput.write( "(" ); getOutput.write( """\forall """ ); exportTerm_( v.asInstanceOf[Var] ); getOutput.write( """.""" ); exportTerm_( f ); getOutput.write( ")" ) }
      case Abs( v, t ) => { getOutput.write( "(" ); getOutput.write( """\lambda """ ); exportTerm_( v ); getOutput.write( """.""" ); exportTerm_( t ); getOutput.write( ")" ) }
case HOLAtom( name, args ) => exportFunction( t )
case HOLFunction( name, args ) => exportFunction( t )
}
}
protected def latexType( ta: TA ): String = ta match {
case Ti => "i"
case To => "o"
    case ->( a, b ) => "(" + latexType( a ) + """ \rightarrow """ + latexType( b ) + ")"
}
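  // Added note (illustrative): a function type from Ti to To renders as "(i \rightarrow o)".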
}
| gisellemnr/gapt | src/main/scala/at/logic/gapt/formats/latex/HOLTermLatexExporter.scala | Scala | gpl-3.0 | 3,900 |
package crochet
import scala.collection.mutable.{Map=>MMap}
import javax.servlet.http.{HttpServletResponse, HttpServletRequest, HttpServlet}
import util.matching.Regex
/**
 * The basic dispatcher for Crochet servlet requests
*
* @author Xavier Llora
* @date Jan 13, 2010 at 3:15:21 PM
*
*/
protected trait CrochetDispatcher extends HttpServlet with CrochetDynamicEnvironment with CrochetResponseCodes {
//
// The main structures used as dispatchers
//
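  // (added note) Each entry maps a request path to a tuple of
  // (mime type, guard, authorization check over (path, user), handler).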
protected var dispatcherMap = MMap[String,MMap[String,(()=>String,()=>Boolean,(String,Option[String])=>Boolean,()=>Any)]]()
dispatcherMap ++= List(
"GET" -> MMap[String, (() => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"POST" -> MMap[String, (() => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"PUT" -> MMap[String, (() => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"DELETE" -> MMap[String, (() => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"HEAD" -> MMap[String, (() => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"OPTIONS" -> MMap[String, (() => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"TRACE" -> MMap[String, (() => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)]()
)
protected var dispatcherRegexMap = MMap[String,List[(Regex,()=>String,()=>Boolean,(String,Option[String])=>Boolean,()=>Any)]]()
dispatcherRegexMap ++= List(
"GET" -> List[(Regex, () => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"POST" -> List[(Regex, () => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"PUT" -> List[(Regex, () => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"DELETE" -> List[(Regex, () => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"HEAD" -> List[(Regex, () => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"OPTIONS" -> List[(Regex, () => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)](),
"TRACE" -> List[(Regex, () => String, () => Boolean, (String,Option[String]) => Boolean, () => Any)]()
)
//
// Dispatch a request to this Crochet servlet
//
override def service(request: HttpServletRequest, response: HttpServletResponse) = {
val method = request.getMethod
val pathURI = request.getRequestURI
val headerMap = extractHeaderMap(request)
val (ms,ma) = extractParameters(request)
val cSession = extractSession(request)
    val user = request.getRemoteUser match {
case null => None
case s => Some(s)
}
try {
pathVal.withValue(pathURI) {
requestVal.withValue(request) {
responseVal.withValue(response) {
sessionVal.withValue(cSession) {
headerVal.withValue(headerMap) {
paramVal.withValue(ms) {
paramMapVal.withValue(ma) {
//
                    // Check if it is a regular path.
//
if (dispatcherMap(method) contains pathURI) {
elementsVal.withValue(List[String]()) {
val (mime, guard, auth, function) = dispatcherMap(method)(pathURI)
if (!auth(pathURI,user) ) {
unauthorizedAccess(path,user,request,response)
}
else if (guard()) {
// Found and guard satisfied
response.setStatus(HttpServletResponse.SC_OK)
response.setContentType(mime())
response.getWriter.print(function().toString)
}
else {
requestNotFound(path,request,response)
}
}
}
else {
//
                      // It may be in the regular expression pool
//
val list = dispatcherRegexMap(method)
val target = list.find(
(t:Tuple5[Regex,()=>String,()=>Boolean,(String,Option[String]) => Boolean,()=>Any])=> {
val r = t._1
val e = extractMatches(pathURI,r)
elementsVal.withValue(e) {
r.findFirstIn(pathURI) match {
case Some(_) => if (t._3()) true else false
case None => false
}
}
}
)
val elems = if (target==None) List[String]() else extractMatches(pathURI,target.get._1)
elementsVal.withValue(elems) {
target match {
case Some(t) if t._3() && t._4(pathURI,user) => // Matched, guard satisfied, and authorized
response setStatus HttpServletResponse.SC_OK
response setContentType t._2()
response.getWriter.print(t._5().toString)
case Some(t) if t._3() && !t._4(pathURI,user) => // Matched and guard satisfied, but not authorized
unauthorizedAccess(path,user,request,response)
case _ => // Request could not be found either way
requestNotFound(path,request,response)
}
}
}
}
}
}
}
}
}
}
}
catch {
case e => internalServerError(pathURI,request,response,e)
}
}
//
  // Extract parameters from a request
//
private def extractParameters(request:HttpServletRequest) = {
val names = request.getParameterNames
var mapSingle = Map[String,String]()
var mapArray = Map[String,Array[String]]()
while (names.hasMoreElements) {
val key = names.nextElement.toString
mapSingle += key->request.getParameter(key)
mapArray += key->request.getParameterValues(key)
}
(mapSingle,mapArray)
}
//
// Extract the matches of a URL
//
def extractMatches(pathURI:String,r:Regex):List[String] = {
r.unapplySeq(pathURI) match {
case Some(l) => l
case None => List[String]()
}
}
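  // Added note (illustrative): extractMatches("/user/42", "/user/(\\d+)".r) returns List("42"),
  // whereas a path that the regex does not fully match returns an empty list.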
//
// Creates a map with the header options
//
private def extractHeaderMap(request:HttpServletRequest) = {
val nameEnum = request.getHeaderNames
var map = Map[String,String]()
while ( nameEnum.hasMoreElements ) {
val key = nameEnum.nextElement.toString
map += key->request.getHeader(key)
}
map
}
def internalServerError(path: String, request: HttpServletRequest, response: HttpServletResponse, e: Throwable)
def unauthorizedAccess(path: String, user:Option[String], request: HttpServletRequest, response: HttpServletResponse)
def requestNotFound(path: String, request: HttpServletRequest, response: HttpServletResponse)
} | xllora/Crochet | src/main/scala/CrochetDispatcher.scala | Scala | bsd-3-clause | 7,685 |
package scala.test
object Exported {
def message: String = {
// terrible, don't do this in real code:
val msg = Class.forName("scala.test.Runtime")
.newInstance
.toString
"you all, everybody. " + msg
}
}
| sdtwigg/rules_scala | test/Exported.scala | Scala | apache-2.0 | 233 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.testkit.async
// The async phase expects the state machine class to structurally conform to this interface.
trait AsyncStateMachine[F, R] {
/** Assign `i` to the state variable */
protected def state_=(i: Int): Unit
/** Retrieve the current value of the state variable */
protected def state: Int
/** Complete the state machine with the given failure. */
protected def completeFailure(t: Throwable): Unit
/** Complete the state machine with the given value. */
protected def completeSuccess(value: AnyRef): Unit
/** Register the state machine as a completion callback of the given future. */
protected def onComplete(f: F): Unit
/** Extract the result of the given future if it is complete, or `null` if it is incomplete. */
protected def getCompleted(f: F): R
/**
* Extract the success value of the given future. If the state machine detects a failure it may
* complete the async block and return `this` as a sentinel value to indicate that the caller
* (the state machine dispatch loop) should immediately exit.
*/
protected def tryGet(tr: R): AnyRef
}
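
// Minimal illustrative sketch (added; not part of the original file): a hand-written class that
// structurally conforms to the interface above, instantiated for scala.concurrent.Future / Try.
// Real state machines are generated by the async phase; all names below are assumptions.
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success, Try}

private class ExampleFutureStateMachine(implicit ec: ExecutionContext)
  extends AsyncStateMachine[Future[AnyRef], Try[AnyRef]] {
  private[this] val result = Promise[AnyRef]()
  private[this] var currentState = 0

  protected def state: Int = currentState
  protected def state_=(i: Int): Unit = currentState = i
  protected def completeFailure(t: Throwable): Unit = result.failure(t)
  protected def completeSuccess(value: AnyRef): Unit = result.success(value)
  protected def onComplete(f: Future[AnyRef]): Unit = f.onComplete(_ => ()) // a real machine re-enters its dispatch loop here
  protected def getCompleted(f: Future[AnyRef]): Try[AnyRef] = f.value.orNull
  protected def tryGet(tr: Try[AnyRef]): AnyRef = tr match {
    case Success(v) => v
    case Failure(t) => completeFailure(t); this
  }
}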
| lrytz/scala | src/testkit/scala/tools/testkit/async/AsyncStateMachine.scala | Scala | apache-2.0 | 1,406 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.algorithms.consensus
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.rich.RichAlignmentRecord
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.formats.avro.AlignmentRecord
class ConsensusGeneratorFromReadsSuite extends ADAMFunSuite {
val cg = new ConsensusGeneratorFromReads
def artificial_reads: RDD[AlignmentRecord] = {
val path = resourcePath("artificial.sam")
sc.loadAlignments(path).rdd
}
sparkTest("checking search for consensus list for artificial reads") {
val consensus = cg.findConsensus(artificial_reads.map(new RichAlignmentRecord(_))
.collect()
.toSeq)
assert(consensus.size === 2)
}
}
| tdanford/adam | adam-core/src/test/scala/org/bdgenomics/adam/algorithms/consensus/ConsensusGeneratorFromReadsSuite.scala | Scala | apache-2.0 | 1,540 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.Serializable
import java.util.Properties
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.memory.TaskMemoryManager
import org.apache.spark.metrics.source.Source
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.util.{AccumulatorV2, TaskCompletionListener, TaskFailureListener}
object TaskContext {
/**
* Return the currently active TaskContext. This can be called inside of
* user functions to access contextual information about running tasks.
*/
def get(): TaskContext = taskContext.get
/**
   * Returns the partition id of the currently active TaskContext. It returns 0
   * if there is no active TaskContext, for example during local execution.
*/
def getPartitionId(): Int = {
val tc = taskContext.get()
if (tc eq null) {
0
} else {
tc.partitionId()
}
}
private[this] val taskContext: ThreadLocal[TaskContext] = new ThreadLocal[TaskContext]
// Note: protected[spark] instead of private[spark] to prevent the following two from
// showing up in JavaDoc.
/**
* Set the thread local TaskContext. Internal to Spark.
*/
protected[spark] def setTaskContext(tc: TaskContext): Unit = taskContext.set(tc)
/**
* Unset the thread local TaskContext. Internal to Spark.
*/
protected[spark] def unset(): Unit = taskContext.remove()
/**
* An empty task context that does not represent an actual task. This is only used in tests.
*/
private[spark] def empty(): TaskContextImpl = {
new TaskContextImpl(0, 0, 0, 0, 0, null, new Properties, null)
}
}
/**
* Contextual information about a task which can be read or mutated during
* execution. To access the TaskContext for a running task, use:
* {{{
* org.apache.spark.TaskContext.get()
* }}}
*/
abstract class TaskContext extends Serializable {
// Note: TaskContext must NOT define a get method. Otherwise it will prevent the Scala compiler
// from generating a static get method (based on the companion object's get method).
// Note: Update JavaTaskContextCompileCheck when new methods are added to this class.
// Note: getters in this class are defined with parentheses to maintain backward compatibility.
/**
* Returns true if the task has completed.
*/
def isCompleted(): Boolean
/**
* Returns true if the task has been killed.
*/
def isInterrupted(): Boolean
/**
* Returns true if the task is running locally in the driver program.
* @return false
*/
@deprecated("Local execution was removed, so this always returns false", "2.0.0")
def isRunningLocally(): Boolean
/**
* Adds a (Java friendly) listener to be executed on task completion.
* This will be called in all situations - success, failure, or cancellation. Adding a listener
* to an already completed task will result in that listener being called immediately.
*
* An example use is for HadoopRDD to register a callback to close the input stream.
*
* Exceptions thrown by the listener will result in failure of the task.
*/
def addTaskCompletionListener(listener: TaskCompletionListener): TaskContext
/**
* Adds a listener in the form of a Scala closure to be executed on task completion.
* This will be called in all situations - success, failure, or cancellation. Adding a listener
* to an already completed task will result in that listener being called immediately.
*
* An example use is for HadoopRDD to register a callback to close the input stream.
*
* Exceptions thrown by the listener will result in failure of the task.
*/
def addTaskCompletionListener[U](f: (TaskContext) => U): TaskContext = {
// Note that due to this scala bug: https://github.com/scala/bug/issues/11016, we need to make
// this function polymorphic for every scala version >= 2.12, otherwise an overloaded method
// resolution error occurs at compile time.
addTaskCompletionListener(new TaskCompletionListener {
override def onTaskCompletion(context: TaskContext): Unit = f(context)
})
}
/**
* Adds a listener to be executed on task failure. Adding a listener to an already failed task
* will result in that listener being called immediately.
*/
def addTaskFailureListener(listener: TaskFailureListener): TaskContext
/**
* Adds a listener to be executed on task failure. Adding a listener to an already failed task
* will result in that listener being called immediately.
*/
def addTaskFailureListener(f: (TaskContext, Throwable) => Unit): TaskContext = {
addTaskFailureListener(new TaskFailureListener {
override def onTaskFailure(context: TaskContext, error: Throwable): Unit = f(context, error)
})
}
/**
* The ID of the stage that this task belong to.
*/
def stageId(): Int
/**
* How many times the stage that this task belongs to has been attempted. The first stage attempt
* will be assigned stageAttemptNumber = 0, and subsequent attempts will have increasing attempt
* numbers.
*/
def stageAttemptNumber(): Int
/**
* The ID of the RDD partition that is computed by this task.
*/
def partitionId(): Int
/**
* How many times this task has been attempted. The first task attempt will be assigned
* attemptNumber = 0, and subsequent attempts will have increasing attempt numbers.
*/
def attemptNumber(): Int
/**
* An ID that is unique to this task attempt (within the same SparkContext, no two task attempts
* will share the same attempt ID). This is roughly equivalent to Hadoop's TaskAttemptID.
*/
def taskAttemptId(): Long
/**
* Get a local property set upstream in the driver, or null if it is missing. See also
* `org.apache.spark.SparkContext.setLocalProperty`.
*/
def getLocalProperty(key: String): String
@DeveloperApi
def taskMetrics(): TaskMetrics
/**
* ::DeveloperApi::
* Returns all metrics sources with the given name which are associated with the instance
* which runs the task. For more information see `org.apache.spark.metrics.MetricsSystem`.
*/
@DeveloperApi
def getMetricsSources(sourceName: String): Seq[Source]
/**
* If the task is interrupted, throws TaskKilledException with the reason for the interrupt.
*/
private[spark] def killTaskIfInterrupted(): Unit
/**
* If the task is interrupted, the reason this task was killed, otherwise None.
*/
private[spark] def getKillReason(): Option[String]
/**
* Returns the manager for this task's managed memory.
*/
private[spark] def taskMemoryManager(): TaskMemoryManager
/**
* Register an accumulator that belongs to this task. Accumulators must call this method when
* deserializing in executors.
*/
private[spark] def registerAccumulator(a: AccumulatorV2[_, _]): Unit
/**
* Record that this task has failed due to a fetch failure from a remote host. This allows
* fetch-failure handling to get triggered by the driver, regardless of intervening user-code.
*/
private[spark] def setFetchFailed(fetchFailed: FetchFailedException): Unit
/** Marks the task for interruption, i.e. cancellation. */
private[spark] def markInterrupted(reason: String): Unit
/** Marks the task as failed and triggers the failure listeners. */
private[spark] def markTaskFailed(error: Throwable): Unit
/** Marks the task as completed and triggers the completion listeners. */
private[spark] def markTaskCompleted(error: Option[Throwable]): Unit
/** Optionally returns the stored fetch failure in the task. */
private[spark] def fetchFailed: Option[FetchFailedException]
/** Gets local properties set upstream in the driver. */
private[spark] def getLocalProperties: Properties
}
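
/**
 * Illustrative usage sketch (added; not part of the original file): user code typically reads the
 * active context inside a partition-level closure. The transformation below is an assumption for
 * demonstration, not Spark API documentation.
 */
private[spark] object TaskContextUsageExample {
  import org.apache.spark.rdd.RDD

  def tagWithPartition[T](rdd: RDD[T]): RDD[(Int, T)] =
    rdd.mapPartitions { iter =>
      val ctx = TaskContext.get()
      // e.g. release per-partition resources once the task finishes
      ctx.addTaskCompletionListener[Unit](_ => ())
      iter.map(t => (ctx.partitionId(), t))
    }
}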
| michalsenkyr/spark | core/src/main/scala/org/apache/spark/TaskContext.scala | Scala | apache-2.0 | 8,620 |