code (string, 5-1M) | repo_name (string, 5-109) | path (string, 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M)
---|---|---|---|---|---|
import com.typesafe.sbt.SbtGit._
import sbt._
object Versioning {
val snapshotSuffix = "-SNAPSHOT"
val releasedVersion = """^r?([0-9\.]+)$""".r
val releasedCandidateVersion = """^r?([0-9\.]+-rc\d+)$""".r
val betaVersion = """^r?([0-9\.]+-beta\d+)$""".r
val snapshotVersion = """^r?[0-9\.]+(.*)$""".r
def settings(baseVersion: String): Seq[Def.Setting[_]] = Seq(
git.baseVersion := baseVersion,
git.uncommittedSignifier := None,
git.useGitDescribe := true,
git.formattedShaVersion := git.gitHeadCommit.value map(sha => s"$baseVersion-${sha take 7}$snapshotSuffix"),
git.gitTagToVersionNumber := {
case releasedVersion(v) => Some(v)
case releasedCandidateVersion(rc) => Some(rc)
case betaVersion(beta) => Some(beta)
case snapshotVersion(v) => Some(s"$baseVersion$v$snapshotSuffix")
case _ => None
}
)
}
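// Hedged usage sketch (not part of the original repository): how the settings
// above might be applied in a project definition, and roughly how the tag
// patterns resolve to versions. The base version "1.2.0" and the tag names
// below are assumptions for illustration only.
//   tag "r1.2.0"       -> "1.2.0"        (releasedVersion)
//   tag "r1.2.0-rc1"   -> "1.2.0-rc1"    (releasedCandidateVersion)
//   tag "r1.2.0-beta2" -> "1.2.0-beta2"  (betaVersion)
//   other "r<digits>..." tags -> "<baseVersion><tail>-SNAPSHOT" (snapshotVersion)
object VersioningUsageExample {
  lazy val exampleSettings: Seq[Def.Setting[_]] = Versioning.settings("1.2.0")
}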
| jCalamari/mongo-scala-driver | project/Versioning.scala | Scala | apache-2.0 | 873 |
package controllers
import javax.inject.Inject
import play.api.libs.json.{JsObject, Json}
import play.api.mvc._
import play.modules.reactivemongo.ReactiveMongoApi
import reactivemongo.bson.BSONObjectID
import reactivemongo.play.json._
import reactivemongo.play.json.collection.JSONCollection
import scala.concurrent.{ExecutionContext, Future}
class UserAction @Inject()(val parser: BodyParsers.Default, val reactiveMongoApi: ReactiveMongoApi)(implicit val executionContext: ExecutionContext) extends ActionBuilder[UserRequest, AnyContent]
with ActionTransformer[Request, UserRequest] {
val sessionsCollection = reactiveMongoApi.database.map(_.collection[JSONCollection]("sessions"))
def transform[A](request: Request[A]) = {
request.session.get("session_id").map {
session_id =>
BSONObjectID.parse(session_id).toOption.map {
_id =>
val matcher = Json.obj("_id"->_id)
for {
sessions <- sessionsCollection
sessionOpt <- sessions.find(matcher).one[JsObject]
} yield {
val userID = sessionOpt.map {
s =>
(s \ "userId").as[BSONObjectID]
}
new UserRequest(userID,request)
}
}.getOrElse {
Future.successful(new UserRequest(None,request))
}
}.getOrElse {
Future.successful(new UserRequest(None,request))
}
}
}
| sdor/biosys | labnotes/app/controllers/UserAction.scala | Scala | gpl-2.0 | 1,478 |
package com.vorlov.api.twitter.model
case class TweetCoordinates(latitude: Double, longitude: Double) | VolodymyrOrlov/tweets-opinion-mining | src/main/scala/com/vorlov/api/twitter/model/TweetCoordinates.scala | Scala | apache-2.0 | 102 |
package skuber
import java.util.Date
import Volume._
/**
* @author David O'Riordan
*/
case class PersistentVolumeClaim(
val kind: String ="PersistentVolumeClaim",
override val apiVersion: String = v1,
val metadata: ObjectMeta = ObjectMeta(),
spec: Option[PersistentVolumeClaim.Spec] = None,
status: Option[PersistentVolumeClaim.Status] = None)
extends ObjectResource {
def withResourceVersion(version: String) = this.copy(metadata = metadata.copy(resourceVersion=version))
}
object PersistentVolumeClaim {
import PersistentVolume.AccessMode
case class Spec(
accessModes: List[AccessMode.AccessMode] = Nil,
resources: Option[Resource.Requirements] = None,
volumeName: String="")
import PersistentVolume.Phase
case class Status(
phase: Option[Phase.Phase] = None,
accessModes: List[AccessMode.AccessMode] = List())
} | coryfklein/skuber | client/src/main/scala/skuber/PersistentVolumeClaim.scala | Scala | apache-2.0 | 907 |
package org.jetbrains.plugins.scala.lang.refactoring.extractMethod.duplicates
import com.intellij.psi.{PsiComment, PsiWhiteSpace, PsiElement}
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScReferenceExpression}
import org.jetbrains.plugins.scala.lang.refactoring.extractMethod.{ScalaExtractMethodUtils, ScalaExtractMethodSettings}
import com.intellij.openapi.application.{ApplicationNamesInfo, ApplicationManager}
import com.intellij.openapi.ui.Messages
import com.intellij.refactoring.RefactoringBundle
import com.intellij.codeInsight.folding.CodeFoldingManager
import com.intellij.openapi.project.Project
import com.intellij.openapi.editor.{ScrollType, LogicalPosition, FoldRegion, Editor}
import com.intellij.openapi.util.TextRange
import com.intellij.openapi.editor.colors.{EditorColors, EditorColorsManager}
import com.intellij.openapi.editor.markup.{RangeHighlighter, TextAttributes}
import com.intellij.codeInsight.highlighting.HighlightManager
import java.util
import com.intellij.ui.ReplacePromptDialog
import com.intellij.find.FindManager
import scala.Some
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
/**
* Nikolay.Tropin
* 2014-05-15
*/
object DuplicatesUtil {
def isSignificant(e: PsiElement): Boolean = e match {
case _: PsiWhiteSpace => false
case _: PsiComment => false
case ElementType(tp) if tp == ScalaTokenTypes.tSEMICOLON => false
case _ => true
}
def filtered(elements: Seq[PsiElement]): Seq[PsiElement] = {
elements.filter(isSignificant)
}
def filteredChildren(element: PsiElement): Seq[PsiElement] = {
filtered(element.children.toSeq)
}
def isUnder(element: PsiElement, parents: Seq[PsiElement]): Boolean = {
parents.exists(PsiTreeUtil.isAncestor(_, element, false))
}
def canBeEquivalent(pattern: PsiElement, candidate: PsiElement): Boolean = {
(pattern, candidate) match {
case (ref: ScReferenceExpression, expr: ScExpression) => true
case (ElementType(tp1), ElementType(tp2)) => tp1 == tp2
//todo: handle `this` expressions, return statements and infix expressions
case _ => false
}
}
def withFilteredForwardSiblings(element: PsiElement, size: Int): Option[Seq[PsiElement]] = {
val siblingIterator = element.nextSiblings
val siblings = element +: siblingIterator.withFilter(isSignificant).take(size - 1).toSeq
if (siblings.size < size) None
else Some(siblings)
}
def findDuplicates(settings: ScalaExtractMethodSettings): Seq[DuplicateMatch] = {
val pattern = new DuplicatePattern(filtered(settings.elements), settings.parameters)
pattern.findDuplicates(settings.nextSibling.getParent)
}
def previewDuplicate(project: Project, editor: Editor, duplicate: DuplicateMatch)(work: => Unit) {
val highlighter = new util.ArrayList[RangeHighlighter](1)
highlightDuplicate(project, editor, duplicate, highlighter)
val range = duplicate.textRange
val logicalPosition: LogicalPosition = editor.offsetToLogicalPosition(range.getStartOffset)
expandAllRegionsCoveringRange(project, editor, range)
editor.getScrollingModel.scrollTo(logicalPosition, ScrollType.MAKE_VISIBLE)
work
HighlightManager.getInstance(project).removeSegmentHighlighter(editor, highlighter.get(0))
}
private def invokeDuplicateProcessing(duplicates: Seq[DuplicateMatch], settings: ScalaExtractMethodSettings, project: Project, editor: Editor) {
var replaceAll = false
var cancelled = false
for ((d, idx) <- duplicates.zipWithIndex) {
if (!replaceAll) {
previewDuplicate(project, editor, d) {
val dialog = showPromptDialog(settings.methodName, idx + 1, duplicates.size, project)
dialog.getExitCode match {
case FindManager.PromptResult.ALL =>
replaceDuplicate(project, settings, d)
replaceAll = true
case FindManager.PromptResult.OK => replaceDuplicate(project, settings, d)
case FindManager.PromptResult.SKIP =>
case FindManager.PromptResult.CANCEL => cancelled = true
}
}
if (cancelled) return
}
else replaceDuplicate(project, settings, d)
}
}
private def replaceDuplicate(project: Project, settings: ScalaExtractMethodSettings, d: DuplicateMatch) =
inWriteCommandAction(project, "Replace duplicate") {
ScalaExtractMethodUtils.replaceWithMethodCall(settings, d)
}
private def showPromptDialog(methodName: String, idx: Int, size: Int, project: Project) = {
val title = RefactoringBundle.message("process.methods.duplicates.title", Int.box(idx), Int.box(size), methodName)
val dialog: ReplacePromptDialog = new ReplacePromptDialog(false, title, project)
dialog.show()
dialog
}
def processDuplicates(duplicates: Seq[DuplicateMatch], settings: ScalaExtractMethodSettings, project: Project, editor: Editor) {
def showDuplicatesDialog(): Int = {
val message = RefactoringBundle.message("0.has.detected.1.code.fragments.in.this.file.that.can.be.replaced.with.a.call.to.extracted.method",
ApplicationNamesInfo.getInstance.getProductName, Int.box(duplicates.size))
Messages.showYesNoDialog(project, message, "Process Duplicates", Messages.getQuestionIcon)
}
if (ApplicationManager.getApplication.isUnitTestMode) {
duplicates.foreach(replaceDuplicate(project, settings, _))
return
}
if (duplicates.size == 1) {
previewDuplicate(project, editor, duplicates(0)) {
if (showDuplicatesDialog() == Messages.YES) replaceDuplicate(project, settings, duplicates(0))
}
return
}
if (showDuplicatesDialog() == Messages.YES) {
invokeDuplicateProcessing(duplicates, settings, project, editor)
}
}
private def expandAllRegionsCoveringRange(project: Project, editor: Editor, textRange: TextRange) {
val foldRegions: Array[FoldRegion] = CodeFoldingManager.getInstance(project).getFoldRegionsAtOffset(editor, textRange.getStartOffset)
val anyCollapsed: Boolean = foldRegions.exists(!_.isExpanded)
if (anyCollapsed) {
editor.getFoldingModel.runBatchFoldingOperation(new Runnable {
def run() = foldRegions.filterNot(_.isExpanded).foreach(_.setExpanded(true))
}
)
}
}
def highlightDuplicate(project: Project, editor: Editor, duplicate: DuplicateMatch, highlighters: util.Collection[RangeHighlighter]) {
val colorsManager: EditorColorsManager = EditorColorsManager.getInstance
val attributes: TextAttributes = colorsManager.getGlobalScheme.getAttributes(EditorColors.SEARCH_RESULT_ATTRIBUTES)
val range = duplicate.textRange
HighlightManager.getInstance(project).addRangeHighlight(editor, range.getStartOffset, range.getEndOffset, attributes, true, highlighters)
}
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/refactoring/extractMethod/duplicates/DuplicatesUtil.scala | Scala | apache-2.0 | 6,883 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.geotools
import com.typesafe.scalalogging.slf4j.Logging
import com.vividsolutions.jts.geom._
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.Conversions.toRichSimpleFeatureIterator
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class GridSnapTest extends Specification with Logging {
"GridSnap" should {
"create a gridsnap around a given bbox" in {
val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
val gridSnap = new GridSnap(bbox, 100, 100)
gridSnap must not beNull
}
"compute a SimpleFeatureSource Grid over the bbox" in {
val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
val gridSnap = new GridSnap(bbox, 10, 10)
val grid = gridSnap.generateCoverageGrid
grid must not beNull
val featureIterator = grid.getFeatures.features
val gridLength = featureIterator.length
gridLength should be equalTo 100
}
"compute a sequence of points between various sets of coordinates" in {
val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
val gridSnap = new GridSnap(bbox, 10, 10)
val resultDiagonal = gridSnap.genBresenhamCoordSet(0, 0, 9, 9).toList
resultDiagonal must not beNull
val diagonalLength = resultDiagonal.length
diagonalLength should be equalTo 9
val resultVertical = gridSnap.genBresenhamCoordSet(0, 0, 0, 9).toList
resultVertical must not beNull
val verticalLength = resultVertical.length
verticalLength should be equalTo 9
val resultHorizontal = gridSnap.genBresenhamCoordSet(0, 0, 9, 0).toList
resultHorizontal must not beNull
val horizontalLength = resultHorizontal.length
horizontalLength should be equalTo 9
val resultSamePoint = gridSnap.genBresenhamCoordSet(0, 0, 0, 0).toList
resultSamePoint must not beNull
val samePointLength = resultSamePoint.length
samePointLength should be equalTo 1
}
"check corner cases where GridSnap is given below minimum coordinates of the grid" in {
val bbox = new Envelope(0.0, 10.0, 0.0, 10.0)
val gridSnap = new GridSnap(bbox, 100, 100)
val iReturn = gridSnap.i(bbox.getMinX - 1)
val jReturn = gridSnap.j(bbox.getMinY - 1)
iReturn should be equalTo 0
jReturn should be equalTo 0
gridSnap.x(iReturn) should be equalTo bbox.getMinX
gridSnap.y(jReturn) should be equalTo bbox.getMinY
}
}
}
| giserh/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/geotools/GridSnapTest.scala | Scala | apache-2.0 | 2,980 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
import java.util.concurrent.ThreadLocalRandom
import io.gatling.commons.util.Spire._
object Arrays {
private def swap[T](array: Array[T], i: Int, j: Int): Unit = {
val tmp = array(i)
array(i) = array(j)
array(j) = tmp
}
def shuffle[T](array: Array[T]): Array[T] =
shuffle(array, array.length)
def shuffle[T](array: Array[T], length: Int): Array[T] = {
val rnd = ThreadLocalRandom.current()
cfor(length)(_ > 1, _ - 1) { i =>
swap(array, i - 1, rnd.nextInt(i))
}
array
}
}
| gatling/gatling | gatling-commons/src/main/scala/io/gatling/commons/util/Arrays.scala | Scala | apache-2.0 | 1,176 |
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js sbt plugin       **
**    / __/ __// _ | / / / _ | __ / // __/  (c) 2013, LAMP/EPFL        **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \   http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                              **
**                          |/____/                                    **
\*                                                                      */
package org.scalajs.sbtplugin
import OptimizerOptions._
/** Various options for the Scala.js optimizer tool chain
*
* This is not a case class and does have a private constructor so that we
* can add fields in a binary-compatible manner.
*
* Use [[OptimizerOptions.apply]] and the `with` methods to create a configured
* instance.
*/
final class OptimizerOptions private (
/** Whether to only warn if the linker has errors */
val bypassLinkingErrors: Boolean = false,
/** Whether to parallelize the optimizer (currently fastOptJS only) **/
val parallel: Boolean = true,
/** Whether to run the optimizer in batch (i.e. non-incremental) mode */
val batchMode: Boolean = false,
/** Whether to run the Scala.js optimizer */
val disableOptimizer: Boolean = false,
/** Whether to pretty-print in fullOptJS */
val prettyPrintFullOptJS: Boolean = false,
/** Perform expensive checks of the sanity of the Scala.js IR */
val checkScalaJSIR: Boolean = false,
/** Use Google Closure Backend */
val useClosureCompiler: Boolean = false
) {
@deprecated(
"Bypassing linking errors will not be possible in the next major version.",
"0.6.6")
def withBypassLinkingErrors(bypassLinkingErrors: Boolean): OptimizerOptions =
copy(bypassLinkingErrors = bypassLinkingErrors)
def withParallel(parallel: Boolean): OptimizerOptions =
copy(parallel = parallel)
def withBatchMode(batchMode: Boolean): OptimizerOptions =
copy(batchMode = batchMode)
def withDisableOptimizer(disableOptimizer: Boolean): OptimizerOptions =
copy(disableOptimizer = disableOptimizer)
def withPrettyPrintFullOptJS(prettyPrintFullOptJS: Boolean): OptimizerOptions =
copy(prettyPrintFullOptJS = prettyPrintFullOptJS)
def withCheckScalaJSIR(checkScalaJSIR: Boolean): OptimizerOptions =
copy(checkScalaJSIR = checkScalaJSIR)
def withUseClosureCompiler(useClosureCompiler: Boolean): OptimizerOptions =
copy(useClosureCompiler = useClosureCompiler)
private def copy(bypassLinkingErrors: Boolean = bypassLinkingErrors,
parallel: Boolean = parallel, batchMode: Boolean = batchMode,
disableOptimizer: Boolean = disableOptimizer,
prettyPrintFullOptJS: Boolean = prettyPrintFullOptJS,
checkScalaJSIR: Boolean = checkScalaJSIR,
useClosureCompiler: Boolean = useClosureCompiler) = {
new OptimizerOptions(bypassLinkingErrors, parallel, batchMode,
disableOptimizer, prettyPrintFullOptJS, checkScalaJSIR,
useClosureCompiler)
}
override def toString: String = {
s"""OptimizerOptions(
| bypassLinkingErrors = $bypassLinkingErrors
| parallel = $parallel
| batchMode = $batchMode
| disableOptimizer = $disableOptimizer
| prettyPrintFullOptJS = $prettyPrintFullOptJS
| checkScalaJSIR = $checkScalaJSIR
| useClosureCompiler = $useClosureCompiler
|)""".stripMargin
}
}
object OptimizerOptions {
def apply(): OptimizerOptions = new OptimizerOptions()
}
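// Hedged usage sketch (not part of the original source): the doc comment above
// says a configured instance is obtained via `OptimizerOptions.apply` and the
// `with` methods; the particular flag values chosen here are assumptions for
// illustration only.
object OptimizerOptionsUsageExample {
  val example: OptimizerOptions =
    OptimizerOptions()
      .withParallel(true)
      .withBatchMode(false)
      .withCheckScalaJSIR(true)
}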
| mdedetrich/scala-js | sbt-plugin/src/main/scala/scala/scalajs/sbtplugin/OptimizerOptions.scala | Scala | bsd-3-clause | 3,602 |
package org.jetbrains.plugins.scala.failed.typeInference
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.lang.typeInference.TypeInferenceTestBase
import org.junit.experimental.categories.Category
/**
* Created by mucianm on 23.03.16.
*/
@Category(Array(classOf[PerfCycleTests]))
class PrimitivesTest extends TypeInferenceTestBase {
override def folderPath: String = super.folderPath + "bugs5/"
def testSCL7521() = doTest()
def testSCL2045() = doTest(
""" def returnsANumber = {
| if (1==1) {
| /*start*/0/*end*/
| } else {
| 0.0
| }
| }
|
| //Double""".stripMargin)
def testSCL7101(): Unit = doTest {
"""
|object SCL7101 {
| def fun(x: Byte): Byte = x
|
| def fun(x: Boolean): Boolean = x
|
| /*start*/fun((10))/*end*/
|}
|//Byte
""".stripMargin.trim
}
def testSCL7923(): Unit = doTest {
"""
|object Scl7923 {
| import java.lang.{Long => JLong}
|
| class Test {
|
| def withJavaLong(number: JLong): Unit = {}
|
| def test(): Unit = {
| val num: Int = 5
| withJavaLong(number = /*start*/num/*end*/)
| }
| }
|}//Long""".stripMargin
}
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/failed/typeInference/PrimitivesTest.scala | Scala | apache-2.0 | 1,356 |
package net.categoricaldata.server.transformers
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import net.categoricaldata.examples.Examples
@RunWith(classOf[JUnitRunner])
class TransformersTest extends FlatSpec with ShouldMatchers {
import net.liftweb.json.Serialization.write
import net.liftweb.json.JsonParser.parse
implicit val formats = net.liftweb.json.DefaultFormats
"Ontology" should "pass through the Bowler transformer successfully" in {
for (ontology <- List(Examples.Isomorphism)) {
new OntologyTransformer().toValue(write(ontology.toJSON)) should equal(Some(ontology))
}
}
"Translation" should "pass through the Bowler transformer successfully" in {
for (translation <- List(Examples.PointedSetsToIsomorphism, Examples.ReverseGraph)) {
new TranslationTransformer().toValue(write(translation.toJSON)) should equal(Some(translation))
}
val literal = """{
"source": {
"objects": [
"an edge",
"a vertex"
],
"arrows": [
{
"source": "an edge",
"target": "a vertex",
"label": "has as source"
},
{
"source": "an edge",
"target": "a vertex",
"label": "has as target"
}
],
"relations": [],
"extra": "arbitrary text along for the ride"
},
"target": {
"objects": [
"an edge",
"a vertex"
],
"arrows": [
{
"source": "an edge",
"target": "a vertex",
"label": "has as source"
},
{
"source": "an edge",
"target": "a vertex",
"label": "has as target"
}
],
"relations": []
},
"onObjects": {
"an edge": "an edge",
"a vertex": "a vertex"
},
"onGenerators": [
{
"arrow": {
"source": "an edge",
"target": "a vertex",
"label": "has as source"
},
"path": [
{
"source": "an edge",
"target": "a vertex",
"label": "has as target"
}
]
},
{
"arrow": {
"source": "an edge",
"target": "a vertex",
"label": "has as target"
},
"path": [
{
"source": "an edge",
"target": "a vertex",
"label": "has as source"
}
]
}
]
}"""
val transformed = new TranslationTransformer().toValue(literal)
transformed should not be ('isEmpty)
// TODO get this working
// transformed.get.toJSON.source.json.get.contains("extra") should equal (true)
}
"Dataset" should "pass through the Bowler transformer successfully" in {
for (dataset <- List(Examples.TerminalBigraph)) {
new DatasetTransformer().toValue(write(dataset.toJSON)) should equal(Some(dataset))
}
}
}
| JasonGross/categoricaldata | src/test/scala/net/categoricaldata/server/transformers/TransformersTest.scala | Scala | mit | 2,849 |
import scala.quoted.*
object Macros {
implicit inline def printTree[T](inline x: T): Unit =
${ impl('x) }
def impl[T](x: Expr[T])(using q: Quotes) : Expr[Unit] = {
import q.reflect.*
val tree = x.asTerm
val treeStr = Expr(tree.show(using Printer.TreeStructure))
val treeTpeStr = Expr(tree.tpe.show(using Printer.TypeReprStructure))
'{
println(${treeStr})
println(${treeTpeStr})
println()
}
}
}
| dotty-staging/dotty | tests/run-macros/tasty-extractors-2/quoted_1.scala | Scala | apache-2.0 | 452 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.persistence.cluster
import akka.Done
import akka.actor.Status.Failure
import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSystem, PoisonPill, Props, SupervisorStrategy }
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings, ClusterSingletonProxy, ClusterSingletonProxySettings }
import akka.pattern.BackoffSupervisor
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
/**
* Performs an idempotent task on one node on cluster startup.
*
* The task guarantees that when the actor is asked to perform the operation, the operation will only be executed on
* one node of the cluster at a time, and that when the returned future is redeemed, the task will be performed.
*
* This will start a cluster singleton which will execute the task. The task may be executed again when a new node
* becomes the singleton, hence the task must be idempotent.
*
* If the task fails, it will be re-executed using exponential backoff using the given backoff parameters.
*/
object ClusterStartupTask {
def apply(
system: ActorSystem,
taskName: String,
task: () => Future[Done],
taskTimeout: FiniteDuration,
role: Option[String],
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomBackoffFactor: Double
): ClusterStartupTask = {
val startupTaskProps = Props(classOf[ClusterStartupTaskActor], task, taskTimeout)
val backoffProps = BackoffSupervisor.propsWithSupervisorStrategy(
startupTaskProps, taskName, minBackoff, maxBackoff, randomBackoffFactor, SupervisorStrategy.stoppingStrategy
)
val singletonProps = ClusterSingletonManager.props(backoffProps, PoisonPill,
ClusterSingletonManagerSettings(system))
val singleton = system.actorOf(singletonProps, s"$taskName-singleton")
val singletonProxy = system.actorOf(
ClusterSingletonProxy.props(
singletonManagerPath = singleton.path.toStringWithoutAddress,
settings = ClusterSingletonProxySettings(system).withRole(role)
), s"$taskName-singletonProxy"
)
new ClusterStartupTask(singletonProxy)
}
}
class ClusterStartupTask(actorRef: ActorRef) {
import ClusterStartupTaskActor._
/**
* Execute the task. The startup task will reply with [[akka.Done]] when it's done, or a
* [[akka.actor.Status.Failure]] if the task failed or timed out.
*
* @param sender The sender to reply to.
*/
def execute()(implicit sender: ActorRef): Unit = {
actorRef ! Execute
}
/**
* Request the task to be executed using the ask pattern.
*
* @return A future of the result.
*/
def askExecute()(implicit timeout: Timeout): Future[Done] = {
import akka.pattern.ask
(actorRef ? Execute).mapTo[Done]
}
}
private[lagom] object ClusterStartupTaskActor {
case object Execute
}
private[lagom] class ClusterStartupTaskActor(task: () => Future[Done], timeout: FiniteDuration) extends Actor with ActorLogging {
import ClusterStartupTaskActor._
import akka.pattern.ask
import akka.pattern.pipe
import context.dispatcher
override def preStart(): Unit = {
// We let the ask pattern handle the timeout, by asking ourselves to execute the task and piping the result back to
// ourselves
implicit val askTimeout = Timeout(timeout)
self ? Execute pipeTo self
}
def receive = {
case Execute =>
log.info(s"Executing cluster start task ${self.path.name}.")
task() pipeTo self
context become executing(List(sender()))
}
def executing(outstandingRequests: List[ActorRef]): Receive = {
case Execute =>
context become executing(sender() :: outstandingRequests)
case Done =>
log.info(s"Cluster start task ${self.path.name} done.")
outstandingRequests foreach { requester =>
requester ! Done
}
context become executed
case failure @ Failure(e) =>
outstandingRequests foreach { requester =>
requester ! failure
}
// If we failed to prepare, crash
throw e
}
def executed: Receive = {
case Execute =>
sender() ! Done
case Done =>
// We do expect to receive Done once executed since we initially asked ourselves to execute
}
}
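// Hedged usage sketch (not part of the original file): wiring up the startup
// task described in the doc comment above and asking it to execute. The task
// name, the no-op task body, and the timeout/backoff values are assumptions
// for illustration only.
object ClusterStartupTaskUsageExample {
  import scala.concurrent.duration._

  def prepareTables(system: ActorSystem): Future[Done] = {
    val startupTask = ClusterStartupTask(
      system,
      taskName = "prepareTables",
      task = () => Future.successful(Done), // idempotent work goes here
      taskTimeout = 30.seconds,
      role = None,
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomBackoffFactor = 0.2
    )
    implicit val askTimeout: Timeout = Timeout(30.seconds)
    startupTask.askExecute()
  }
}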
| edouardKaiser/lagom | persistence/core/src/main/scala/com/lightbend/lagom/internal/persistence/cluster/ClusterStartupTask.scala | Scala | apache-2.0 | 4,457 |
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\\
* @ @ *
* # # # # (c) 2017 CAB *
* # # # # # # *
* # # # # # # # # # # # # *
* # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* @ @ *
\\* * http://github.com/alexcab * * * * * * * * * * * * * * * * * * * * * * * * * */
package mathact.core.sketch.infrastructure.instance
import java.util.concurrent.{TimeoutException, ExecutionException}
import mathact.core.WorkerBase
import mathact.core.bricks.blocks.BlockContext
import mathact.core.bricks.data.SketchData
import mathact.core.model.config.SketchInstanceConfigLike
import mathact.core.model.enums.BlockType
import mathact.core.model.holders._
import mathact.core.model.messages.M
import mathact.core.sketch.blocks.WorkbenchLike
import scala.concurrent.Future
/** Sketch instance actor
* Created by CAB on 17.10.2016.
*/
private[core] class SketchInstanceActor(
config: SketchInstanceConfigLike,
sketchData: SketchData,
controller: SketchControllerRef,
userLogging: UserLoggingRef,
plumbing: PlumbingRef,
layout: LayoutRef)
extends WorkerBase{ import SketchInstance._
//Variables
var isBuildingRan = false
var isSketchContextBuilt = false
var isBuildingDone = false
var isBuildingTimeout = false
var startBuildingTime = 0L
//Functions
/** Sketch run building, called after all UI shown */
def constructSketchInstance(): Unit = isBuildingRan match{
case false ⇒
//Run building
log.debug(
s"[SketchInstanceActor.sketchRunBuilding] Try to create sketch instance, " +
s"sketchBuildingTimeout: ${config.sketchBuildingTimeout}")
//Run building timeout
context.system.scheduler.scheduleOnce(
config.sketchBuildingTimeout,
self,
SketchInstanceBuildTimeout)
//Set is build and start built time
isBuildingRan = true
startBuildingTime = System.currentTimeMillis
//Build sketch
Future{sketchData.clazz.newInstance()}
.map{ s ⇒ self ! SketchInstanceBuilt(s.asInstanceOf[WorkbenchLike])}
.recover{
case t: ExecutionException ⇒ self ! SketchInstanceBuiltError(t.getCause)
case t: Throwable ⇒ self ! SketchInstanceBuiltError(t)}
case true ⇒
//Already build log error
log.error(
s"[SketchInstanceActor.sketchRunBuilding] Sketch instance already build.")}
/** Get workbench context, create and return of BlockContext
* @return - Either[Exception, BlockContext] */
def buildSketchContext(): Either[Exception, BlockContext] = isSketchContextBuilt match{
case false ⇒
log.debug(s"[SketchInstanceActor.getSketchContext] Build BlockContext")
val response = Right{ new BlockContext(
BlockType.Workbench,
context.system,
controller,
userLogging,
layout,
plumbing,
config.pumpConfig,
config.commonConfig)}
isSketchContextBuilt = true
response
case true ⇒
val err = new IllegalStateException(s"[SketchInstanceActor.getSketchContext] Context already created.")
log.error(err, s"[SketchInstanceActor.getSketchContext] Error on creating.")
Left(err)}
/** Sketch instance successfully built
* @param workbench - WorkbenchLike */
def sketchInstanceBuilt(workbench: WorkbenchLike): Unit = {
//Set done
isBuildingDone = true
//Calc build time
val time = System.currentTimeMillis - startBuildingTime
//Check if sketch context built and if no timeout
(isSketchContextBuilt, isBuildingTimeout) match{
case (true, false) ⇒
log.debug(s"[SketchInstanceActor.sketchInstanceBuilt] time: $time, workbench: $workbench.")
//Log to user logging
userLogging ! M.LogInfo(None, "SketchInstance", s"Sketch instance successfully built in $time mills.")
//Report to controller
controller ! M.SketchInstanceReady(workbench)
case (false, _) ⇒
log.error(s"[SketchInstanceActor.sketchInstanceBuilt] Building failed, BlockContext is not built, time: $time.")
//Log to user logging
userLogging ! M.LogError(None, "SketchInstance", Seq(), "BlockContext is not built in init of sketch instance.")
//Send SketchInstanceFail
controller ! M.SketchInstanceError(new IllegalStateException(
s"[SketchInstanceActor.sketchInstanceBuilt] BlockContext is not built, time: $time."))
case (_, true) ⇒
log.error(s"[SketchInstanceActor.sketchInstanceBuilt] Built after timeout, do nothing, time: $time.")
//Log to user logging
userLogging ! M.LogError(
None,
"SketchInstance",
Seq(),
s"Built after timeout (${config.sketchBuildingTimeout}), building time: $time.")}}
/** Error during sketch instance building
* @param error - Throwable */
def sketchInstanceBuiltError(error: Throwable): Unit = {
//Set done
isBuildingDone = true
//Calc build time
val time = System.currentTimeMillis - startBuildingTime
//Log
log.error(
error,
s"[SketchInstanceActor.sketchInstanceBuiltError] Error on creating Sketch extends Workbench instance, " +
s"time: $time, isBuildingTimeout: $isBuildingTimeout")
//Build message and log to user logging
val msg = error match{
case err: NoSuchMethodException ⇒ s"NoSuchMethodException, check if sketch class is not inner."
case err ⇒ s"Exception on building of sketch instance, building time: $time mills."}
userLogging ! M.LogError(None, "SketchInstance", Seq(error), msg)
//Send SketchInstanceFail if no timeout
if(! isBuildingTimeout) controller ! M.SketchInstanceError(error)}
/** Sketch instance not build in required time */
def sketchInstanceBuiltTimeout(): Unit = isBuildingDone match{
case false ⇒
log.error(
s"[SketchInstanceActor.sketchInstanceBuiltTimeout] Building failed, sketch not built " +
s"in ${config.sketchBuildingTimeout}.")
//Set timeout
isBuildingTimeout = true
//Log to user logging
userLogging ! M.LogError(
None,
"SketchInstance",
Seq(),
s"Timeout, sketch instance not built in ${config.sketchBuildingTimeout}.")
//Send SketchInstanceFail
controller ! M.SketchInstanceError(new TimeoutException(
s"[SketchInstanceActor.sketchInstanceBuiltTimeout] Sketch not built in ${config.sketchBuildingTimeout}"))
case true ⇒
log.debug(s"[SketchInstanceActor.sketchInstanceBuiltTimeout] Building done, do nothing.")}
//Messages handling
def reaction = {
//Try to create instance
case M.CreateSketchInstance ⇒ constructSketchInstance()
//Build sketch context for given actor
case M.BuildSketchContextFor(actor) ⇒ actor ! buildSketchContext()
//Sketch instance built
case SketchInstanceBuilt(instance) ⇒ sketchInstanceBuilt(instance)
//Sketch instance built error
case SketchInstanceBuiltError(error) ⇒ sketchInstanceBuiltError(error)
//Sketch instance built timeout
case SketchInstanceBuildTimeout ⇒ sketchInstanceBuiltTimeout()}
//Cleanup
def cleanup(): Unit = { }}
| AlexCAB/MathAct | mathact_core/src/main/scala/mathact/core/sketch/infrastructure/instance/SketchInstanceActor.scala | Scala | mit | 7,992 |
/*
* @author Daniel Strebel
*
* Copyright 2012 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.examples
import scala.collection.immutable.Queue
import scala.collection.mutable.LinkedList
import scala.collection.mutable.ListBuffer
import com.signalcollect._
/**
* Elements of a path query.
* PathQueryNodes represent nodes in a query path that can match for a node in the graph.
*/
abstract class PathQueryNode extends Serializable {
def matches(vertex: Vertex[_, _]): Boolean
def expand: List[PathQueryNode] = List()
}
/**
* PathQueryNode for which the provided condition specifies whether a node matches this query node or not.
*/
class WildcardQueryNode(condition: Vertex[_, _] => Boolean = vertex => true) extends PathQueryNode {
def matches(vertex: Vertex[_, _]) = condition(vertex)
}
/**
* More generalized version of WildcardQueryNode that can match 0 to (including) maxExpansion times in a row.
*/
class StarQueryNode(condition: Vertex[_, _] => Boolean = vertex => true, maxExpansion: Int = 1) extends WildcardQueryNode(condition) {
override def expand: List[PathQueryNode] = {
if (maxExpansion > 0) {
List(new StarQueryNode(condition, maxExpansion - 1))
} else {
List()
}
}
}
/**
* Query node that only matches a vertex if it has the specified id.
*/
class FixedQueryNode(id: Any) extends PathQueryNode {
def matches(vertex: Vertex[_, _]) = (vertex.id.hashCode() == id.hashCode())
}
/**
* A PathQuery is a chain of PathQueryNodes that specify which conditions a path through the graph
* must fulfill in order to match the query. As the query is passed along matching nodes the matched
* PathQueryNodes are removed from the active path query and stored in the matched path queue to keep track
* of the matched nodes in the graph.
*/
class PathQuery() extends Serializable {
var unmatchedQuery = LinkedList[PathQueryNode]() //Part of the query that is not matched yet.
var matchedPath = Queue[Any]() // Trail of already matched nodes
/**
* Match the head of the query to the provided vertex. If the match was successful a list follow-up queries is returned.
*
* @param vertex that should be matched to the head of the remaining path query.
* @return a list of remaining queries after matching a vertex to the head of the unmatched path query or None if the head did not match the vertex.
*/
def getRemainingQuery(vertex: Vertex[_, _]): Option[List[PathQuery]] = {
if (unmatchedQuery.size > 0 && unmatchedQuery.head.matches(vertex)) {
val remainingQuery = new PathQuery
remainingQuery.matchedPath = matchedPath.enqueue(vertex.id)
remainingQuery.unmatchedQuery = unmatchedQuery.tail
val expandedQueryHeads = unmatchedQuery.head.expand
val expandedQueries = expandedQueryHeads.map(queryHead => {
val expandedQuery = new PathQuery
expandedQuery.matchedPath = remainingQuery.matchedPath
expandedQuery.prependQueryNode(queryHead)
expandedQuery
})
Some(remainingQuery :: expandedQueries)
} else {
None
}
}
/**
* Adds a PathQueryNode to the end of the unmatched query
*/
def appendQueryNode(node: PathQueryNode) {
unmatchedQuery = unmatchedQuery :+ node
}
/**
* Adds a PathQueryNode to the beginning of the unmatched query
*/
def prependQueryNode(node: PathQueryNode) {
unmatchedQuery = node +: unmatchedQuery
}
}
/**
* Collects all matched paths as results of the query.
*/
object ResultHandler {
val results = ListBuffer[List[Any]]()
def addPath(path: List[Any]) = results += path
def getResults = results.toList
}
/**
* Collects all matched Paths
*/
class QueryNode
class QueryVertex(vertexId: Int, state: List[PathQuery]) extends DataFlowVertex(vertexId, state) with ResetStateAfterSignaling[Int, List[PathQuery]] {
type Signal = List[PathQuery]
val resetState = null
def collect(queries: List[PathQuery]): List[PathQuery] = {
var newState = state
if (queries != null) {
for (query <- queries) {
if (query != null) {
query.getRemainingQuery(this) match {
case Some(restQueries) => {
for (restQuery <- restQueries) {
if (restQuery.unmatchedQuery.size == 0) {
ResultHandler.addPath(restQuery.matchedPath.toList)
} else {
if (state != null) {
newState = restQuery +: newState
} else {
newState = List(restQuery)
}
}
}
}
case _ =>
}
}
}
}
newState
}
}
/**
* A little demo that builds a graph and looks for paths
*/
object PathQueryExample extends App {
val graph = GraphBuilder.build
val query = new PathQuery
// query.addQueryNode(new WildcardQueryNode)
// query.addQueryNode(new WildcardQueryNode)
// query.addQueryNode(new FixedQueryNode(3))
// query.addQueryNode(new WildcardQueryNode)
// query.addQueryNode(new FixedQueryNode(2))
// query.addQueryNode(new WildcardQueryNode)
query.appendQueryNode(new StarQueryNode(maxExpansion = 5))
query.appendQueryNode(new FixedQueryNode(2))
graph.addVertex(new QueryVertex(0, List(query)))
graph.addVertex(new QueryVertex(1, null))
graph.addVertex(new QueryVertex(2, null))
graph.addVertex(new QueryVertex(3, null))
graph.addVertex(new QueryVertex(4, null))
graph.addVertex(new QueryVertex(5, null))
graph.addEdge(0, new StateForwarderEdge(1))
graph.addEdge(0, new StateForwarderEdge(2))
graph.addEdge(1, new StateForwarderEdge(2))
graph.addEdge(2, new StateForwarderEdge(3))
graph.addEdge(3, new StateForwarderEdge(4))
graph.addEdge(4, new StateForwarderEdge(2))
graph.addEdge(2, new StateForwarderEdge(5))
val stats = graph.execute
println(ResultHandler.getResults)
graph.shutdown
} | gmazlami/dcop-maxsum | src/main/scala/com/signalcollect/examples/PathQuery.scala | Scala | apache-2.0 | 6,498 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex
import java.io.{PrintWriter, StringWriter}
import java.util
import java.util.Date
import java.util.concurrent.ConcurrentHashMap
import akka.NotUsed
import akka.actor.SupervisorStrategy._
import akka.actor.{Extension => AkkaExtension, _}
import akka.agent.Agent
import akka.http.scaladsl.model.HttpResponse
import akka.pattern._
import akka.stream.scaladsl.Flow
import com.typesafe.config.Config
import org.squbs.lifecycle.{ExtensionLifecycle, GracefulStop, GracefulStopHelper}
import org.squbs.pipeline.{PipelineSetting, RequestContext}
import org.squbs.unicomplex.UnicomplexBoot.StartupType
import scala.annotation.varargs
import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
class UnicomplexExtension(system: ExtendedActorSystem) extends AkkaExtension {
val uniActor = system.actorOf(Props[Unicomplex], "unicomplex")
private var _scannedComponents: Seq[String] = null
private[unicomplex] def setScannedComponents(components: Seq[String]): Unit = synchronized {
// Allowing setting only once
if (_scannedComponents != null && _scannedComponents != components)
throw new IllegalStateException(s"_scannedComponents previously set to ${_scannedComponents}")
else if (_scannedComponents == null)
_scannedComponents = components
}
lazy val scannedComponents = _scannedComponents
val config = system.settings.config.getConfig("squbs")
lazy val externalConfigDir = config.getString("external-config-dir")
val boot = Agent[UnicomplexBoot](null)(system.dispatcher)
}
object Unicomplex extends ExtensionId[UnicomplexExtension] with ExtensionIdProvider {
override def lookup() = Unicomplex
override def createExtension(system: ExtendedActorSystem) = new UnicomplexExtension(system)
type InitReport = Try[Option[String]]
def config(implicit context: ActorContext): Config = apply(context.system).config
def externalConfigDir(implicit context: ActorContext): String = apply(context.system).externalConfigDir
def apply()(implicit context: ActorContext): ActorRef = apply(context.system).uniActor
// Unicomplex actor registry so we can find it without setting up remote or having an actor system (needed on shutdown)
private[unicomplex] val actors = new ConcurrentHashMap[String, ActorRef]
def apply(actorSystemName: String): ActorRef = actors.get(actorSystemName)
}
import org.squbs.unicomplex.Unicomplex._
private[unicomplex] case class PreStartWebService(listeners: Map[String, Config])
private[unicomplex] case object StartWebService
private[unicomplex] case class StartListener(name: String, config: Config)
private[unicomplex] case object RoutesStarted
private[unicomplex] case class StartCubeActor(props: Props, name: String = "", initRequired: Boolean = false)
private[unicomplex] case class StartCubeService(webContext: String, listeners: Seq[String], props: Props,
name: String = "", ps: PipelineSetting, initRequired: Boolean = false)
private[unicomplex] case class StartFailure(t: Throwable)
private[unicomplex] case object CheckInitStatus
private[unicomplex] case object Started
private[unicomplex] case object Activate
private[unicomplex] case object ActivateTimedOut
private[unicomplex] case object ShutdownTimedOut
case class Cube(name: String, fullName: String, version: String, jarPath: String)
case class InitReports(state: LifecycleState, reports: Map[ActorRef, Option[InitReport]])
case class CubeRegistration(info: Cube, cubeSupervisor: ActorRef)
case class Extension(info: Cube, sequence: Int, extLifecycle: Option[ExtensionLifecycle],
exceptions: Seq[(String, Throwable)])
case class Extensions(extensions: Seq[Extension])
sealed trait LifecycleState {
// for Java
def instance = this
}
case object Starting extends LifecycleState // uniActor starts from Starting state
case object Initializing extends LifecycleState // Cubes start from Initializing state
case object Active extends LifecycleState
case object Failed extends LifecycleState
case object Stopping extends LifecycleState
case object Stopped extends LifecycleState
case class Initialized(report: InitReport)
case object Ack
case object ReportStatus
case class StatusReport(state: LifecycleState, cubes: Map[ActorRef, (CubeRegistration, Option[InitReports])],
extensions: Seq[Extension])
case class Timestamp(nanos: Long, millis: Long)
case object SystemState {
// for Java
def instance = this
}
case object LifecycleTimesRequest
case class LifecycleTimes(start: Option[Timestamp], started: Option[Timestamp],
active: Option[Timestamp], stop: Option[Timestamp])
case class ObtainLifecycleEvents(states: LifecycleState*)
// for Java
object ObtainLifecycleEvents {
@varargs def create(states: LifecycleState*) = new ObtainLifecycleEvents(states : _*)
}
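// Hedged illustration (not part of the original file): an actor registering for
// lifecycle callbacks via ObtainLifecycleEvents; the actor name and the log
// messages are assumptions for illustration only.
class LifecycleListenerExample extends Actor with ActorLogging {
  override def preStart(): Unit = Unicomplex() ! ObtainLifecycleEvents(Active, Failed)
  def receive: Receive = {
    case Active => log.info("squbs is now active")
    case Failed => log.error("squbs failed to initialize")
  }
}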
case class StopTimeout(timeout: FiniteDuration)
case class StopCube(name: String)
case class StartCube(name: String)
private[unicomplex] case object HttpBindSuccess
private[unicomplex] case object HttpBindFailed
case object PortBindings
case class FlowWrapper(flow: Flow[RequestContext, RequestContext, NotUsed], actor: ActorRef)
/**
* The Unicomplex actor is the supervisor of the Unicomplex.
* It starts actors that are part of the Unicomplex.
*/
class Unicomplex extends Actor with Stash with ActorLogging {
import context.dispatcher
private var shutdownTimeout = 1.second
override val supervisorStrategy =
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case NonFatal(e) =>
log.warning(s"Received ${e.getClass.getName} with message ${e.getMessage} from ${sender().path}")
Restart
}
private var systemStart: Option[Timestamp] = None
private var systemStarted: Option[Timestamp] = None
private var systemActive: Option[Timestamp] = None
private var systemStop: Option[Timestamp] = None
private var systemState: LifecycleState = Starting
private var activated = false
private var cubes = Map.empty[ActorRef, (CubeRegistration, Option[InitReports])]
private var extensions = Seq.empty[Extension]
private var lifecycleListeners = Seq.empty[(ActorRef, Seq[LifecycleState], Boolean)] // Last boolean is flag whether to remove
private var servicesStarted= false
lazy val serviceRegistry = new ServiceRegistry(log)
private val unicomplexExtension = Unicomplex(context.system)
import unicomplexExtension._
// $COVERAGE-OFF$
/**
* MXBean for exposing Unicomplex state
*/
class SystemStateBean extends SystemStateMXBean {
private[Unicomplex] var startTime: Date = null
private[Unicomplex] var initDuration = -1
private[Unicomplex] var activationDuration = -1
override def getSystemState: String = systemState.toString
override def getStartTime: Date = startTime
override def getInitMillis: Int = initDuration
override def getActivationMillis: Int = activationDuration
}
// $COVERAGE-ON$
class CubesBean extends CubesMXBean {
override def getCubes: util.List[CubeInfo] = {
import scala.collection.JavaConversions._
cubes.values.toSeq map { c =>
CubeInfo(c._1.info.name, c._1.info.fullName, c._1.info.version, c._1.cubeSupervisor.toString())
}
}
}
class ExtensionsBean extends ExtensionsMXBean {
override def getExtensions: util.List[ExtensionInfo] = {
import scala.collection.JavaConversions._
extensions map { e =>
val (phase, ex) = e.exceptions.headOption map {
case (iphase, exception) => (iphase, exception.toString)
} getOrElse (("", ""))
ExtensionInfo(e.info.name, e.sequence, phase, ex)
}
}
}
private val stateMXBean = new SystemStateBean
override def preStart() {
Unicomplex.actors.put(context.system.name, self)
import org.squbs.unicomplex.JMX._
register(stateMXBean, prefix + systemStateName)
register(new CubesBean, prefix + cubesName)
register(new SystemSettingBean(context.system.settings.config), prefix + systemSettingName)
register(new ExtensionsBean, prefix + extensionsName)
}
override def postStop() {
import org.squbs.unicomplex.JMX._ // JMX registrations
unregister(prefix + extensionsName)
unregister(prefix + cubesName)
unregister(prefix + systemStateName)
unregister(prefix + systemSettingName)
Unicomplex.actors.remove(context.system.name)
}
private def shutdownState: Receive = {
case Terminated(target) => log.debug(s"$target is terminated")
if (cubes contains target) {
cubes -= target
} else {
serviceRegistry.listenerTerminated(target)
}
if (cubes.isEmpty && serviceRegistry.isShutdownComplete) {
log.info("All CubeSupervisors and services were terminated. Shutting down the system")
updateSystemState(Stopped)
context.system.shutdown()
}
case ShutdownTimedOut => log.warning("Graceful shutdown timed out.")
updateSystemState(Stopped)
context.system.shutdown()
}
def shutdownBehavior: Receive = {
case StopTimeout(timeout) => if (shutdownTimeout < timeout) shutdownTimeout = timeout
case GracefulStop =>
log.info(s"got GracefulStop from ${sender().path}.")
updateSystemState(Stopping)
if (servicesStarted) {
serviceRegistry.stopAll()
servicesStarted = false
}
cubes.foreach(_._1 ! GracefulStop)
context.become(shutdownState orElse serviceRegistry.shutdownState)
log.info(s"Set shutdown timeout $shutdownTimeout")
context.system.scheduler.scheduleOnce(shutdownTimeout, self, ShutdownTimedOut)
}
def stopAndStartCube: Receive = {
case StopCube(name) =>
val responder = sender()
boot.get().cubes.find(_.info.name == name) flatMap {cube =>
cube.components.get(StartupType.SERVICES)
} map {configs =>
configs.map(_.getString("web-context"))
} match {
case Some(webContexts) =>
serviceRegistry.deregisterContext(webContexts)
self ! Ack
case None => self ! Ack
}
context.become({
case Ack =>
context.actorSelection(s"/user/$name") ! Identify(name)
context.become({
case ActorIdentity(`name`, Some(cubeSupervisor)) =>
cubes get cubeSupervisor match {
case Some(cube) =>
cubes -= cubeSupervisor
cubeSupervisor ! GracefulStop
context.become({
case Terminated(`cubeSupervisor`) =>
responder ! Ack
unstashAll()
context.unbecome()
case other => stash()
}, true)
case None =>
unstashAll()
context.unbecome()
}
case ActorIdentity(`name`, None) =>
log.warning(s"Cube $name does not exist")
unstashAll()
context.unbecome()
case other => stash()
}, true)
case Status.Failure(e) =>
log.warning(s"Failed to unregister web-contexts. Cause: $e")
unstashAll()
context.unbecome()
case other => stash()
}, false)
case StartCube(name) =>
val responder = sender()
context.actorSelection(s"/user/$name") ! Identify(name)
context.become({
case ActorIdentity(cubeName, Some(cubeSupervisor)) =>
log.warning(s"Cube $cubeName is already started")
unstashAll()
context.unbecome()
case ActorIdentity(`name`, None) =>
boot.get().cubes.find(_.info.name == name) foreach {cube =>
UnicomplexBoot.startComponents(cube, boot.get().listenerAliases)(context.system)
}
responder ! Ack
unstashAll()
context.unbecome()
case other => stash()
}, false)
}
def receive = stopAndStartCube orElse shutdownBehavior orElse {
case t: Timestamp => // Setting the real start time from bootstrap
systemStart = Some(t)
stateMXBean.startTime = new Date(t.millis)
case Extensions(es) => // Extension registration
extensions = es
updateSystemState(checkInitState())
case r: CubeRegistration => // Cube registration requests, normally from bootstrap
cubes = cubes + (r.cubeSupervisor -> (r, None))
context.watch(r.cubeSupervisor)
// Sent from Bootstrap before Started signal to tell we have web services to start.
case PreStartWebService(listeners) =>
if (!servicesStarted) {
servicesStarted = true
serviceRegistry.prepListeners(listeners.keys)
}
case RegisterContext(listeners, webContext, serviceHandler, ps) =>
sender() ! Try { serviceRegistry.registerContext(listeners, webContext, serviceHandler, ps) }
case StartListener(name, conf) => // Sent from Bootstrap to start the web service infrastructure.
Try { serviceRegistry.startListener(name, conf, notifySender = sender()) } match {
case Success(startupBehavior) =>
context.become(startupBehavior orElse {
case HttpBindSuccess =>
if (serviceRegistry.isListenersBound) updateSystemState(checkInitState())
context.unbecome()
unstashAll()
case HttpBindFailed =>
updateSystemState(checkInitState())
context.unbecome()
unstashAll()
case _ => stash()
},
discardOld = false)
case Failure(t) => updateSystemState(checkInitState())
}
case Started => // Bootstrap startup and extension init done
updateSystemState(Initializing)
case Activate => // Bootstrap is done. Register for callback when system is active or failed. Remove afterwards
lifecycleListeners = lifecycleListeners :+ (sender(), Seq(Active, Failed), true)
activated = true
updateSystemState(checkInitState())
case ActivateTimedOut =>
// Deploy failFastStrategy for checking once activate times out.
checkInitState = failFastStrategy _
updateSystemState(checkInitState())
sender() ! systemState
case ir: InitReports => // Cubes initialized
updateCubes(ir)
case ReportStatus => // Status report request from admin tooling
if (systemState == Active) sender ! StatusReport(systemState, cubes, extensions)
else {
val requester = sender()
var pendingCubes = cubes collect {
case (actorRef, (_, None)) => actorRef
case (actorRef, (_, Some(InitReports(state, _)))) if state != Active => actorRef
}
if (pendingCubes.isEmpty) sender() ! StatusReport(systemState, cubes, extensions)
else {
pendingCubes foreach (_ ! CheckInitStatus)
val expected: Actor.Receive = {
case ReportStatus => stash() // Stash concurrent ReportStatus requests, handle everything else.
case (ir: InitReports, true) =>
updateCubes(ir)
pendingCubes = pendingCubes.filter(_ != sender())
if (pendingCubes.isEmpty) {
requester ! StatusReport(systemState, cubes, extensions)
unstashAll()
context.unbecome()
}
}
context.become(expected orElse receive, discardOld = false)
}
}
case SystemState =>
sender() ! systemState
case r: ObtainLifecycleEvents => // Registration of lifecycle listeners
lifecycleListeners = lifecycleListeners :+ (sender(), r.states, false)
case LifecycleTimesRequest => // Obtain all timestamps.
sender() ! LifecycleTimes(systemStart, systemStarted, systemActive, systemStop)
case PortBindings => // Obtain listener names and port bindings, mainly used for tests
sender() ! serviceRegistry.portBindings
}
def updateCubes(reports: InitReports) {
val reg = cubes get sender()
reg match {
case Some((registration, _)) =>
cubes = cubes + (sender() -> (registration, Some(reports)))
updateSystemState(checkInitState())
case _ =>
log.warning(s"""Received startup report from non-registered cube "${sender().path}".""")
}
}
def cubeStates: Iterable[LifecycleState] = {
val reportOptions = cubes.values map (_._2)
reportOptions map {
case None => Initializing
case Some(reports) => reports.state
}
}
val checkStateFailed: PartialFunction[Iterable[LifecycleState], LifecycleState] = {
case states if states exists (_ == Failed) =>
if (systemState != Failed) log.warning("Some cubes failed to initialize. Marking system state as Failed")
Failed
case _ if serviceRegistry.isAnyFailedToInitialize =>
if (systemState != Failed) log.warning("Some listeners failed to initialize. Marking system state as Failed")
Failed
case _ if extensions exists (_.exceptions.nonEmpty) =>
if (systemState != Failed) log.warning("Some extensions failed to initialize. Marking the system state as Failed")
Failed
}
val checkStateInitializing: PartialFunction[Iterable[LifecycleState], LifecycleState] = {
case states if states exists (_ == Initializing) => Initializing
case _ if pendingServiceStarts => Initializing
case _ if !activated => Initializing
}
val active = { _: Any => Active }
def failFastStrategy = checkStateFailed orElse checkStateInitializing applyOrElse (cubeStates, active)
def lenientStrategy = checkStateInitializing orElse checkStateFailed applyOrElse (cubeStates, active)
var checkInitState = lenientStrategy _
def pendingServiceStarts = servicesStarted && !serviceRegistry.isListenersBound
def updateSystemState(state: LifecycleState) {
if (state != systemState) {
systemState = state
state match { // Record and log the times.
case Initializing =>
systemStarted = Some(Timestamp(System.nanoTime, System.currentTimeMillis))
val elapsed = (systemStarted.get.nanos - systemStart.get.nanos) / 1000000
stateMXBean.initDuration = elapsed.asInstanceOf[Int]
log.info(s"squbs started in $elapsed milliseconds")
case Active =>
systemActive = Some(Timestamp(System.nanoTime, System.currentTimeMillis))
val elapsed = (systemActive.get.nanos - systemStart.get.nanos) / 1000000
stateMXBean.activationDuration = elapsed.asInstanceOf[Int]
log.info(s"squbs active in $elapsed milliseconds")
case Stopping =>
systemStop = Some(Timestamp(System.nanoTime, System.currentTimeMillis))
val elapsed = (systemStop.get.nanos - systemActive.getOrElse(systemStarted.get).nanos) / 1000000
log.info(s"squbs has been running for $elapsed milliseconds")
case Stopped =>
val current = Timestamp(System.nanoTime, System.currentTimeMillis)
val elapsed = (current.nanos - systemStop.get.nanos) / 1000000
log.info(s"squbs stopped in $elapsed milliseconds")
case _ =>
}
if (state != Stopped) // don't care about Stopped
lifecycleListeners = lifecycleListeners filterNot { case (actorRef, states, remove) =>
if (states.isEmpty || states.contains(state)) {
actorRef ! state // Send state to all listeners.
remove
} else false
}
}
}
}
class CubeSupervisor extends Actor with ActorLogging with GracefulStopHelper {
import context.dispatcher
import scala.collection.JavaConversions._
val cubeName = self.path.name
val actorErrorStatesAgent = Agent[Map[String, ActorErrorState]](Map())
implicit val timeout = UnicomplexBoot.defaultStartupTimeout
class CubeStateBean extends CubeStateMXBean {
override def getName: String = cubeName
override def getCubeState: String = cubeState.toString
override def getWellKnownActors: String = context.children.mkString(",")
override def getActorErrorStates: util.List[ActorErrorState] = actorErrorStatesAgent().values.toList
}
override def preStart() {
import org.squbs.unicomplex.JMX._
val cubeStateMXBean = new CubeStateBean
register(cubeStateMXBean, prefix + cubeStateName + cubeName)
}
override def postStop() {
import org.squbs.unicomplex.JMX._
unregister(prefix + cubeStateName + cubeName)
}
override val supervisorStrategy =
OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
case NonFatal(e) =>
val actorPath = sender().path.toStringWithoutAddress
log.warning(s"Received ${e.getClass.getName} with message ${e.getMessage} from $actorPath")
actorErrorStatesAgent.send{states =>
val stringWriter = new StringWriter()
e.printStackTrace(new PrintWriter(stringWriter))
val stackTrace = stringWriter.toString
val state = states.get(actorPath) match {
case Some(s) => s.copy(errorCount = s.errorCount + 1, latestException = stackTrace)
case _ => ActorErrorState(actorPath, 1, stackTrace)
}
states + (actorPath -> state)
}
Restart
}
private var cubeState: LifecycleState = Initializing
private var pendingContexts = 0
private var pendingNotifiees = Seq.empty[ActorRef]
private val initMap = mutable.HashMap.empty[ActorRef, Option[InitReport]]
private var maxChildTimeout = stopTimeout
Unicomplex() ! StopTimeout(maxChildTimeout * 2)
private val stopSet = mutable.Set.empty[ActorRef]
context.become(startupReceive orElse receive, discardOld = false)
case class ContextRegistrationResult(cubeActor: ActorRef, state: Try[RegisterContext])
def startupReceive: Actor.Receive = {
case StartCubeActor(props, name, initRequired) =>
val cubeActor = context.actorOf(props, name)
if (initRequired) initMap += cubeActor -> None
log.info(s"Started actor ${cubeActor.path}")
case StartCubeService(webContext, listeners, props, name, ps, initRequired)
if classOf[FlowSupplier].isAssignableFrom(props.actorClass) =>
val hostActor = context.actorOf(props, name)
initMap += hostActor -> None
pendingContexts += 1
(hostActor ? FlowRequest).mapTo[Try[Flow[RequestContext, RequestContext, NotUsed]]] onSuccess {
case Success(flow) =>
val reg = RegisterContext(listeners, webContext, FlowWrapper(flow, hostActor), ps)
(Unicomplex() ? reg).mapTo[Try[_]].map {
case Success(_) => ContextRegistrationResult(hostActor, Success(reg))
case Failure(t) => ContextRegistrationResult(hostActor, Failure(t))
} .pipeTo(self)
case Failure(e) =>
self ! ContextRegistrationResult(hostActor, Failure(e))
}
case StartCubeService(webContext, listeners, props, name, ps, initRequired) =>
val hostActor = context.actorOf(props, name)
import org.squbs.util.ConfigUtil._
val concurrency = context.system.settings.config.get[Int]("akka.http.server.pipelining-limit")
val flow = Flow[RequestContext].mapAsync(concurrency) { requestContext =>
(hostActor ? requestContext.request)
.collect { case response: HttpResponse => requestContext.copy(response = Some(Success(response))) }
.recover { case e => requestContext.copy(response = Some(Failure(e))) }
}
val wrapper = FlowWrapper(flow, hostActor)
if (initRequired && !(initMap contains hostActor)) initMap += hostActor -> None
val reg = RegisterContext(listeners, webContext, wrapper, ps)
pendingContexts += 1
(Unicomplex() ? reg).mapTo[Try[_]].map {
case Success(_) => ContextRegistrationResult(hostActor, Success(reg))
case Failure(t) => ContextRegistrationResult(hostActor, Failure(t))
} .pipeTo(self)
case ContextRegistrationResult(cubeActor, tr) =>
tr match {
case Failure(t) =>
initMap += cubeActor -> Some(Failure(t))
cubeState = Failed
Unicomplex() ! InitReports(cubeState, initMap.toMap)
case Success(reg) =>
log.info(s"Started service actor ${cubeActor.path} for context ${reg.webContext}")
}
pendingContexts -= 1
if (pendingContexts == 0 && pendingNotifiees.nonEmpty) {
pendingNotifiees.foreach(_ ! Started)
pendingNotifiees = Seq.empty
context.unbecome()
}
case StartFailure(t) =>
// Register the failure with a bogus noop actor so we have all failures.
// The real servicing actor was never created.
initMap += context.actorOf(Props[NoopActor]) -> Some(Failure(t))
cubeState = Failed
Unicomplex() ! InitReports(cubeState, initMap.toMap)
case Started => // Signals end of StartCubeActor messages. No more allowed after this.
if (initMap.isEmpty) {
cubeState = Active
Unicomplex() ! InitReports(cubeState, initMap.toMap)
}
if (pendingContexts == 0) {
sender() ! Started
context.unbecome()
}
else pendingNotifiees :+= sender()
}
def receive = {
case StopTimeout(timeout) =>
if (maxChildTimeout < timeout) {
maxChildTimeout = timeout
Unicomplex() ! StopTimeout(maxChildTimeout * 2)
}
stopSet += sender()
case GracefulStop => // The stop message should only come from the uniActor
if (sender() != Unicomplex())
log.error(s"got GracefulStop from ${sender()} instead of ${Unicomplex()}")
else
defaultMidActorStop(stopSet, maxChildTimeout)
case Initialized(report) =>
initMap get sender() match {
case Some(entry) => // Registered cube
entry match {
// First report or nothing previously marked as failure, just add/overwrite report
case None =>
initMap += sender() -> Some(report)
case Some(prevReport) if prevReport.isSuccess =>
initMap += sender() -> Some(report)
case _ => // There is some issue previously marked, usually a failure. Don't record new report.
// Only first failure should be recorded. Just leave it as failed. Don't touch.
}
// Check that all is initialized and whether it is all good.
if (!(initMap exists (_._2.isEmpty))) {
val finalMap = (initMap mapValues (_.get)).toMap
if (finalMap.exists(_._2.isFailure)) cubeState = Failed else cubeState = Active
Unicomplex() ! InitReports(cubeState, initMap.toMap)
}
case None => // Never registered cube
log.warning(s"""Actor "${sender().path}" updating startup status is not registered. """ +
"Please register by setting init-required = true in squbs-meta.conf")
}
case CheckInitStatus => // Explicitly requested reports have an attached requested flag as a tuple
sender() ! (InitReports(cubeState, initMap.toMap), true)
}
}
/**
* A noop actor used when a stub ActorRef is needed to register failures.
*/
private[unicomplex] class NoopActor extends Actor {
def receive = PartialFunction.empty
}
| SarathChandran/squbs | squbs-unicomplex/src/main/scala/org/squbs/unicomplex/Unicomplex.scala | Scala | apache-2.0 | 27,986 |
/*
* Copyright 2012 Eike Kettner
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eknet.publet.engine.convert
import org.eknet.publet.engine.PubletEngine
import org.eknet.publet.vfs.{Path, ContentResource, ContentType, Content}
import grizzled.slf4j.Logging
/**
*
* @author <a href="mailto:[email protected]">Eike Kettner</a>
* @since 29.03.12 12:46
*/
class DefaultConverterEngine(val name: Symbol) extends PubletEngine with ConverterEngine with ConverterRegistry with Logging {
def this() = this('convert)
def process(path: Path, data: ContentResource, target: ContentType): Option[Content] = {
//if target type is available return it, otherwise try to process
if (data.contentType == target) {
Some(data)
} else {
converterFor(data.contentType, target) match {
case None => {
error("no converter found: "+ data.contentType+" -> "+ target)
None
}
case Some(c) => Option(c(path, data))
}
}
}
}
| eikek/publet | publet/src/main/scala/org/eknet/publet/engine/convert/DefaultConverterEngine.scala | Scala | apache-2.0 | 1,520 |
/*
* ExampleTest.scala
* Tag for example tests.
*
* Created By: Avi Pfeffer ([email protected])
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test
import org.scalatest.Tag
object ExampleTest extends Tag("ExampleTest")
| wkretschmer/figaro | Figaro/src/test/scala/com/cra/figaro/test/ExampleTest.scala | Scala | bsd-3-clause | 465 |
package com.websudos.phantom.builder.serializers
import com.websudos.phantom.builder.query.QueryBuilderTest
import com.websudos.phantom.tables.BasicTable
import com.websudos.phantom.dsl._
import com.websudos.util.testing._
class SelectQuerySerialisationTest extends QueryBuilderTest {
"The select query builder" - {
"should serialize " - {
"serialise an allow filtering clause in the init position" in {
val id = gen[UUID]
val qb = BasicTable.select.where(_.id eqs id).allowFiltering().limit(5).queryString
qb shouldEqual s"SELECT * FROM phantom.BasicTable WHERE id = ${id.toString} LIMIT 5 ALLOW FILTERING;"
}
"serialize an allow filtering clause specified after a limit query" in {
val id = gen[UUID]
val qb = BasicTable.select.where(_.id eqs id).limit(5).allowFiltering().queryString
Console.println(qb)
qb shouldEqual s"SELECT * FROM phantom.BasicTable WHERE id = ${id.toString} LIMIT 5 ALLOW FILTERING;"
}
}
}
}
| analytically/phantom | phantom-dsl/src/test/scala/com/websudos/phantom/builder/serializers/SelectQuerySerialisationTest.scala | Scala | bsd-2-clause | 1,020 |
package darkpool.json
import java.util.UUID
import darkpool.models.orders._
import org.scalatest.{Matchers, FunSpec}
import darkpool.models.TradingJsonProtocol._
import spray.json._
class JsonConversionSpec extends FunSpec with Matchers {
describe("MarketOrder") {
it("can serialize and deserialize a market order") {
val marketOrderJson = """{ "orderType": "sell", "orderQuantity": 22, "orderId": "1eb5576d-d983-4073-9574-0d10de9a657a", "accountId": "21bff678-1963-4a2b-9121-6b4e514504df" }"""
val marketOrder = MarketOrder(
SellOrder,
22,
UUID.fromString("1eb5576d-d983-4073-9574-0d10de9a657a"),
UUID.fromString("21bff678-1963-4a2b-9121-6b4e514504df")
)
marketOrderJson.asJson.convertTo[MarketOrder] shouldBe marketOrder
marketOrderJson.asJson.convertTo[Order] shouldBe marketOrder
}
}
describe("LimitOrder") {
it("can serialize and deserialize a limit order") {
val limitOrderJson = """{ "orderType": "sell", "orderThreshold": 10.53, "orderQuantity": 22, "orderId": "1eb5576d-d983-4073-9574-0d10de9a657a", "accountId": "21bff678-1963-4a2b-9121-6b4e514504df" }"""
val limitOrder = LimitOrder(
SellOrder,
22,
10.53,
UUID.fromString("1eb5576d-d983-4073-9574-0d10de9a657a"),
UUID.fromString("21bff678-1963-4a2b-9121-6b4e514504df")
)
limitOrderJson.asJson.convertTo[LimitOrder] shouldBe limitOrder
limitOrderJson.asJson.convertTo[Order] shouldBe limitOrder
}
}
}
| film42/dark-pool | src/test/scala/darkpool/json/JsonConversionSpec.scala | Scala | mit | 1,525 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.livy.thriftserver.cli
import java.io.IOException
import java.net.{InetAddress, UnknownHostException}
import java.util
import java.util.Collections
import javax.security.auth.login.LoginException
import scala.collection.JavaConverters._
import com.google.common.base.Preconditions.checkArgument
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.security.authentication.util.KerberosName
import org.apache.hadoop.security.authorize.ProxyUsers
import org.apache.hadoop.util.StringUtils
import org.apache.hive.service.{ServiceException, ServiceUtils}
import org.apache.hive.service.auth.{HiveAuthConstants, TSetIpAddressProcessor}
import org.apache.hive.service.auth.HiveAuthConstants.AuthTypes
import org.apache.hive.service.cli._
import org.apache.hive.service.rpc.thrift._
import org.apache.thrift.TException
import org.apache.thrift.server.ServerContext
import org.apache.livy.LivyConf
import org.apache.livy.thriftserver.{LivyCLIService, LivyThriftServer, SessionInfo, ThriftService}
import org.apache.livy.thriftserver.auth.AuthFactory
/**
 * This class is ported from Hive. We cannot reuse Hive's version because we need to use
 * `LivyCLIService`, `LivyConf` and `AuthFactory` instead of their Hive counterparts.
*/
abstract class ThriftCLIService(val cliService: LivyCLIService, val serviceName: String)
extends ThriftService(serviceName) with TCLIService.Iface with Runnable {
def hiveAuthFactory: AuthFactory
protected val currentServerContext = new ThreadLocal[ServerContext]
protected var portNum: Int = 0
protected var serverIPAddress: InetAddress = _
protected var hiveHost: String = _
private var isStarted: Boolean = false
protected var isEmbedded: Boolean = false
protected var livyConf: LivyConf = _
protected var minWorkerThreads: Int = 0
protected var maxWorkerThreads: Int = 0
protected var workerKeepAliveTime: Long = 0L
private var serverThread: Thread = _
override def init(conf: LivyConf): Unit = {
livyConf = conf
hiveHost = livyConf.get(LivyConf.THRIFT_BIND_HOST)
try {
if (hiveHost == null || hiveHost.isEmpty) {
serverIPAddress = InetAddress.getLocalHost
} else {
serverIPAddress = InetAddress.getByName(hiveHost)
}
} catch {
case e: UnknownHostException =>
throw new ServiceException(e)
}
portNum = livyConf.getInt(LivyConf.THRIFT_SERVER_PORT)
workerKeepAliveTime = livyConf.getTimeAsMs(LivyConf.THRIFT_WORKER_KEEPALIVE_TIME) / 1000
minWorkerThreads = livyConf.getInt(LivyConf.THRIFT_MIN_WORKER_THREADS)
maxWorkerThreads = livyConf.getInt(LivyConf.THRIFT_MAX_WORKER_THREADS)
super.init(livyConf)
}
protected def getKeyStorePassword(): String =
Option(livyConf.get(LivyConf.SSL_KEYSTORE_PASSWORD)).orElse {
val credentialProviderPath = livyConf.get(LivyConf.HADOOP_CREDENTIAL_PROVIDER_PATH)
val hadoopConf = new Configuration()
if (credentialProviderPath != null) {
hadoopConf.set("hadoop.security.credential.provider.path", credentialProviderPath)
}
Option(hadoopConf.getPassword(LivyConf.SSL_KEYSTORE_PASSWORD.key)).map(_.mkString)
}.getOrElse {
throw new IllegalArgumentException(
"Livy keystore password not configured for SSL connection")
}
protected def initServer(): Unit
override def start(): Unit = {
super.start()
if (!isStarted && !isEmbedded) {
initServer()
serverThread = new Thread(this)
serverThread.setName("Thrift Server")
serverThread.start()
isStarted = true
}
}
protected def stopServer(): Unit
override def stop(): Unit = {
if (isStarted && !isEmbedded) {
if (serverThread != null) {
serverThread.interrupt()
serverThread = null
}
stopServer()
isStarted = false
}
super.stop()
}
def getPortNumber: Int = portNum
def getServerIPAddress: InetAddress = serverIPAddress
@throws[TException]
override def GetDelegationToken(req: TGetDelegationTokenReq): TGetDelegationTokenResp = {
val resp: TGetDelegationTokenResp = new TGetDelegationTokenResp
if (!hiveAuthFactory.isSASLKerberosUser) {
resp.setStatus(unsecureTokenErrorStatus)
} else {
try {
val token = cliService.getDelegationToken(
new SessionHandle(req.getSessionHandle), hiveAuthFactory, req.getOwner, req.getRenewer)
resp.setDelegationToken(token)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: HiveSQLException =>
error("Error obtaining delegation token", e)
val tokenErrorStatus = HiveSQLException.toTStatus(e)
tokenErrorStatus.setSqlState("42000")
resp.setStatus(tokenErrorStatus)
}
}
resp
}
@throws[TException]
override def CancelDelegationToken(req: TCancelDelegationTokenReq): TCancelDelegationTokenResp = {
val resp: TCancelDelegationTokenResp = new TCancelDelegationTokenResp
if (!hiveAuthFactory.isSASLKerberosUser) {
resp.setStatus(unsecureTokenErrorStatus)
} else {
try {
cliService.cancelDelegationToken(
new SessionHandle(req.getSessionHandle), hiveAuthFactory, req.getDelegationToken)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: HiveSQLException =>
error("Error canceling delegation token", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
}
resp
}
@throws[TException]
override def RenewDelegationToken(req: TRenewDelegationTokenReq): TRenewDelegationTokenResp = {
val resp: TRenewDelegationTokenResp = new TRenewDelegationTokenResp
if (!hiveAuthFactory.isSASLKerberosUser) {
resp.setStatus(unsecureTokenErrorStatus)
} else {
try {
cliService.renewDelegationToken(
new SessionHandle(req.getSessionHandle), hiveAuthFactory, req.getDelegationToken)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: HiveSQLException =>
error("Error obtaining renewing token", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
}
resp
}
private def unsecureTokenErrorStatus: TStatus = {
val errorStatus: TStatus = new TStatus(TStatusCode.ERROR_STATUS)
errorStatus.setErrorMessage(
"Delegation token only supported over remote client with kerberos authentication")
errorStatus
}
@throws[TException]
override def OpenSession(req: TOpenSessionReq): TOpenSessionResp = {
info("Client protocol version: " + req.getClient_protocol)
val resp: TOpenSessionResp = new TOpenSessionResp
try {
val sessionHandle = getSessionHandle(req, resp)
resp.setSessionHandle(sessionHandle.toTSessionHandle)
val configurationMap: util.Map[String, String] = new util.HashMap[String, String]
// Set the updated fetch size from the server into the configuration map for the client
val defaultFetchSize =
Integer.toString(livyConf.getInt(LivyConf.THRIFT_RESULTSET_DEFAULT_FETCH_SIZE))
configurationMap.put(LivyConf.THRIFT_RESULTSET_DEFAULT_FETCH_SIZE.key, defaultFetchSize)
resp.setConfiguration(configurationMap)
resp.setStatus(ThriftCLIService.OK_STATUS)
Option(currentServerContext.get).foreach { context =>
context.asInstanceOf[ThriftCLIServerContext].setSessionHandle(sessionHandle)
}
} catch {
case e: Exception =>
warn("Error opening session: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def SetClientInfo(req: TSetClientInfoReq): TSetClientInfoResp = {
// TODO: We don't do anything for now, just log this for debugging.
// We may be able to make use of this later, e.g. for workload management.
if (req.isSetConfiguration) {
val sh = new SessionHandle(req.getSessionHandle)
val sb = new StringBuilder("Client information for ").append(sh).append(": ")
def processEntry(e: util.Map.Entry[String, String]): Unit = {
sb.append(e.getKey).append(" = ").append(e.getValue)
if ("ApplicationName" == e.getKey) {
cliService.setApplicationName(sh, e.getValue)
}
}
val entries = req.getConfiguration.entrySet.asScala.toSeq
try {
entries.headOption.foreach(processEntry)
entries.tail.foreach { e =>
sb.append(", ")
processEntry(e)
}
} catch {
case ex: Exception =>
warn("Error setting application name", ex)
return new TSetClientInfoResp(HiveSQLException.toTStatus(ex))
}
info(sb.toString())
}
new TSetClientInfoResp(ThriftCLIService.OK_STATUS)
}
private def getIpAddress: String = {
// Http transport mode.
    // We set the thread-local IP address in ThriftHttpServlet.
val clientIpAddress = if (LivyThriftServer.isHTTPTransportMode(livyConf)) {
SessionInfo.getIpAddress
} else if (hiveAuthFactory.isSASLWithKerberizedHadoop) {
hiveAuthFactory.getIpAddress
} else {
// NOSASL
TSetIpAddressProcessor.getUserIpAddress
}
debug(s"Client's IP Address: $clientIpAddress")
clientIpAddress
}
/**
* Returns the effective username.
* 1. If livy.server.thrift.allow.user.substitution = false: the username of the connecting user
* 2. If livy.server.thrift.allow.user.substitution = true: the username of the end user,
* that the connecting user is trying to proxy for.
* This includes a check whether the connecting user is allowed to proxy for the end user.
*/
@throws[HiveSQLException]
@throws[IOException]
private def getUserName(req: TOpenSessionReq): String = {
val username = if (LivyThriftServer.isHTTPTransportMode(livyConf)) {
Option(SessionInfo.getUserName).getOrElse(req.getUsername)
} else if (hiveAuthFactory.isSASLWithKerberizedHadoop) {
Option(hiveAuthFactory.getRemoteUser).orElse(Option(TSetIpAddressProcessor.getUserName))
.getOrElse(req.getUsername)
} else {
Option(TSetIpAddressProcessor.getUserName).getOrElse(req.getUsername)
}
val effectiveClientUser =
getProxyUser(getShortName(username), req.getConfiguration, getIpAddress)
debug(s"Client's username: $effectiveClientUser")
effectiveClientUser
}
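  // Illustrative example (assumption, not from the original source): with
  // LivyConf.THRIFT_ALLOW_USER_SUBSTITUTION enabled, a connection authenticated as "hue" that
  // sends HiveAuthConstants.HS2_PROXY_USER -> "alice" in the session configuration resolves to
  // the effective username "alice" (after verifyProxyAccess confirms "hue" may proxy for
  // "alice"); with substitution disabled the effective username stays "hue".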
@throws[IOException]
private def getShortName(userName: String): String = {
Option(userName).map { un =>
if (hiveAuthFactory.isSASLKerberosUser) {
        // KerberosName.getShortName can only be used for a kerberos user
new KerberosName(un).getShortName
} else {
val indexOfDomainMatch = ServiceUtils.indexOfDomainMatch(un)
if (indexOfDomainMatch <= 0) {
un
} else {
un.substring(0, indexOfDomainMatch)
}
}
}.orNull
}
/**
* Create a session handle
*/
@throws[HiveSQLException]
@throws[LoginException]
@throws[IOException]
private[thriftserver] def getSessionHandle(
req: TOpenSessionReq, res: TOpenSessionResp): SessionHandle = {
val userName = getUserName(req)
val ipAddress = getIpAddress
val protocol = getMinVersion(LivyCLIService.SERVER_VERSION, req.getClient_protocol)
val sessionHandle =
if (livyConf.getBoolean(LivyConf.THRIFT_ENABLE_DOAS) && (userName != null)) {
cliService.openSessionWithImpersonation(
protocol, userName, req.getPassword, ipAddress, req.getConfiguration, null)
} else {
cliService.openSession(protocol, userName, req.getPassword, ipAddress, req.getConfiguration)
}
res.setServerProtocolVersion(protocol)
sessionHandle
}
@throws[HiveSQLException]
private def getProgressedPercentage(opHandle: OperationHandle): Double = {
checkArgument(OperationType.EXECUTE_STATEMENT == opHandle.getOperationType)
0.0
}
private def getMinVersion(versions: TProtocolVersion*): TProtocolVersion = {
val values = TProtocolVersion.values
var current = values(values.length - 1).getValue
versions.foreach { version =>
if (current > version.getValue) {
current = version.getValue
}
}
val res = values.find(_.getValue == current)
assert(res.isDefined)
res.get
}
@throws[TException]
override def CloseSession(req: TCloseSessionReq): TCloseSessionResp = {
val resp = new TCloseSessionResp
try {
val sessionHandle = new SessionHandle(req.getSessionHandle)
cliService.closeSession(sessionHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
Option(currentServerContext.get).foreach { ctx =>
ctx.asInstanceOf[ThriftCLIServerContext].setSessionHandle(null)
}
} catch {
case e: Exception =>
warn("Error closing session: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetInfo(req: TGetInfoReq): TGetInfoResp = {
val resp = new TGetInfoResp
try {
val getInfoValue = cliService.getInfo(
new SessionHandle(req.getSessionHandle), GetInfoType.getGetInfoType(req.getInfoType))
resp.setInfoValue(getInfoValue.toTGetInfoValue)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting info: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def ExecuteStatement(req: TExecuteStatementReq): TExecuteStatementResp = {
val resp = new TExecuteStatementResp
try {
val sessionHandle = new SessionHandle(req.getSessionHandle)
val statement = req.getStatement
val confOverlay = req.getConfOverlay
val runAsync = req.isRunAsync
val queryTimeout = req.getQueryTimeout
val operationHandle = if (runAsync) {
cliService.executeStatementAsync(sessionHandle, statement, confOverlay, queryTimeout)
} else {
cliService.executeStatement(sessionHandle, statement, confOverlay, queryTimeout)
}
resp.setOperationHandle(operationHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error executing statement: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetTypeInfo(req: TGetTypeInfoReq): TGetTypeInfoResp = {
val resp = new TGetTypeInfoResp
try {
val operationHandle = cliService.getTypeInfo(createSessionHandle(req.getSessionHandle))
resp.setOperationHandle(operationHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting type info: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetCatalogs(req: TGetCatalogsReq): TGetCatalogsResp = {
val resp = new TGetCatalogsResp
try {
val opHandle = cliService.getCatalogs(createSessionHandle(req.getSessionHandle))
resp.setOperationHandle(opHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting catalogs: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetSchemas(req: TGetSchemasReq): TGetSchemasResp = {
val resp = new TGetSchemasResp
try {
val opHandle = cliService.getSchemas(createSessionHandle(req.getSessionHandle),
req.getCatalogName, req.getSchemaName)
resp.setOperationHandle(opHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting schemas: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetTables(req: TGetTablesReq): TGetTablesResp = {
val resp = new TGetTablesResp
try {
val opHandle = cliService.getTables(
createSessionHandle(req.getSessionHandle),
req.getCatalogName,
req.getSchemaName,
req.getTableName,
req.getTableTypes)
resp.setOperationHandle(opHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting tables: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetTableTypes(req: TGetTableTypesReq): TGetTableTypesResp = {
val resp = new TGetTableTypesResp
try {
val opHandle = cliService.getTableTypes(createSessionHandle(req.getSessionHandle))
resp.setOperationHandle(opHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting table types: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetColumns(req: TGetColumnsReq): TGetColumnsResp = {
val resp = new TGetColumnsResp
try {
val opHandle = cliService.getColumns(
createSessionHandle(req.getSessionHandle),
req.getCatalogName,
req.getSchemaName,
req.getTableName,
req.getColumnName)
resp.setOperationHandle(opHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting columns: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetFunctions(req: TGetFunctionsReq): TGetFunctionsResp = {
val resp = new TGetFunctionsResp
try {
val opHandle = cliService.getFunctions(
createSessionHandle(req.getSessionHandle),
req.getCatalogName,
req.getSchemaName,
req.getFunctionName)
resp.setOperationHandle(opHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting functions: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetOperationStatus(req: TGetOperationStatusReq): TGetOperationStatusResp = {
val resp = new TGetOperationStatusResp
val operationHandle = new OperationHandle(req.getOperationHandle)
try {
val operationStatus = cliService.getOperationStatus(operationHandle, req.isGetProgressUpdate)
resp.setOperationState(operationStatus.state.toTOperationState)
resp.setErrorMessage(operationStatus.state.getErrorMessage)
val opException = operationStatus.operationException
resp.setOperationStarted(operationStatus.operationStarted)
resp.setOperationCompleted(operationStatus.operationCompleted)
resp.setHasResultSet(operationStatus.hasResultSet)
val executionStatus = TJobExecutionStatus.NOT_AVAILABLE
resp.setProgressUpdateResponse(new TProgressUpdateResp(
Collections.emptyList[String],
Collections.emptyList[util.List[String]],
0.0D,
executionStatus,
"",
0L))
if (opException != null) {
resp.setSqlState(opException.getSQLState)
resp.setErrorCode(opException.getErrorCode)
if (opException.getErrorCode == 29999) {
resp.setErrorMessage(StringUtils.stringifyException(opException))
} else {
resp.setErrorMessage(opException.getMessage)
}
} else if (OperationType.EXECUTE_STATEMENT == operationHandle.getOperationType) {
resp.getProgressUpdateResponse.setProgressedPercentage(
getProgressedPercentage(operationHandle))
}
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting operation status: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def CancelOperation(req: TCancelOperationReq): TCancelOperationResp = {
val resp = new TCancelOperationResp
try {
cliService.cancelOperation(new OperationHandle(req.getOperationHandle))
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error cancelling operation: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def CloseOperation(req: TCloseOperationReq): TCloseOperationResp = {
val resp = new TCloseOperationResp
try {
cliService.closeOperation(new OperationHandle(req.getOperationHandle))
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error closing operation: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetResultSetMetadata(req: TGetResultSetMetadataReq): TGetResultSetMetadataResp = {
val resp = new TGetResultSetMetadataResp
try {
val schema = cliService.getResultSetMetadata(new OperationHandle(req.getOperationHandle))
resp.setSchema(schema.toTTableSchema)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting result set metadata: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def FetchResults(req: TFetchResultsReq): TFetchResultsResp = {
val resp = new TFetchResultsResp
try {
// Set fetch size
val maxFetchSize = livyConf.getInt(LivyConf.THRIFT_RESULTSET_MAX_FETCH_SIZE)
if (req.getMaxRows > maxFetchSize) {
req.setMaxRows(maxFetchSize)
}
val rowSet = cliService.fetchResults(
new OperationHandle(req.getOperationHandle),
FetchOrientation.getFetchOrientation(req.getOrientation),
req.getMaxRows,
FetchType.getFetchType(req.getFetchType))
resp.setResults(rowSet.toTRowSet)
resp.setHasMoreRows(false)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error fetching results: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetPrimaryKeys(req: TGetPrimaryKeysReq): TGetPrimaryKeysResp = {
val resp = new TGetPrimaryKeysResp
try {
val opHandle = cliService.getPrimaryKeys(
new SessionHandle(req.getSessionHandle),
req.getCatalogName,
req.getSchemaName,
req.getTableName)
resp.setOperationHandle(opHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting functions: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetCrossReference(req: TGetCrossReferenceReq): TGetCrossReferenceResp = {
val resp = new TGetCrossReferenceResp
try {
val opHandle = cliService.getCrossReference(
new SessionHandle(req.getSessionHandle),
req.getParentCatalogName,
req.getParentSchemaName,
req.getParentTableName,
req.getForeignCatalogName,
req.getForeignSchemaName,
req.getForeignTableName)
resp.setOperationHandle(opHandle.toTOperationHandle)
resp.setStatus(ThriftCLIService.OK_STATUS)
} catch {
case e: Exception =>
warn("Error getting functions: ", e)
resp.setStatus(HiveSQLException.toTStatus(e))
}
resp
}
@throws[TException]
override def GetQueryId(req: TGetQueryIdReq): TGetQueryIdResp = {
try {
new TGetQueryIdResp(cliService.getQueryId(req.getOperationHandle))
} catch {
case e: HiveSQLException => throw new TException(e)
}
}
override def run(): Unit
/**
* If the proxy user name is provided then check privileges to substitute the user.
*/
@throws[HiveSQLException]
private def getProxyUser(
realUser: String,
sessionConf: util.Map[String, String],
ipAddress: String): String = {
var proxyUser: String = null
    // We set the thread-local proxy username in ThriftHttpServlet.
if (livyConf.get(LivyConf.THRIFT_TRANSPORT_MODE).equalsIgnoreCase("http")) {
proxyUser = SessionInfo.getProxyUserName
debug("Proxy user from query string: " + proxyUser)
}
if (proxyUser == null && sessionConf != null &&
sessionConf.containsKey(HiveAuthConstants.HS2_PROXY_USER)) {
val proxyUserFromThriftBody = sessionConf.get(HiveAuthConstants.HS2_PROXY_USER)
debug("Proxy user from thrift body: " + proxyUserFromThriftBody)
proxyUser = proxyUserFromThriftBody
}
if (proxyUser == null) return realUser
// check whether substitution is allowed
if (!livyConf.getBoolean(LivyConf.THRIFT_ALLOW_USER_SUBSTITUTION)) {
throw new HiveSQLException("Proxy user substitution is not allowed")
}
// If there's no authentication, then directly substitute the user
if (AuthTypes.NONE.toString.equalsIgnoreCase(livyConf.get(LivyConf.THRIFT_AUTHENTICATION))) {
return proxyUser
}
// Verify proxy user privilege of the realUser for the proxyUser
verifyProxyAccess(realUser, proxyUser, ipAddress)
debug("Verified proxy user: " + proxyUser)
proxyUser
}
@throws[HiveSQLException]
private def verifyProxyAccess(realUser: String, proxyUser: String, ipAddress: String): Unit = {
try {
val sessionUgi = if (UserGroupInformation.isSecurityEnabled) {
UserGroupInformation.createProxyUser(
new KerberosName(realUser).getServiceName, UserGroupInformation.getLoginUser)
} else {
UserGroupInformation.createRemoteUser(realUser)
}
if (!proxyUser.equalsIgnoreCase(realUser)) {
ProxyUsers.refreshSuperUserGroupsConfiguration()
ProxyUsers.authorize(UserGroupInformation.createProxyUser(proxyUser, sessionUgi), ipAddress)
}
} catch {
case e: IOException =>
throw new HiveSQLException(
s"Failed to validate proxy privilege of $realUser for $proxyUser", "08S01", e)
}
}
private def createSessionHandle(tHandle: TSessionHandle): SessionHandle = {
val protocolVersion = cliService.getSessionManager
.getSessionInfo(new SessionHandle(tHandle))
.protocolVersion
new SessionHandle(tHandle, protocolVersion)
}
}
object ThriftCLIService {
private val OK_STATUS: TStatus = new TStatus(TStatusCode.SUCCESS_STATUS)
}
private[thriftserver] class ThriftCLIServerContext extends ServerContext {
private var sessionHandle: SessionHandle = _
def setSessionHandle(sessionHandle: SessionHandle): Unit = {
this.sessionHandle = sessionHandle
}
def getSessionHandle: SessionHandle = sessionHandle
}
| ajbozarth/incubator-livy | thriftserver/server/src/main/scala/org/apache/livy/thriftserver/cli/ThriftCLIService.scala | Scala | apache-2.0 | 27,629 |
package com.github.chengpohi.infrastructure
import com.github.chengpohi.infrastructure.utils.HttpSupport
import com.github.chengpohi.infrastructure.web.WebSecurity
trait BaseController extends HttpSupport with WebSecurity {
repository: Repository =>
}
| chengpohi/coolmarks | src/main/scala/com/github/chengpohi/infrastructure/BaseController.scala | Scala | apache-2.0 | 256 |
package korolev.server
import korolev.Async
import scala.collection.concurrent.TrieMap
import scala.language.higherKinds
/**
* @author Aleksey Fomkin <[email protected]>
*/
abstract class StateStorage[F[+_]: Async, T] {
import StateStorage.{DeviceId, SessionId}
/**
* Initialize a new state for a new session under the device
* @param deviceId Identifier of device
* @return Future with new state
*/
def initial(deviceId: DeviceId): F[T]
/**
    * Restore a session from storage when initializing a new one
    * @return Future with the state if the session
    *         already exists, or a future
    *         with None if it doesn't
*/
def read(deviceId: DeviceId, sessionId: SessionId): F[T]
/**
* Save session to storage
* @return Future of successful saving
*/
def write(deviceId: DeviceId, sessionId: SessionId, value: T): F[T]
}
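// Implementation sketch (illustrative only, not part of the original file): a custom backend
// only has to provide initial/read/write, e.g.
//
//   class MyStorage extends StateStorage[Future, MyState] {
//     def initial(deviceId: DeviceId): Future[MyState] = Future.successful(MyState.empty)
//     def read(deviceId: DeviceId, sessionId: SessionId): Future[MyState] = ...   // look up the store
//     def write(deviceId: DeviceId, sessionId: SessionId, value: MyState): Future[MyState] = ...
//   }
//
// where MyState and the store lookups are hypothetical.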
object StateStorage {
type DeviceId = String
type SessionId = String
/**
* Initialize simple in-memory storage (based on TrieMap).
* @param initialState State factory
* @tparam T Type of state
* @return The state storage
*/
def default[F[+_]: Async, T](initialState: => T): StateStorage[F, T] = new StateStorage[F, T] {
val storage = TrieMap.empty[String, T]
def read(deviceId: DeviceId, sessionId: SessionId): F[T] = {
val state = storage.getOrElseUpdate(deviceId + sessionId, initialState)
Async[F].pure(state)
}
def write(deviceId: String, sessionId: String, value: T): F[T] = {
storage.put(deviceId + sessionId, value)
Async[F].pure(value)
}
def initial(deviceId: String): F[T] = Async[F].pure(initialState)
}
}
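// Usage sketch (illustrative, not from the original file): a service would typically be
// configured with the in-memory default, e.g.
//
//   val storage = StateStorage.default[Future, MyState](MyState.initial)
//
// where MyState is an application-defined state type; each new device/session pair then starts
// from MyState.initial and later writes overwrite the corresponding TrieMap entry.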
| PhilAndrew/JumpMicro | JMSangriaGraphql/src/main/scala/korolev/server/StateStorage.scala | Scala | mit | 1,690 |
// scalac: -Xfatal-warnings
//
object OhNoes {
sealed trait F
sealed abstract class FA extends F
sealed abstract class FB extends F
case object FA1 extends FA
case object FB1 extends FB
case object FB2 extends FB
sealed trait G
case object G1 extends G
case object G2 extends G
sealed trait H
case class H1(a: FB, b: G) extends H
case class H2(a: F) extends H
val demo: H => Unit = {
case H1(FB1, G1) =>
case H1(FB2, G2) =>
case H2(_: FB) =>
case H2(_: FA) =>
case H1(FB1, G2) =>
case H1(FB2, G1) =>
}
}
| lrytz/scala | test/files/pos/t9411a.scala | Scala | apache-2.0 | 568 |
package org.openapitools.client.api
import argonaut._
import argonaut.EncodeJson._
import argonaut.DecodeJson._
import org.http4s.{EntityDecoder, EntityEncoder}
import org.http4s.argonaut._
import org.joda.time.DateTime
import ResponseTimeMonitorData._
case class ResponseTimeMonitorData (
`class`: Option[String],
timestamp: Option[Integer],
average: Option[Integer])
object ResponseTimeMonitorData {
import DateTimeCodecs._
implicit val ResponseTimeMonitorDataCodecJson: CodecJson[ResponseTimeMonitorData] = CodecJson.derive[ResponseTimeMonitorData]
implicit val ResponseTimeMonitorDataDecoder: EntityDecoder[ResponseTimeMonitorData] = jsonOf[ResponseTimeMonitorData]
implicit val ResponseTimeMonitorDataEncoder: EntityEncoder[ResponseTimeMonitorData] = jsonEncoderOf[ResponseTimeMonitorData]
}
| cliffano/swaggy-jenkins | clients/scalaz/generated/src/main/scala/org/openapitools/client/api/ResponseTimeMonitorData.scala | Scala | mit | 814 |
package ml.wolfe.term
import ml.wolfe.term
/**
* @author sameer
* @since 4/9/15.
trait Marginalizer {
val input: Settings
val inputMsgs: Msgs
val outputMsgs: Msgs
def margs()(implicit execution: Execution)
}
trait MarginalizerFactory {
def marginalizer(term: DoubleTerm, wrt: Seq[Var[Dom]])(obs: Settings, msgs: Msgs): Marginalizer
}
object Marginalizer {
def sumProduct(implicit params: MaxProductParameters) = new ArgmaxerFactory {
def argmaxer(term: DoubleTerm, wrt: Seq[Var[Dom]])(obs: Settings, msgs: Msgs) =
new MaxProductBP(term, wrt, obs, msgs)(params)
}
}
/**
* @author sameer
*/
class ExhaustiveSearchMarginalizer(val obj: DoubleTerm, val wrt: Seq[Var[Dom]], val observed: Seq[Var[Dom]],
val input: Settings, val inputMsgs: Msgs) extends Marginalizer {
require(wrt.forall(_.domain.isDiscrete), "Cannot do exhaustive search over continuous domains")
val target = obj.vars.filterNot(v => wrt.contains(v) || observed.contains(v))
val varyingVars = (wrt ++ target).distinct
val settingsToVary = Settings.fromSeq(varyingVars.map(_.domain.createSetting()))
val objInput = obj.createInputSettings()
val toVary2wrt = VariableMapping(varyingVars, wrt)
val toVary2target = VariableMapping(varyingVars, target)
val toVary2obj = VariableMapping(varyingVars, obj.vars)
val obs2full = VariableMapping(observed, obj.vars)
val allSettings = new term.AllSettings(varyingVars.map(_.domain).toIndexedSeq, settingsToVary)(_ => {})
//link varying settings and observed settings to the input settings of the body evaluator
toVary2obj.linkTargetsToSource(settingsToVary, objInput)
obs2full.linkTargetsToSource(input, objInput)
val objEval = obj.evaluatorImpl(objInput)
val outputMsgs = Msgs(target.map(_.domain.createZeroMsg()))
def margs()(implicit execution: Execution) = {
for (i <- 0 until outputMsgs.length) outputMsgs(i) := Double.NegativeInfinity
allSettings.loopSettings { settings =>
objEval.eval()
//add penalties from incoming messages based on current setting
var penalized = objEval.output.cont(0)
for ((toVaryIndex, wrtIndex) <- toVary2wrt.pairs) {
val currentSetting = settings(toVaryIndex)
for (i <- 0 until inputMsgs(wrtIndex).disc.length) {
val currentValue = currentSetting.disc(i)
val currentMsg = inputMsgs(wrtIndex).disc(i).msg(currentValue)
penalized += currentMsg
}
}
//now update outgoing messages with the max of their current value and the new score
for ((toVaryIndex, targetIndex) <- toVary2target.pairs) {
val currentSetting = settings(toVaryIndex)
for (i <- 0 until outputMsgs(targetIndex).disc.length) {
val currentValue = currentSetting.disc(i)
val tgt = outputMsgs(targetIndex).disc(i)
tgt.msg(currentValue) = (tgt.msg(currentValue) + penalized)
}
}
}
}
}
*/
| wolfe-pack/wolfe | wolfe-core/src/main/scala/ml/wolfe/term/Marginals.scala | Scala | apache-2.0 | 2,963 |
/*
Copyright (c) 2016, Rice University
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of Rice University
nor the names of its contributors may be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.rdd.cl._
import Array._
import scala.math._
import org.apache.spark.rdd._
import java.net._
object SparkSimple {
def main(args : Array[String]) {
if (args.length < 1) {
println("usage: SparkSimple cmd")
return;
}
val cmd = args(0)
if (cmd == "convert") {
convert(args.slice(1, args.length))
} else if (cmd == "run") {
run_simple(args.slice(2, args.length), args(1).toBoolean)
} else if (cmd == "check") {
val correct : Array[Int] = run_simple(args.slice(1, args.length), false)
val actual : Array[Int] = run_simple(args.slice(1, args.length), true)
assert(correct.length == actual.length)
for (i <- 0 until correct.length) {
val a : Int = correct(i)
val b : Int = actual(i)
var error : Boolean = false
if (a != b) {
System.err.println(i + " expected " + a + " but got " + b)
error = true
}
if (error) System.exit(1)
}
System.err.println("PASSED")
}
}
def get_spark_context(appName : String) : SparkContext = {
val conf = new SparkConf()
conf.setAppName(appName)
val localhost = InetAddress.getLocalHost
conf.setMaster("spark://" + localhost.getHostName + ":7077") // 7077 is the default port
return new SparkContext(conf)
}
def run_simple(args : Array[String], useSwat : Boolean) : Array[Int] = {
if (args.length != 1) {
println("usage: SparkSimple run input-path");
return new Array[Int](0);
}
val sc = get_spark_context("Spark Simple");
val inputPath = args(0)
val inputs_raw : RDD[Int] = sc.objectFile[Int](inputPath).cache
val inputs = if (useSwat) CLWrapper.cl[Int](inputs_raw) else inputs_raw
val arr : Array[Int] = Array(1, 2, 3, 4, 5)
val broadcasted = sc.broadcast(arr)
val outputs : RDD[Int] = inputs.map(v => {
var sum = v
var i = 0
while (i < 5) {
sum += broadcasted.value(i)
i += 1
}
sum
})
val outputs2 : Array[Int] = outputs.collect
broadcasted.unpersist
sc.stop
outputs2
}
def convert(args : Array[String]) {
if (args.length != 2) {
println("usage: SparkSimple convert input-dir output-dir");
return
}
val sc = get_spark_context("Spark KMeans Converter");
val inputDir = args(0)
var outputDir = args(1)
val input = sc.textFile(inputDir)
    val converted = input.map(line => line.toInt)
converted.saveAsObjectFile(outputDir)
}
}
| agrippa/spark-swat | functional-tests/broadcast-var/src/main/scala/dense-vector-input/SparkSimple.scala | Scala | bsd-3-clause | 4,508 |
object CommandLineArguments {
def main(args: Array[String]) {
val size = args.length
println(s"$size arguments were provided:")
args.foreach(println(_))
}
} | DWiechert/rosetta-stone | basics/command-line-arguments/scala/CommandLineArguments.scala | Scala | apache-2.0 | 178 |
package typeprogramming
/**
* Created by ariwaranosai on 16/3/25.
*
*/
object SKI {
def main(args: Array[String]) {
trait Term {
type ap[x <: Term] <: Term
type eval <: Term
}
// The S combinator
trait S extends Term {
type ap[x <: Term] = S1[x]
type eval = S
}
trait S1[x <: Term] extends Term {
type ap[y <: Term] = S2[x, y]
type eval = S1[x]
}
trait S2[x <: Term, y <: Term] extends Term {
type ap[z <: Term] = S3[x, y, z]
type eval = S2[x, y]
}
trait S3[x <: Term, y <: Term, z <: Term] extends Term {
type ap[v <: Term] = eval#ap[v]
type eval = x#ap[z]#ap[y#ap[z]]#eval
}
// The K combinator
trait K extends Term {
type ap[x <: Term] = K1[x]
type eval = K
}
trait K1[x <: Term] extends Term {
type ap[y <: Term] = K2[x, y]
type eval = K1[x]
}
trait K2[x <: Term, y <: Term] extends Term {
type ap[z <: Term] = eval#ap[z]
type eval = x#eval
}
// The I combinator
trait I extends Term {
type ap[x <: Term] = I1[x]
type eval = I
}
trait I1[x <: Term] extends Term {
type ap[y <: Term] = eval#ap[y]
type eval = x#eval
}
trait c extends Term {
type ap[x <: Term] = c
type eval = c
}
trait d extends Term {
type ap[x <: Term] = d
type eval = d
}
trait e extends Term {
type ap[x <: Term] = e
type eval = e
}
    case class Equals[A >: B <: B, B]()
Equals[Int, Int] // compiles fine
// Equals[String, Int] // won't compile
// Ic -> c
Equals[I#ap[c]#eval, c]
// Kcd -> c
Equals[K#ap[c]#ap[d]#eval, c]
// KKcde -> d
Equals[K#ap[K]#ap[c]#ap[d]#ap[e]#eval, d]
// SIIIc -> Ic
Equals[S#ap[I]#ap[I]#ap[I]#ap[c]#eval, c]
// SKKc -> Ic
Equals[S#ap[K]#ap[K]#ap[c]#eval, c]
// SIIKc -> KKc
Equals[S#ap[I]#ap[I]#ap[K]#ap[c]#eval, K#ap[K]#ap[c]#eval]
// SIKKc -> K(KK)c
Equals[S#ap[I]#ap[K]#ap[K]#ap[c]#eval, K#ap[K#ap[K]]#ap[c]#eval]
// SIKIc -> KIc
Equals[S#ap[I]#ap[K]#ap[I]#ap[c]#eval, K#ap[I]#ap[c]#eval]
// SKIc -> Ic
Equals[S#ap[K]#ap[I]#ap[c]#eval, c]
// R = S(K(SI))K (reverse)
type R = S#ap[K#ap[S#ap[I]]]#ap[K]
Equals[R#ap[c]#ap[d]#eval, d#ap[c]#eval]
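    // Worked reduction for the assertion above (added for clarity):
    //   R c d = S (K (S I)) K c d
    //         -> (K (S I) c) (K c) d = S I (K c) d
    //         -> (I d) (K c d) = d c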
type b[a <: Term] = S#ap[K#ap[a]]#ap[S#ap[I]#ap[I]]
trait A0 extends Term {
type ap[x <: Term] = c
type eval = A0
}
trait A1 extends Term {
type ap[x <: Term] = x#ap[A0]#eval
type eval = A1
}
trait A2 extends Term {
type ap[x <: Term] = x#ap[A1]#eval
type eval = A2
}
// Single iteration
type NN1 = b[R]#ap[b[R]]#ap[A0]
Equals[NN1#eval, c]
// Double iteration
type NN2 = b[R]#ap[b[R]]#ap[A1]
Equals[NN2#eval, c]
// Triple iteration
type NN3 = b[R]#ap[b[R]]#ap[A2]
Equals[NN3#eval, c]
trait An extends Term {
type ap[x <: Term] = x#ap[An]#eval
type eval = An
}
// Infinite iteration: Smashes scalac's stack
}
}
| ariwaranosai/scalaLab | src/main/scala/typeprogramming/SKI.scala | Scala | bsd-2-clause | 3,586 |
package io.surfkit.clientlib.webrtc
import org.scalajs.dom.experimental.webrtc._
import org.scalajs.dom.experimental.mediastream._
import org.scalajs.dom.raw.AnalyserNode
import org.scalajs.dom.AudioContext
import org.scalajs.dom
/**
* Created by coreyauger on 25/11/15.
*/
class GainController(val stream:MediaStream) {
val context = new AudioContext
val microphone = context.createMediaStreamSource(stream)
val gainFilter = context.createGain()
val destination = context.createMediaStreamDestination()
val outputStream = destination.stream
microphone.connect(this.gainFilter)
gainFilter.connect(this.destination)
stream.addTrack(outputStream.getAudioTracks()(0))
stream.removeTrack(stream.getAudioTracks()(0))
def setGain(gain:Double):Unit = gainFilter.gain.value = gain
def gain():Double = gainFilter.gain.value
def off():Unit = gainFilter.gain.value = 0
def on():Unit = gainFilter.gain.value = 1
}
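// Usage sketch (illustrative, not part of the original file):
//
//   val controller = new GainController(localMediaStream)  // localMediaStream obtained via getUserMedia
//   controller.setGain(0.5)   // attenuate the microphone
//   controller.off()          // mute (gain = 0)
//   controller.on()           // restore full gain (gain = 1)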
| coreyauger/scala-js-webrtc | src/main/scala/io/surfkit/clientlib/webrtc/GainController.scala | Scala | mit | 936 |
package com.gx.strategy.oo
/**
* Copyright 2017 josephguan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
object App extends App {
val sorter = new Sorter(new QuickSortStrategy)
val sortedList = sorter.sort(List(5, 4, 3, 2, 1))
println(sortedList)
// case class Box(i: Int) extends Ordered[Box] {
// override def compare(that: Box): Int = i - that.i
// }
// println(sorter.sort(List(Box(1), Box(3), Box(2))))
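  //  Swapping in a different strategy (hypothetical MergeSortStrategy) only changes the
  //  constructor argument, which is the point of the pattern:
  //  val mergeSorter = new Sorter(new MergeSortStrategy)
  //  println(mergeSorter.sort(List(5, 4, 3, 2, 1)))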
}
| josephguan/scala-design-patterns | behavioral/strategy/src/main/scala/com/gx/strategy/oo/App.scala | Scala | apache-2.0 | 964 |
package upickle
import upickle.core.Visitor
import upickle.implicits.MacroImplicits
trait MsgReadWriters extends upickle.core.Types with MacroImplicits{
implicit val MsgValueR: Reader[upack.Msg] = new Reader.Delegate(upack.Msg)
implicit val MsgValueW: Writer[upack.Msg] = new Writer[upack.Msg] {
def write0[R](out: Visitor[_, R], v: upack.Msg): R = upack.transform(v, out)
}
} | lihaoyi/upickle-pprint | upickle/src/upickle/MsgReadWriters.scala | Scala | mit | 389 |
package models
import java.security.MessageDigest
import java.sql.Timestamp
import java.util.Date
import utils.Utils
import scala.concurrent.Future
import models.Models._
import slick.driver.MySQLDriver.api._
import play.api.libs.concurrent.Execution.Implicits.defaultContext
/**
* Created by pnagarjuna on 27/05/15.
*/
object DAO {
def init() = {
import Tables._
val init = DBIO.seq((users.schema ++ entities.schema).create)
DB.db.run(init)
}
def getUser(userId: String): Future[User] = {
val q = for(user <- Tables.users.filter(_.userId === userId)) yield user
DB.db.run(q.result).map(_ head)
}
def getUserWithEmail(email: String): Future[User] = {
val q = for(user <- Tables.users.filter(_.email === email)) yield user
DB.db.run(q.result).map(_ head)
}
def auth(email: String, password: String): Future[Boolean] = {
val q = for(user <- Tables.users.filter(_.email === email).filter(_.password === password)) yield user
DB.db.run(q.exists.result)
}
def getAuthUser(email: String, password: String): Future[User] = {
val q = for(user <- Tables.users.filter(_.email === email).filter(_.password === password)) yield user
DB.db.run(q.result).map(_ head)
}
def exists(email: String): Future[Boolean] = {
val q = for(user <- Tables.users.filter(_.email === email)) yield user
DB.db.run(q.exists.result)
}
def createUser(email: String, password: String): Future[Unit] = {
val timestamp = new Timestamp(new Date().getTime())
val q = DBIO.seq(Tables.users += User(s"${Utils.randomStr}${timestamp.getTime}", email, password, timestamp))
DB.db.run(q.transactionally)
}
}
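// Usage sketch (illustrative, not part of the original file); the futures would normally be
// composed inside a Play action rather than awaited:
//
//   import scala.concurrent.Await
//   import scala.concurrent.duration._
//
//   Await.result(DAO.createUser("[email protected]", "secret"), 5.seconds)
//   val authenticated = Await.result(DAO.auth("[email protected]", "secret"), 5.seconds)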
| pamu/engage | app/models/DAO.scala | Scala | apache-2.0 | 1,663 |
package io.peregrine.config
import com.twitter.app.GlobalFlag
object env {
private[this] val environment = System.getenv("PEREGRINE_ENV") match {
case e: String => e
case _ => "development"
}
def apply(): String = environment
}
object port extends GlobalFlag[String](":5000", "Http Port")
object sslPort extends GlobalFlag[String](":5043", "Https Port")
object pidEnabled extends GlobalFlag[Boolean](false, "whether to write pid file")
object pidPath extends GlobalFlag[String]("", "path to pid file")
object logPath extends GlobalFlag[String]("logs/peregrine.log", "path to log")
object logLevel extends GlobalFlag[String]("INFO", "log level")
object logNode extends GlobalFlag[String]("peregrine", "Logging node")
object templatePath extends GlobalFlag[String]("/views", "path to templates")
object assetPath extends GlobalFlag[String]("/public", "path to assets")
object assetsPathPrefix extends GlobalFlag[String]("/assets/", "the prefix used to prefix assets url")
object docRoot extends GlobalFlag[String]("src/main/resources", "path to docroot")
object maxRequestSize extends GlobalFlag[Int](5, "maximum request size (in megabytes)")
object certificatePath extends GlobalFlag[String]("", "path to SSL certificate")
object keyPath extends GlobalFlag[String]("", "path to SSL key")
object showDirectories extends GlobalFlag[Boolean](false, "allow directory view in asset path")
object debugAssets extends GlobalFlag[Boolean](false, "enable to show assets requests in logs")
| pairi/pairi | src/main/scala/io/peregrine/config/configs.scala | Scala | apache-2.0 | 1,504 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package xml
import scala.collection.Seq
/**
* A document information item (according to InfoSet spec). The comments
* are copied from the Infoset spec, only augmented with some information
* on the Scala types for definitions that might have no value.
* Also plays the role of an `XMLEvent` for pull parsing.
*
* @author Burak Emir
*/
@SerialVersionUID(-2289320563321795109L)
class Document extends NodeSeq with Serializable {
/**
* An ordered list of child information items, in document
* order. The list contains exactly one element information item. The
* list also contains one processing instruction information item for
* each processing instruction outside the document element, and one
* comment information item for each comment outside the document
* element. Processing instructions and comments within the DTD are
* excluded. If there is a document type declaration, the list also
* contains a document type declaration information item.
*/
var children: Seq[Node] = _
/** The element information item corresponding to the document element. */
var docElem: Node = _
/** The dtd that comes with the document, if any */
var dtd: scala.xml.dtd.DTD = _
/**
* An unordered set of notation information items, one for each notation
* declared in the DTD. If any notation is multiply declared, this property
* has no value.
*/
def notations: Seq[scala.xml.dtd.NotationDecl] =
dtd.notations
/**
* An unordered set of unparsed entity information items, one for each
* unparsed entity declared in the DTD.
*/
def unparsedEntities: Seq[scala.xml.dtd.EntityDecl] =
dtd.unparsedEntities
/** The base URI of the document entity. */
var baseURI: String = _
/**
* The name of the character encoding scheme in which the document entity
* is expressed.
*/
var encoding: Option[String] = _
/**
* An indication of the standalone status of the document, either
* true or false. This property is derived from the optional standalone
* document declaration in the XML declaration at the beginning of the
* document entity, and has no value (`None`) if there is no
* standalone document declaration.
*/
var standAlone: Option[Boolean] = _
/**
* A string representing the XML version of the document. This
* property is derived from the XML declaration optionally present at
* the beginning of the document entity, and has no value (`None`)
* if there is no XML declaration.
*/
var version: Option[String] = _
/**
* 9. This property is not strictly speaking part of the infoset of
* the document. Rather it is an indication of whether the processor
* has read the complete DTD. Its value is a boolean. If it is false,
* then certain properties (indicated in their descriptions below) may
* be unknown. If it is true, those properties are never unknown.
*/
var allDeclarationsProcessed = false
// methods for NodeSeq
def theSeq: Seq[Node] = this.docElem
override def canEqual(other: Any) = other match {
case _: Document => true
case _ => false
}
}
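// Usage sketch (illustrative, not part of the original file): parsers normally populate these
// fields, but a Document can also be assembled by hand, e.g.
//
//   val doc = new Document
//   doc.docElem = <root/>
//   doc.children = Seq(doc.docElem)
//   doc.version = Some("1.0")
//   doc.encoding = Some("UTF-8")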
| scala/scala-xml | shared/src/main/scala/scala/xml/Document.scala | Scala | apache-2.0 | 3,470 |
package me.apidoc.swagger.translators
import me.apidoc.swagger.Util
import io.swagger.{ models => swagger }
object ExternalDoc {
def apply(docs: Option[swagger.ExternalDocs]): Option[String] = {
docs.flatMap { doc =>
Util.combine(Seq(Option(doc.getDescription), Option(doc.getUrl)), ": ")
}
}
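  // Illustrative example (not part of the original file), assuming Util.combine
  // joins the defined parts with the given separator: an ExternalDocs with
  // description "Find out more" and url "https://swagger.io" would yield
  // Some("Find out more: https://swagger.io").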
}
| movio/apidoc | swagger/src/main/scala/me/apidoc/swagger/translators/ExternalDoc.scala | Scala | mit | 317 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.stream
import akka.http.scaladsl.model.Uri
import com.netflix.atlas.core.model.CustomVocabulary
import com.netflix.atlas.core.model.DataExpr
import com.netflix.atlas.core.model.ModelExtractors
import com.netflix.atlas.core.model.StyleExpr
import com.netflix.atlas.core.stacklang.Interpreter
import com.netflix.atlas.eval.stream.Evaluator.DataSource
import com.netflix.atlas.eval.stream.Evaluator.DataSources
import com.netflix.atlas.eval.util.HostRewriter
import com.typesafe.config.Config
private[stream] class ExprInterpreter(config: Config) {
private val interpreter = Interpreter(new CustomVocabulary(config).allWords)
private val hostRewriter = new HostRewriter(config.getConfig("atlas.eval.host-rewrite"))
def eval(expr: String): List[StyleExpr] = {
interpreter.execute(expr).stack.map {
case ModelExtractors.PresentationType(t) => t
case v => throw new MatchError(v)
}
}
def eval(uri: Uri): List[StyleExpr] = {
val expr = uri.query().get("q").getOrElse {
throw new IllegalArgumentException(s"missing required URI parameter `q`: $uri")
}
// Check that data expressions are supported. The streaming path doesn't support
// time shifts.
val results = eval(expr).flatMap(_.perOffset)
results.foreach { result =>
result.expr.dataExprs.foreach { dataExpr =>
if (!dataExpr.offset.isZero) {
throw new IllegalArgumentException(
s":offset not supported for streaming evaluation [[$dataExpr]]"
)
}
}
}
// Perform host rewrites based on the Atlas hostname
val host = uri.authority.host.toString()
hostRewriter.rewrite(host, results)
}
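  // Hypothetical usage sketch (not part of the original file): how a caller would
  // run a graph URI through `eval`. The query expression below is illustrative and
  // assumes the default vocabulary can parse it; in the streaming path the URI
  // normally comes from a DataSource.
  private def evalExample(): List[StyleExpr] =
    eval(Uri("/api/v1/graph?q=name,sps,:eq,:sum"))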
def dataExprMap(ds: DataSources): Map[DataExpr, List[DataSource]] = {
import scala.jdk.CollectionConverters._
ds.getSources.asScala.toList
.flatMap { s =>
val exprs = eval(Uri(s.getUri)).flatMap(_.expr.dataExprs).distinct
exprs.map(_ -> s)
}
.groupBy(_._1)
.map {
case (expr, vs) => expr -> vs.map(_._2)
}
}
}
| brharrington/atlas | atlas-eval/src/main/scala/com/netflix/atlas/eval/stream/ExprInterpreter.scala | Scala | apache-2.0 | 2,720 |
package com.danielasfregola.twitter4s.http.clients.rest.media.parameters
import com.danielasfregola.twitter4s.http.marshalling.Parameters
private[twitter4s] final case class MediaFinalizeParameters(media_id: Long, command: String = "FINALIZE")
extends Parameters
| DanielaSfregola/twitter4s | src/main/scala/com/danielasfregola/twitter4s/http/clients/rest/media/parameters/MediaFinalizeParameters.scala | Scala | apache-2.0 | 269 |
package org.flowpaint.ui.editors
import java.awt.event._
import java.awt.{Graphics2D, Dimension, Graphics, Color}
import javax.swing.{JPanel, JComponent}
import java.awt.image.BufferedImage
import org.flowpaint.util.{GraphicsUtils, StringUtils, MathUtils}
/**
* Provides Axis utility class.
*
* @author Hans Haggstrom
*/
abstract class EditorWithAxes extends Editor {
protected val blackColor: Color = new java.awt.Color( 0,0,0)
protected val darkColor: Color = new java.awt.Color( 0.25f, 0.25f, 0.25f )
protected val mediumColor: Color = new java.awt.Color( 0.75f, 0.75f, 0.75f)
protected val lightColor: Color = new java.awt.Color( 1f,1f,1f )
protected val borderSize = 3
class BackgroundCachingImagePanel(backgroundPainter: (Graphics2D, Int, Int) => Unit,
foregroundPainter: (Graphics2D, Int, Int) => Unit) extends JPanel {
addComponentListener(new ComponentListener {
def componentMoved(e: ComponentEvent) = {}
def componentShown(e: ComponentEvent) = {}
def componentHidden(e: ComponentEvent) = {}
def componentResized(e: ComponentEvent) = {
repaintBackground()
}
})
repaintBackground()
private var backgroundBuffer: BufferedImage = null
def repaintBackground() {
val w: Int = getWidth- borderSize*2
val h: Int = getHeight- borderSize*2
if ( w > 0 && h > 0) {
backgroundBuffer = new BufferedImage(w, h, BufferedImage.TYPE_INT_ARGB)
backgroundPainter(GraphicsUtils.toG2(backgroundBuffer.getGraphics), w, h)
}
}
override def paintComponent(g: Graphics) {
val g2: Graphics2D = g.asInstanceOf[Graphics2D]
val w = getWidth()
val h = getHeight()
if (w > 0 && h > 0 ) {
if (backgroundBuffer == null) repaintBackground
g2.drawImage(backgroundBuffer, borderSize, borderSize, null)
paintBackgroundBorder(g2, w, h)
foregroundPainter(g2, w, h)
paintForegroundBorder(g2, w, h)
}
}
}
class Axis {
var startValue = 0f
var endValue = 1f
var parameter: String = null
var relativePosition = 0f
var description: String = null
def initialize(prefix: String) {
def id(propertyName: String) = StringUtils.addPrefix(prefix, propertyName)
parameter = getStringProperty(id("editedParameter"), null)
startValue = getFloatProperty(id("startValue"), 0f)
endValue = getFloatProperty(id("endValue"), 1f)
description = getStringProperty(id("description"), parameter)
updateRelativePosition()
}
def updateEditedData() {
val pos = MathUtils.clampToZeroToOne(relativePosition)
val value = MathUtils.interpolate(pos, startValue, endValue)
editedData.setFloatProperty(parameter, value)
}
def updateRelativePosition() {
if (parameter != null) {
val value = editedData.getFloatProperty(parameter, 0.5f * (startValue + endValue))
relativePosition = if (startValue == endValue) startValue
else (value - startValue) / (endValue - startValue)
}
}
}
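  // Illustrative note (not part of the original file): an Axis initialized with
  // prefix "x" reads its edited parameter, start/end values and description from
  // properties whose names are derived from that prefix via StringUtils.addPrefix,
  // then updateEditedData maps relativePosition in [0, 1] linearly onto
  // [startValue, endValue], with updateRelativePosition performing the inverse.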
protected def updateBrush()
protected def updateRelativePosition( relativeX : Float, relativeY : Float )
protected def updateAxisFromMouseWheelEvent( rotation : Int )
protected def initializeAxis()
protected def description : String
protected def paintBackground( g2 : Graphics2D, width : Int, height : Int)
protected def paintIndicator( g2 : Graphics2D, width : Int, height : Int)
protected def minSize : Int
protected var view : BackgroundCachingImagePanel = null
private var updateOngoing = false
protected final def createUi(): JComponent = {
initializeAxis()
view = new BackgroundCachingImagePanel( paintBackground, paintIndicator )
view.setPreferredSize(new Dimension(minSize, minSize))
view.setToolTipText(description)
view.addMouseListener(mouseUpdateListener)
view.addMouseMotionListener(mouseUpdateListener)
view.addMouseWheelListener(mouseUpdateListener)
onEditorCreated()
return view
}
def onEditorCreated() {}
private val mouseUpdateListener = new MouseAdapter() {
override def mousePressed(e: MouseEvent) {updatePosition(e)}
override def mouseReleased(e: MouseEvent) {updatePosition(e)}
override def mouseDragged(e: MouseEvent) {updatePosition(e)}
override def mouseWheelMoved(e: MouseWheelEvent) {
val amount = e.getWheelRotation()
updateAxisFromMouseWheelEvent(amount)
updateBrushAndIgnoreChangeEvents()
}
}
private def updatePosition(e: MouseEvent) {
val x = e.getX
val y = e.getY
val rx = (1.0f * x) / (1.0f * view.getWidth())
val ry = (1.0f * y) / (1.0f * view.getHeight())
updateRelativePosition(rx, ry)
updateBrushAndIgnoreChangeEvents()
}
def updateBrushAndIgnoreChangeEvents() {
updateOngoing = true
updateBrush()
updateOngoing = false
}
  override def onEditedDataChanged(changedProperty: String) {
    if (!updateOngoing) updateAxisFromEditedData()
    // Redraw the cached background when the whole edited data changed (null property)
    // or when the changed property is one that affects the background.
    if ( view != null &&
         ( (!propertiesThatShouldCauseBackgroundRedraw.isEmpty && changedProperty == null) ||
           propertiesThatShouldCauseBackgroundRedraw.contains( changedProperty ) ) )
    {
      view.repaintBackground()
    }
    if (view != null) view.repaint()
  }
var propertiesThatShouldCauseBackgroundRedraw : List[String] = Nil
def updateAxisFromEditedData()
protected def paintBackgroundBorder(g2: Graphics2D, width : Int, height: Int): Unit = {
val w = width
val h = height
g2.setColor(darkColor)
g2.drawRect( 2,2,w-5,h-5 )
}
protected def paintForegroundBorder(g2: Graphics2D, width : Int, height: Int): Unit = {
val w = width
val h = height
g2.setColor(mediumColor)
g2.drawRect( 1,1,w-3,h-3 )
g2.setColor(mediumColor)
g2.drawRect( 0,0,w-1,h-1 )
}
}
| zzorn/flowpaint | src/main/scala/org/flowpaint/ui/editors/EditorWithAxes.scala | Scala | gpl-2.0 | 5894 |
package stgy
import core.Delta
import core.CoreMessage.CallTrace
import kamon.Kamon
import scala.concurrent.duration._
import scala.swing._
object Stgy extends App {
val main = new Delta[StgyHost, StgyProvider, StgyHostObserver]()
main.numberOfClient = 100
val hosts = (0 until 25).map { i => {
val x = (i % 5) * 600
val y = (i / 5) * 600
val zone = new SquareZone(x, y, 600, 600)
new StgyHost(zone)
}
}
val hostObserver = new StgyHostObserver()
main.launch(hosts, hostObserver)
hosts.map(h => h.zone).foreach(z => {
main.HP.hosts(z).call(h1 => {
val h1zone = h1.zone.asInstanceOf[SquareZone]
h1.neighbours = main.HP.getHosts(new SquareZone(h1zone.x - 10, h1zone.y - 10, h1zone.w + 20, h1zone.h + 20)).filter(_ != main.HP.hosts(z)).toList
})
})
main.HP.hosts.values.foreach(hr => main.setHostInterval(hr, 16, h => h.tick))
main.setHostObserverInterval(16, h => h.tick)
}
| DeltaIMT/Delta | stgy_scalajs_server/src/main/scala/stgy/Stgy.scala | Scala | mit | 937 |
package org.openurp.edu.eams.teach.election
import org.beangle.data.model.Entity
import org.openurp.base.Semester
import org.openurp.code.edu.Education
import org.openurp.edu.base.code.CourseType
trait CourseTypeCreditConstraint extends Entity[Long] {
def getGrades(): String
def setGrades(grades: String): Unit
def getSemester(): Semester
def setSemester(semester: Semester): Unit
def getEducation(): Education
def setEducation(education: Education): Unit
def getCourseType(): CourseType
def setCourseType(courseType: CourseType): Unit
def getLimitCredit(): Float
def setLimitCredit(limitCredit: Float): Unit
}
| openurp/edu-eams-webapp | election/src/main/scala/org/openurp/edu/eams/teach/election/CourseTypeCreditConstraint.scala | Scala | gpl-3.0 | 646 |
import scala.concurrent.duration.FiniteDuration
import reactivemongo.api._
import reactivemongo.api.commands.CommandException.Code
import reactivemongo.api.bson.{ BSONDocument, BSONString }
import reactivemongo.api.bson.collection.BSONCollection
// TODO: Separate Spec?
trait CollectionMetaSpec { collSpec: CollectionSpec =>
import _root_.tests.Common
import Common._
def metaSpec = {
"with the default connection" >> {
val colName = s"collmeta${System identityHashCode this}"
"be created" in {
db(colName).create() must beTypedEqualTo({}).await(1, timeout)
}
cappedSpec(db(colName), timeout)
listSpec(db, timeout)
"be renamed" >> {
successfulRename(db, connection, timeout)
failedRename(db, connection, s"missing-${colName}", timeout)
}
dropSpec(db, colName, timeout)
}
"with the slow connection" >> {
val colName = s"slowmeta${System identityHashCode db}"
"be created" in {
slowDb(colName).create() must beTypedEqualTo({}).await(1, slowTimeout)
}
cappedSpec(slowDb(colName), slowTimeout)
listSpec(slowDb, slowTimeout)
"be renamed" >> {
successfulRename(slowDb, slowConnection, slowTimeout)
failedRename(slowDb, slowConnection, s"missing-${colName}", slowTimeout)
}
dropSpec(slowDb, colName, slowTimeout)
}
}
// ---
val cappedMaxSize: Long = 2 * 1024 * 1024
def cappedSpec(c: BSONCollection, timeout: FiniteDuration) =
"be capped" >> {
"after conversion" in {
c.convertToCapped(cappedMaxSize, None) must beEqualTo({}).
await(1, timeout)
}
"with statistics (MongoDB >= 3.0)" in {
c.stats() must beLike[CollectionStats] {
case stats => stats.capped must beTrue and (
stats.maxSize must beSome(cappedMaxSize))
}.await(1, timeout)
}
}
def successfulRename(
_db: DB, c: MongoConnection, timeout: FiniteDuration) =
"successfully" in {
val coll = _db.collection(s"foo_${System identityHashCode timeout}")
(for {
adminDb <- c.database("admin")
_ <- coll.create()
_ <- adminDb.renameCollection(
_db.name, coll.name, s"renamed${System identityHashCode coll}")
} yield ()) must beTypedEqualTo({}).await(0, timeout)
}
def failedRename(
_db: DB,
c: MongoConnection,
colName: String,
timeout: FiniteDuration) = "with failure" in {
(for {
adminDb <- c.database("admin")
_ <- adminDb.renameCollection(_db.name, colName, "renamed")
} yield false).recover({
case Code(c) => c == 26 // source doesn't exist
}) must beTrue.await(1, timeout)
}
def listSpec(db2: DB, timeout: FiniteDuration) = "be listed" in {
val doc = BSONDocument("foo" -> BSONString("bar"))
val name1 = s"collection_one${System identityHashCode doc}"
val name2 = s"collection_two${System identityHashCode doc}"
def i1 = db2(name1).insert.one(doc).map(_.n)
def i2 = db2(name2).insert.one(doc).map(_.n)
i1 aka "insert #1" must beTypedEqualTo(1).await(1, timeout) and {
i2 aka "insert #2" must beTypedEqualTo(1).await(1, timeout)
} and {
db2.collectionNames must contain(atLeast(name1, name2)).await(2, timeout)
// ... as the concurrent tests could create other collections
}
}
def dropSpec(_db: DB, name: String, timeout: FiniteDuration) = {
def col = _db(name)
"be dropped successfully if exist" in {
col.drop(false) aka "legacy drop" must beTrue.await(1, timeout) and {
col.create().flatMap(_ => col.drop(false)).
aka("dropped") must beTrue.await(1, timeout)
}
}
"be dropped successfully if doesn't exist" in {
col.drop(false) aka "dropped" must beFalse.await(1, timeout)
}
}
object & {
def unapply[T](any: T): Option[(T, T)] = Some(any -> any)
}
}
| ReactiveMongo/ReactiveMongo | driver/src/test/scala/CollectionMetaSpec.scala | Scala | apache-2.0 | 3,933 |
object Test extends App {
val xs =
Array.empty[Double]
val ys =
Array(0.0)
assert(xs.intersect(ys).getClass.getComponentType == classOf[Double])
assert(Array.empty[Double].intersect(Array(0.0)).getClass.getComponentType == classOf[Double])
}
| scala/scala | test/files/run/t12403.scala | Scala | apache-2.0 | 259 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.kafka08
import kafka.admin.AdminUtils
import kafka.client.ClientUtils
import kafka.consumer.AssignmentContext
import kafka.network.BlockingChannel
import org.I0Itec.zkclient.ZkClient
import org.apache.zookeeper.data.Stat
case class ZkUtils08(zkClient: ZkClient){
def channelToOffsetManager(groupId: String, socketTimeoutMs: Int, retryBackOffMs: Int): BlockingChannel =
ClientUtils.channelToOffsetManager(groupId, zkClient, socketTimeoutMs, retryBackOffMs)
def deleteTopic(topic: String): Unit = AdminUtils.deleteTopic(zkClient, topic)
def topicExists(topic: String): Boolean = AdminUtils.topicExists(zkClient, topic)
def getAllTopics: Seq[String] = kafka.utils.ZkUtils.getAllTopics(zkClient)
def createTopic(topic: String, partitions: Int, replication: Int) = AdminUtils.createTopic(zkClient, topic, partitions, replication)
def getLeaderForPartition(topic: String, partition: Int): Option[Int] = kafka.utils.ZkUtils.getLeaderForPartition(zkClient, topic, partition)
def createEphemeralPathExpectConflict(path: String, data: String): Unit = kafka.utils.ZkUtils.createEphemeralPathExpectConflict(zkClient, path, data)
def createEphemeralPathExpectConflictHandleZKBug(path: String,
data: String,
expectedCallerData: Any,
checker: (String, Any) => Boolean,
backoffTime: Int): Unit =
kafka.utils.ZkUtils.createEphemeralPathExpectConflictHandleZKBug(zkClient, path, data, expectedCallerData, checker, backoffTime)
def deletePath(path: String) = kafka.utils.ZkUtils.deletePath(zkClient, path)
def getConsumerPartitionOwnerPath(groupId: String, topic: String, partition: Int): String =
kafka.utils.ZkUtils.getConsumerPartitionOwnerPath(groupId, topic, partition)
def getChildrenParentMayNotExist(path: String): Seq[String] = kafka.utils.ZkUtils.getChildrenParentMayNotExist(zkClient, path)
def getAllBrokersInCluster: Seq[kafka.cluster.Broker] = kafka.utils.ZkUtils.getAllBrokersInCluster(zkClient)
def createAssignmentContext(group: String, consumerId: String, excludeInternalTopics: Boolean): AssignmentContext =
new AssignmentContext(group, consumerId, excludeInternalTopics, zkClient)
def readData(path: String): (String, Stat) = kafka.utils.ZkUtils.readData(zkClient, path)
def fetchTopicMetadataFromZk(topic: String) = {
val metadata = AdminUtils.fetchTopicMetadataFromZk(topic, zkClient)
KafkaTopicMetadata(metadata.topic, metadata.partitionsMetadata.size)
}
def close(): Unit = zkClient.close()
}
case class KafkaTopicMetadata(topicName: String, numberOfPartitions: Int)
| tkunicki/geomesa | geomesa-kafka/geomesa-kafka-datastore/geomesa-kafka-08-datastore/src/main/scala/org/locationtech/geomesa/kafka08/ZkUtils08.scala | Scala | apache-2.0 | 3267 |
/*
* Copyright 2014 JHC Systems Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sqlest.ast.syntax
import sqlest.ast._
trait UpdateSyntax {
/** Update table */
def apply(table: Table) = UpdateSetterBuilder(table)
}
case class UpdateSetterBuilder(table: Table) {
def set(setters: Setter[_, _]*): UpdateWhereBuilder =
UpdateWhereBuilder(this.table, setters)
}
case class UpdateWhereBuilder(table: Table, setters: Seq[Setter[_, _]]) {
def where(expr: Column[Boolean]): Update =
Update(table = this.table, set = setters, where = Some(expr))
}
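// Hypothetical usage sketch (not part of the original file). It assumes the syntax
// trait above is exposed as an `update` word and that setters and comparisons use
// sqlest's column DSL; the table and column names are placeholders:
//   update(MyTable)
//     .set(MyTable.name -> "new name")
//     .where(MyTable.id === 123L)
// The chain mirrors the builders above: `apply` yields an UpdateSetterBuilder,
// `set` an UpdateWhereBuilder, and `where` the final Update node.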
| andrewjskatz/sqlest | src/main/scala/sqlest/ast/syntax/UpdateSyntax.scala | Scala | apache-2.0 | 1,090 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.bin
import java.io.{ByteArrayOutputStream, OutputStream}
import java.nio.charset.StandardCharsets
import java.nio.{ByteBuffer, ByteOrder}
import java.util.Date
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.{Geometry, LineString, Point}
import org.locationtech.geomesa.utils.bin.BinaryEncodeCallback.{ByteArrayCallback, ByteStreamCallback}
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.ToValues
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.geotools.AttributeSpec.ListAttributeSpec
import org.locationtech.geomesa.utils.geotools.{SimpleFeatureSpecParser, SimpleFeatureTypes}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.JavaConversions._
class BinaryOutputEncoder private (toValues: ToValues) {
def encode(f: SimpleFeature): Array[Byte] = {
toValues(f, ByteArrayCallback)
ByteArrayCallback.result
}
def encode(f: SimpleFeature, callback: BinaryOutputCallback): Unit = toValues(f, callback)
def encode(f: CloseableIterator[SimpleFeature], os: OutputStream, sort: Boolean = false): Long = {
if (sort) {
val byteStream = new ByteArrayOutputStream
val callback = new ByteStreamCallback(byteStream)
try { f.foreach(toValues(_, callback)) } finally {
f.close()
}
val count = callback.result
val bytes = byteStream.toByteArray
val size = (bytes.length / count).toInt
bytes.grouped(size).toSeq.sorted(BinaryOutputEncoder.DateOrdering).foreach(os.write)
count
} else {
val callback = new ByteStreamCallback(os)
try { f.foreach(toValues(_, callback)) } finally {
f.close()
}
callback.result
}
}
}
object BinaryOutputEncoder extends LazyLogging {
import AxisOrder._
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
val BinEncodedSft = SimpleFeatureTypes.createType("bin", "bin:Bytes,*geom:Point:srid=4326")
val BIN_ATTRIBUTE_INDEX = 0 // index of 'bin' attribute in BinEncodedSft
// compares the 4 bytes representing the date in a bin array
private val DateOrdering = new Ordering[Array[Byte]] {
override def compare(x: Array[Byte], y: Array[Byte]): Int = {
val compare1 = Ordering.Byte.compare(x(4), y(4))
if (compare1 != 0) { return compare1 }
val compare2 = Ordering.Byte.compare(x(5), y(5))
if (compare2 != 0) { return compare2 }
val compare3 = Ordering.Byte.compare(x(6), y(6))
if (compare3 != 0) { return compare3 }
Ordering.Byte.compare(x(7), y(7))
}
}
case class EncodingOptions(geomField: Option[Int],
dtgField: Option[Int],
trackIdField: Option[Int],
labelField: Option[Int] = None,
axisOrder: Option[AxisOrder] = None)
case class EncodedValues(trackId: Int, lat: Float, lon: Float, dtg: Long, label: Long)
def apply(sft: SimpleFeatureType, options: EncodingOptions): BinaryOutputEncoder =
new BinaryOutputEncoder(toValues(sft, options))
def convertToTrack(f: SimpleFeature, i: Int): Int = convertToTrack(f.getAttribute(i))
def convertToTrack(track: AnyRef): Int = if (track == null) { 0 } else { track.hashCode }
// TODO could use `.getDateAsLong` if we know we have a KryoBufferSimpleFeature
def convertToDate(f: SimpleFeature, i: Int): Long = convertToDate(f.getAttribute(i).asInstanceOf[Date])
def convertToDate(date: Date): Long = if (date == null) { 0L } else { date.getTime }
def convertToLabel(f: SimpleFeature, i: Int): Long = convertToLabel(f.getAttribute(i))
def convertToLabel(label: AnyRef): Long = label match {
case null => 0L
case n: Number => n.longValue()
case _ =>
import org.locationtech.geomesa.utils.conversions.ScalaImplicits.RichTraversableOnce
var sum = 0L
label.toString.getBytes(StandardCharsets.UTF_8).iterator.take(8).foreachIndex {
case (b, i) => sum += (b & 0xffL) << (8 * i)
}
sum
}
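  // Worked example (not part of the original file): for a non-numeric label the
  // first 8 UTF-8 bytes are packed little-endian into a Long, so
  // convertToLabel("AB") == 0x41 + (0x42 << 8) == 16961; longer labels are
  // truncated to their first 8 bytes.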
/**
* Decodes a byte array
*
* @param encoded encoded byte array
* @param callback callback for results
*/
def decode(encoded: Array[Byte], callback: BinaryOutputCallback): Unit = {
val buf = ByteBuffer.wrap(encoded).order(ByteOrder.LITTLE_ENDIAN)
val trackId = buf.getInt
val time = buf.getInt * 1000L
val lat = buf.getFloat
val lon = buf.getFloat
if (encoded.length > 16) {
val label = buf.getLong
callback(trackId, lat, lon, time, label)
} else {
callback(trackId, lat, lon, time)
}
}
def decode(encoded: Array[Byte]): EncodedValues = {
var values: EncodedValues = null
decode(encoded, new BinaryOutputCallback() {
override def apply(trackId: Int, lat: Float, lon: Float, dtg: Long): Unit =
values = EncodedValues(trackId, lat, lon, dtg, -1L)
override def apply(trackId: Int, lat: Float, lon: Float, dtg: Long, label: Long): Unit =
values = EncodedValues(trackId, lat, lon, dtg, label)
})
values
}
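  // Hypothetical round-trip sketch (not part of the original file): builds one
  // 16-byte little-endian chunk by hand, matching the layout read by `decode`
  // (trackId: Int, time in seconds: Int, lat: Float, lon: Float). Values are
  // illustrative; without the optional 8-byte label suffix the label comes back as -1.
  private def decodeExample(): EncodedValues = {
    val buf = ByteBuffer.allocate(16).order(ByteOrder.LITTLE_ENDIAN)
    buf.putInt(42)          // trackId
    buf.putInt(1500000000)  // dtg in seconds; decode multiplies by 1000
    buf.putFloat(45.0f)     // lat
    buf.putFloat(-121.5f)   // lon
    decode(buf.array())     // EncodedValues(42, 45.0f, -121.5f, 1500000000000L, -1L)
  }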
/**
* Creates the function to map a simple feature to a bin-encoded buffer
*
* @param sft simple feature type
* @param options encoding options
* @return
*/
private def toValues(sft: SimpleFeatureType, options: EncodingOptions): ToValues = {
val geomIndex = options.geomField.getOrElse(sft.getGeomIndex)
if (geomIndex == -1) {
throw new IllegalArgumentException(s"Invalid geometry field requested for feature type ${sft.getTypeName}")
}
val dtgIndex = options.dtgField.orElse(sft.getDtgIndex).getOrElse(-1)
if (dtgIndex == -1) {
throw new RuntimeException(s"Invalid date field requested for feature type ${sft.getTypeName}")
}
val isSingleDate = sft.getDescriptor(dtgIndex).getType.getBinding == classOf[Date]
val axisOrder = options.axisOrder.getOrElse(AxisOrder.LonLat)
val (isPoint, isLineString) = {
val binding = sft.getDescriptor(geomIndex).getType.getBinding
(binding == classOf[Point], binding == classOf[LineString])
}
// noinspection ExistsEquals
if (options.trackIdField.exists(_ == -1)) {
throw new IllegalArgumentException(s"Invalid track field requested for feature type ${sft.getTypeName}")
} else if (options.labelField.exists(_ == -1)) {
throw new IllegalArgumentException(s"Invalid label field requested for feature type ${sft.getTypeName}")
} else if (!isSingleDate) {
if (isLineString) {
val dtgField = sft.getDescriptor(dtgIndex).getLocalName
val sftAttributes = SimpleFeatureSpecParser.parse(SimpleFeatureTypes.encodeType(sft)).attributes
sftAttributes.find(_.name == dtgField).foreach { spec =>
if (!spec.isInstanceOf[ListAttributeSpec] || spec.asInstanceOf[ListAttributeSpec].subClass != classOf[Date]) {
throw new RuntimeException(s"Invalid date field requested for feature type ${sft.getTypeName}")
}
}
} else {
throw new RuntimeException(s"Invalid date field requested for feature type ${sft.getTypeName}")
}
}
// gets the track id from a feature
val getTrackId: (SimpleFeature) => Int = options.trackIdField match {
case None => (f) => f.getID.hashCode
case Some(trackId) => convertToTrack(_, trackId)
}
// gets the label from a feature
val getLabelOption: Option[(SimpleFeature) => Long] = options.labelField.map { labelIndex =>
convertToLabel(_, labelIndex)
}
if (isLineString) {
// for linestrings, we return each point - use an array so we get constant-time lookup
// depending on srs requested and wfs versions, axis order can be flipped
val getLineLatLon: (SimpleFeature) => Array[(Float, Float)] = axisOrder match {
case LatLon => lineToXY(_, geomIndex)
case LonLat => lineToYX(_, geomIndex)
}
if (isSingleDate) {
getLabelOption match {
case None => new ToValuesLines(getTrackId, getLineLatLon, dtgIndex)
case Some(getLabel) => new ToValuesLinesLabels(getTrackId, getLineLatLon, getLabel, dtgIndex)
}
} else {
// for line strings, we need an array of dates corresponding to the points in the line
val getLineDtg: (SimpleFeature) => Array[Long] = dateArray(_, dtgIndex)
getLabelOption match {
case None => new ToValuesLinesDates(getTrackId, getLineLatLon, getLineDtg)
case Some(getLabel) => new ToValuesLinesDatesLabels(getTrackId, getLineLatLon, getLineDtg, getLabel)
}
}
} else {
// get lat/lon as floats
// depending on srs requested and wfs versions, axis order can be flipped
val getLatLon: (SimpleFeature) => (Float, Float) = (isPoint, axisOrder) match {
case (true, LatLon) => pointToXY(_, geomIndex)
case (true, LonLat) => pointToYX(_, geomIndex)
case (false, LatLon) => geomToXY(_, geomIndex)
case (false, LonLat) => geomToYX(_, geomIndex)
}
getLabelOption match {
case None => new ToValuesPoints(getTrackId, getLatLon, dtgIndex)
case Some(getLabel) => new ToValuesPointsLabels(getTrackId, getLatLon, getLabel, dtgIndex)
}
}
}
private def pointToXY(p: Point): (Float, Float) = (p.getX.toFloat, p.getY.toFloat)
private def pointToYX(p: Point): (Float, Float) = (p.getY.toFloat, p.getX.toFloat)
private def pointToXY(f: SimpleFeature, i: Int): (Float, Float) =
pointToXY(f.getAttribute(i).asInstanceOf[Point])
private def pointToYX(f: SimpleFeature, i: Int): (Float, Float) =
pointToYX(f.getAttribute(i).asInstanceOf[Point])
private def geomToXY(f: SimpleFeature, i: Int): (Float, Float) =
pointToXY(f.getAttribute(i).asInstanceOf[Geometry].safeCentroid())
private def geomToYX(f: SimpleFeature, i: Int): (Float, Float) =
pointToYX(f.getAttribute(i).asInstanceOf[Geometry].safeCentroid())
private def lineToXY(f: SimpleFeature, i: Int): Array[(Float, Float)] = {
val line = f.getAttribute(i).asInstanceOf[LineString]
Array.tabulate(line.getNumPoints)(i => pointToXY(line.getPointN(i)))
}
private def lineToYX(f: SimpleFeature, i: Int): Array[(Float, Float)] = {
val line = f.getAttribute(i).asInstanceOf[LineString]
Array.tabulate(line.getNumPoints)(i => pointToYX(line.getPointN(i)))
}
private def dateArray(f: SimpleFeature, i: Int): Array[Long] = {
val dates = f.getAttribute(i).asInstanceOf[java.util.List[Date]]
if (dates == null) { Array.empty } else { dates.map(_.getTime).toArray }
}
private trait ToValues {
def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit
}
private class ToValuesPoints(getTrackId: (SimpleFeature) => Int,
getLatLon: (SimpleFeature) => (Float, Float),
dtgIndex: Int) extends ToValues {
override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
val (lat, lon) = getLatLon(f)
callback(getTrackId(f), lat, lon, convertToDate(f, dtgIndex))
}
}
private class ToValuesPointsLabels(getTrackId: (SimpleFeature) => Int,
getLatLon: (SimpleFeature) => (Float, Float),
getLabel: (SimpleFeature) => Long,
dtgIndex: Int) extends ToValues {
override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
val (lat, lon) = getLatLon(f)
callback(getTrackId(f), lat, lon, convertToDate(f, dtgIndex), getLabel(f))
}
}
private class ToValuesLines(getTrackId: (SimpleFeature) => Int,
getLatLon: (SimpleFeature) => Array[(Float, Float)],
dtgIndex: Int) extends ToValues {
override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
val trackId = getTrackId(f)
val points = getLatLon(f)
val date = convertToDate(f, dtgIndex)
var i = 0
while (i < points.length) {
val (lat, lon) = points(i)
callback(trackId, lat, lon, date)
i += 1
}
}
}
private class ToValuesLinesLabels(getTrackId: (SimpleFeature) => Int,
getLatLon: (SimpleFeature) => Array[(Float, Float)],
getLabel: (SimpleFeature) => Long,
dtgIndex: Int) extends ToValues {
override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
val trackId = getTrackId(f)
val points = getLatLon(f)
val date = convertToDate(f, dtgIndex)
val label = getLabel(f)
var i = 0
while (i < points.length) {
val (lat, lon) = points(i)
callback(trackId, lat, lon, date, label)
i += 1
}
}
}
private class ToValuesLinesDates(getTrackId: (SimpleFeature) => Int,
getLatLon: (SimpleFeature) => Array[(Float, Float)],
getLineDtg: (SimpleFeature) => Array[Long]) extends ToValues {
override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
val trackId = getTrackId(f)
val points = getLatLon(f)
val dates = getLineDtg(f)
val size = if (points.length == dates.length) { points.length } else {
logger.warn(s"Mismatched geometries and dates for simple feature $f: ${points.toList} ${dates.toList}")
math.min(points.length, dates.length)
}
var i = 0
while (i < size) {
val (lat, lon) = points(i)
callback(trackId, lat, lon, dates(i))
i += 1
}
}
}
private class ToValuesLinesDatesLabels(getTrackId: (SimpleFeature) => Int,
getLatLon: (SimpleFeature) => Array[(Float, Float)],
getLineDtg: (SimpleFeature) => Array[Long],
getLabel: (SimpleFeature) => Long) extends ToValues {
override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
val trackId = getTrackId(f)
val points = getLatLon(f)
val dates = getLineDtg(f)
val size = if (points.length == dates.length) { points.length } else {
logger.warn(s"Mismatched geometries and dates for simple feature $f: ${points.toList} ${dates.toList}")
math.min(points.length, dates.length)
}
val label = getLabel(f)
var i = 0
while (i < size) {
val (lat, lon) = points(i)
callback(trackId, lat, lon, dates(i), label)
i += 1
}
}
}
}
| ronq/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/bin/BinaryOutputEncoder.scala | Scala | apache-2.0 | 15,302 |
package drt.chroma
import com.typesafe.config.ConfigFactory
import spray.http.FormData
trait ChromaConfig {
lazy val config = ConfigFactory.load()
val chromaTokenRequestCredentials = FormData(Seq(
"username" -> config.getString("chroma.username"),
"password" -> config.getString("chroma.password"),
"grant_type" -> "password"
))
val tokenUrl: String = config.getString("chroma.url.token")
val url: String = config.getString("chroma.url.live")
}
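// Hypothetical application.conf sketch (not part of the original file) showing the
// keys this trait reads via Typesafe Config; the values are placeholders only:
//   chroma {
//     username = "drt-user"
//     password = "secret"
//     url {
//       token = "https://chroma.example.com/oauth/token"
//       live = "https://chroma.example.com/live/feed"
//     }
//   }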
| somanythings/drt-scalajs-spa-exploration | server/src/main/scala/drt/chroma/ChromaConfig.scala | Scala | apache-2.0 | 470 |
package org.jetbrains.plugins.scala
package lang
package formatting
import java.util
import com.intellij.formatting._
import com.intellij.lang.ASTNode
import com.intellij.openapi.util.{Key, TextRange}
import com.intellij.psi._
import com.intellij.psi.codeStyle.{CodeStyleSettings, CommonCodeStyleSettings}
import com.intellij.psi.tree._
import com.intellij.psi.util.PsiTreeUtil
import org.apache.commons.lang3.StringUtils
import org.jetbrains.plugins.scala.extensions.{PsiElementExt, _}
import org.jetbrains.plugins.scala.lang.formatting.ScalaWrapManager._
import org.jetbrains.plugins.scala.lang.formatting.getDummyBlocks._
import org.jetbrains.plugins.scala.lang.formatting.processors._
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes._
import org.jetbrains.plugins.scala.lang.parser.util.ParserUtils
import org.jetbrains.plugins.scala.lang.parser.{ScCodeBlockElementType, ScalaElementType}
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScInterpolatedStringLiteral, ScLiteral}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.expr.xml._
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScModifierListOwner, ScPackaging}
import org.jetbrains.plugins.scala.lang.scaladoc.lexer.ScalaDocTokenType
import org.jetbrains.plugins.scala.lang.scaladoc.parser.ScalaDocElementTypes
import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.{ScDocComment, ScDocListItem, ScDocTag}
import org.jetbrains.plugins.scala.project.UserDataHolderExt
import org.jetbrains.plugins.scala.util.MultilineStringUtil
import org.jetbrains.plugins.scala.util.MultilineStringUtil.MultilineQuotes
import scala.annotation.tailrec
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
// TODO: rename it to some Builder/Producer/etc...
object getDummyBlocks {
private case class InterpolatedStringAlignments(quotes: Alignment, marginChar: Alignment)
private val interpolatedStringAlignmentsKey: Key[InterpolatedStringAlignments] = Key.create("interpolated.string.alignment")
  /** the alignment can be applied either to the colon or to the type annotation itself, depending on ScalaCodeStyleSettings.ALIGN_PARAMETER_TYPES_IN_MULTILINE_DECLARATIONS */
private val typeParameterTypeAnnotationAlignmentsKey: Key[Alignment] = Key.create("colon.in.type.annotation.alignments.key")
private val fieldGroupAlignmentKey: Key[Alignment] = Key.create("field.group.alignment.key")
private val InfixElementsTokenSet = TokenSet.create(
ScalaElementType.INFIX_EXPR,
ScalaElementType.INFIX_PATTERN,
ScalaElementType.INFIX_TYPE
)
private val FieldGroupSubBlocksTokenSet = TokenSet.orSet(
TokenSet.create(tCOLON, tASSIGN),
VAL_VAR_TOKEN_SET
)
private val FunctionTypeTokenSet = TokenSet.create(
tFUNTYPE,
tFUNTYPE_ASCII
)
def apply(block: ScalaBlock): getDummyBlocks = new getDummyBlocks(block)
private def cachedAlignment(literal: ScInterpolatedStringLiteral): Option[InterpolatedStringAlignments] =
Option(literal.getUserData(interpolatedStringAlignmentsKey))
private def cachedParameterTypeAnnotationAlignment(clause: ScParameterClause): Option[Alignment] =
Option(clause.getUserData(typeParameterTypeAnnotationAlignmentsKey))
// TODO: rename to isNonEmptyNode
// TODO: rename FormatterUtil to ScalaFormatterUtil
/** see [[com.intellij.psi.formatter.java.SimpleJavaBlock.isNotEmptyNode]] */
private def isNotEmptyNode(node: ASTNode): Boolean =
!com.intellij.psi.formatter.FormatterUtil.containsWhiteSpacesOnly(node) &&
node.getTextLength > 0
private def isNotEmptyDocNode(node: ASTNode): Boolean =
!isEmptyDocNode(node)
private def isEmptyDocNode(node: ASTNode): Boolean =
node.getElementType match {
case ScalaDocTokenType.DOC_WHITESPACE => true
case ScalaDocTokenType.DOC_COMMENT_DATA |
ScalaDocTokenType.DOC_INNER_CODE => StringUtils.isBlank(node.getText)
case _ => node.getTextLength == 0
}
private class StringLineScalaBlock(myTextRange: TextRange, mainNode: ASTNode, myParentBlock: ScalaBlock,
myAlignment: Alignment, myIndent: Indent, myWrap: Wrap, mySettings: CodeStyleSettings)
extends ScalaBlock(myParentBlock, mainNode, null, myAlignment, myIndent, myWrap, mySettings) {
override def getTextRange: TextRange = myTextRange
override def isLeaf = true
override def isLeaf(node: ASTNode): Boolean = true
override def getChildAttributes(newChildIndex: Int): ChildAttributes = new ChildAttributes(Indent.getNoneIndent, null)
override def getSpacing(child1: Block, child2: Block): Spacing = Spacing.getReadOnlySpacing
override def getSubBlocks: util.List[Block] = {
if (subBlocks == null) {
subBlocks = new util.ArrayList[Block]()
}
subBlocks
}
}
}
//noinspection RedundantDefaultArgument
class getDummyBlocks(private val block: ScalaBlock) {
private val settings: CodeStyleSettings = block.settings
private val commonSettings: CommonCodeStyleSettings = settings.getCommonSettings(ScalaLanguage.INSTANCE)
private implicit val scalaSettings: ScalaCodeStyleSettings = settings.getCustomSettings(classOf[ScalaCodeStyleSettings])
// shortcuts to simplify long conditions that operate with settings
@inline private def cs = commonSettings
@inline private def ss = scalaSettings
  // TODO: there are quite a few unnecessary array allocations and copies; consider passing
  // a mutable buffer/list to submethods, and measure the performance!
def apply(firstNode: ASTNode, lastNode: ASTNode): util.ArrayList[Block] = {
if (isScalaDocNode(firstNode))
if (lastNode != null)
applyInnerScaladoc(firstNode, lastNode)
else
applyInnerScaladoc(firstNode)
else
if (lastNode != null)
applyInner(firstNode, lastNode)
else
applyInner(firstNode)
}
private def applyInner(node: ASTNode): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]
val nodePsi = node.getPsi
nodePsi match {
case _: ScValue | _: ScVariable if cs.ALIGN_GROUP_FIELD_DECLARATIONS =>
subBlocks.addAll(getFieldGroupSubBlocks(node))
return subBlocks
case _: ScCaseClause if ss.ALIGN_IN_COLUMNS_CASE_BRANCH =>
subBlocks.addAll(getCaseClauseGroupSubBlocks(node))
return subBlocks
case _: ScIf =>
val alignment = if (ss.ALIGN_IF_ELSE) Alignment.createAlignment
else null
subBlocks.addAll(getIfSubBlocks(node, alignment))
return subBlocks
case _: ScInfixExpr | _: ScInfixPattern | _: ScInfixTypeElement =>
subBlocks.addAll(getInfixBlocks(node))
return subBlocks
case extendsBlock: ScExtendsBlock =>
subBlocks.addAll(getExtendsSubBlocks(node, extendsBlock))
return subBlocks
case _: ScFor =>
subBlocks.addAll(getForSubBlocks(node, node.getChildren(null)))
return subBlocks
case _: ScReferenceExpression | _: ScThisReference | _: ScSuperReference =>
subBlocks.addAll(getMethodCallOrRefExprSubBlocks(node))
return subBlocks
case _: ScMethodCall =>
subBlocks.addAll(getMethodCallOrRefExprSubBlocks(node))
return subBlocks
case _: ScLiteral if node.getFirstChildNode != null &&
node.getFirstChildNode.getElementType == tMULTILINE_STRING && ss.supportMultilineString =>
subBlocks.addAll(getMultilineStringBlocks(node))
return subBlocks
case pack: ScPackaging =>
/** see [[ScPackaging.findExplicitMarker]] doc */
val explicitMarker = pack.findExplicitMarker
explicitMarker match {
case Some(marker) =>
val markerNode = marker.getNode
val correctChildren = node.getChildren(null).filter(isNotEmptyNode)
val (beforeMarker, afterMarker) = correctChildren.span(_ != markerNode)
val hasValidTail = afterMarker.nonEmpty && (
afterMarker.head.getElementType == tLBRACE && afterMarker.last.getElementType == tRBRACE ||
afterMarker.head.getElementType == tCOLON
)
for (child <- if (hasValidTail) beforeMarker else correctChildren) {
subBlocks.add(subBlock(child))
}
if (hasValidTail) {
subBlocks.add(subBlock(afterMarker.head, afterMarker.last))
}
return subBlocks
case _ =>
}
case interpolated: ScInterpolatedStringLiteral =>
//create and store alignment; required for support of multi-line interpolated strings (SCL-8665)
interpolated.putUserData(interpolatedStringAlignmentsKey, buildQuotesAndMarginAlignments)
case paramClause: ScParameterClause =>
paramClause.putUserData(typeParameterTypeAnnotationAlignmentsKey, Alignment.createAlignment(true))
case psi@(_: ScValueOrVariable | _: ScFunction) if node.getFirstChildNode.getPsi.isInstanceOf[PsiComment] =>
val childrenFiltered: Array[ASTNode] = node.getChildren(null).filter(isNotEmptyNode)
val childHead :: childTail = childrenFiltered.toList
subBlocks.add(subBlock(childHead))
val indent: Indent = {
val prevNonWsNode: Option[PsiElement] = psi.prevSibling match {
case Some(prev@Whitespace(s)) =>
if (s.contains("\\n")) None
else prev.prevSibling
case prev =>
prev
}
prevNonWsNode.map(_.elementType) match {
case Some(`tLBRACE` | `tLPARENTHESIS`) if scalaSettings.KEEP_COMMENTS_ON_SAME_LINE =>
Indent.getNormalIndent
case _ =>
Indent.getNoneIndent
}
}
subBlocks.add(subBlock(childTail.head, childTail.last, null, Some(indent)))
return subBlocks
case _ =>
}
val sharedAlignment: Alignment = createAlignment(node)
val children = node.getChildren(null)
for (child <- children if isNotEmptyNode(child)) {
val childAlignment: Alignment = calcChildAlignment(node, child, sharedAlignment)
val needFlattenInterpolatedStrings = child.getFirstChildNode == null &&
child.getElementType == tINTERPOLATED_MULTILINE_STRING &&
ss.supportMultilineString
if (needFlattenInterpolatedStrings) {
subBlocks.addAll(getMultilineStringBlocks(child))
} else {
subBlocks.add(subBlock(child, null, childAlignment))
}
}
subBlocks
}
private def calcChildAlignment(parent: ASTNode, child: ASTNode, sharedAlignment: Alignment): Alignment =
parent.getPsi match {
case _: ScDocListItem if scalaSettings.SD_ALIGN_LIST_ITEM_CONTENT =>
val doNotAlignInListItem = child.getElementType match {
case ScalaDocTokenType.DOC_LIST_ITEM_HEAD |
ScalaDocTokenType.DOC_COMMENT_LEADING_ASTERISKS |
ScalaDocTokenType.DOC_WHITESPACE |
ScalaDocTokenType.DOC_INNER_CODE_TAG |
ScalaDocElementTypes.DOC_LIST => true
case _ => false
}
if (doNotAlignInListItem) null
else sharedAlignment
case params: ScParameters =>
val firstParameterStartsFromNewLine =
commonSettings.METHOD_PARAMETERS_LPAREN_ON_NEXT_LINE ||
params.clauses.headOption.flatMap(_.parameters.headOption).forall(_.startsFromNewLine())
if (firstParameterStartsFromNewLine && !scalaSettings.INDENT_FIRST_PARAMETER) null
else sharedAlignment
case _: ScParameterClause =>
child.getElementType match {
case `tRPARENTHESIS` | `tLPARENTHESIS` => null
case _ => sharedAlignment
}
case _: ScArgumentExprList =>
child.getElementType match {
case `tRPARENTHESIS` if cs.ALIGN_MULTILINE_PARAMETERS_IN_CALLS => sharedAlignment
case `tRPARENTHESIS` | `tLPARENTHESIS` => null
case ScCodeBlockElementType.BlockExpression if ss.DO_NOT_ALIGN_BLOCK_EXPR_PARAMS => null
case _ if cs.ALIGN_MULTILINE_PARAMETERS_IN_CALLS => sharedAlignment
case _ => null
}
case patt: ScPatternArgumentList =>
child.getElementType match {
case `tRPARENTHESIS` if cs.ALIGN_MULTILINE_PARAMETERS_IN_CALLS && patt.missedLastExpr => sharedAlignment
case `tRPARENTHESIS` | `tLPARENTHESIS` => null
case ScCodeBlockElementType.BlockExpression if ss.DO_NOT_ALIGN_BLOCK_EXPR_PARAMS => null
case _ if cs.ALIGN_MULTILINE_PARAMETERS_IN_CALLS => sharedAlignment
case _ => null
}
case _: ScMethodCall | _: ScReferenceExpression =>
if (child.getElementType == tIDENTIFIER &&
child.getPsi.getParent.isInstanceOf[ScReferenceExpression] &&
child.getPsi.getParent.asInstanceOf[ScReferenceExpression].qualifier.isEmpty) null
else if (child.getPsi.isInstanceOf[ScExpression]) null
else sharedAlignment
case _: ScXmlStartTag | _: ScXmlEmptyTag =>
child.getElementType match {
case ScalaElementType.XML_ATTRIBUTE => sharedAlignment
case _ => null
}
case _: ScXmlElement =>
child.getElementType match {
case ScalaElementType.XML_START_TAG | ScalaElementType.XML_END_TAG => sharedAlignment
case _ => null
}
case param: ScParameter =>
import ScalaCodeStyleSettings._
val addAlignmentToChild = ss.ALIGN_PARAMETER_TYPES_IN_MULTILINE_DECLARATIONS match {
case ALIGN_ON_COLON => child.getElementType == tCOLON
case ALIGN_ON_TYPE => child.getElementType == ScalaElementType.PARAM_TYPE
case _ => false
}
if (addAlignmentToChild) {
val parameterClause = Option(PsiTreeUtil.getParentOfType(param, classOf[ScParameterClause], false))
val alignmentOpt = parameterClause.flatMap(cachedParameterTypeAnnotationAlignment)
alignmentOpt.getOrElse(sharedAlignment)
}
else sharedAlignment
case literal: ScInterpolatedStringLiteral if child.getElementType == tINTERPOLATED_STRING_END =>
cachedAlignment(literal).map(_.quotes).orNull
case _ =>
sharedAlignment
}
private def addScalaDocCommentSubBlocks(docCommentNode: ASTNode, subBlocks: util.ArrayList[Block]): Unit = {
val node = docCommentNode
val alignment = createAlignment(node)
var prevTagName : Option[String] = None
var lastTagContextAlignment: Alignment = Alignment.createAlignment(true)
for (child <- node.getChildren(null) if isNotEmptyDocNode(child)) {
val tagContextAlignment = child.getElementType match {
case ScalaDocElementTypes.DOC_TAG =>
val tagName = child.getFirstChildNode.withTreeNextNodes.find(_.getElementType == ScalaDocTokenType.DOC_TAG_NAME).map(_.getText)
if (prevTagName.isEmpty || prevTagName != tagName)
lastTagContextAlignment = Alignment.createAlignment(true)
prevTagName = tagName
Some(lastTagContextAlignment)
case _ => None
}
val context = tagContextAlignment.map(a => new SubBlocksContext(alignment = Some(a)))
subBlocks.add(subBlock(child, null, alignment, context = context))
}
}
private def addScalaDocTagSubBlocks(docTag: ScDocTag, subBlocks: util.ArrayList[Block]): Unit = {
import ScalaDocTokenType._
val children = docTag.getNode.getFirstChildNode.withTreeNextNodes.toList
val (childrenLeading, childrenFromNameElement) =
children.span(_.getElementType != ScalaDocTokenType.DOC_TAG_NAME)
    /**
     * A tag does not have to start with its name element: this can happen e.g. when leading asterisks
     * are added in [[org.jetbrains.plugins.scala.lang.formatting.processors.ScalaDocNewlinedPreFormatProcessor]].
     * A tag can also contain leading whitespace.
     */
childrenLeading.foreach { c =>
if (isNotEmptyDocNode(c))
subBlocks.add(subBlock(c))
}
childrenFromNameElement match {
case tagName :: space :: tagParameter :: tail
if Option(docTag.getValueElement).exists(_.getNode == tagParameter) =>
subBlocks.add(subBlock(tagName))
subBlocks.add(subBlock(tagParameter, tail.lastOption.orNull))
case tagName :: tail =>
subBlocks.add(subBlock(tagName))
if (tail.nonEmpty) {
val (leadingAsterisks, other) = tail
.filter(isNotEmptyDocNode)
.span(_.getElementType == DOC_COMMENT_LEADING_ASTERISKS)
leadingAsterisks.foreach { c =>
subBlocks.add(subBlock(c))
}
if (other.nonEmpty)
subBlocks.add(subBlock(other.head, other.last))
}
case _ =>
}
}
private def getCaseClauseGroupSubBlocks(node: ASTNode): util.ArrayList[Block] = {
val children = node.getChildren(null).filter(isNotEmptyNode)
val subBlocks = new util.ArrayList[Block]
def getPrevGroupNode(nodePsi: PsiElement) = {
var prev = nodePsi.getPrevSibling
var breaks = 0
def isOk(psi: PsiElement): Boolean = psi match {
case _: ScCaseClause => true
case _: PsiComment => false
case _: PsiWhiteSpace =>
breaks += psi.getText.count(_ == '\\n')
false
case _ =>
breaks += 2
false
}
while (prev != null && breaks <= 1 && !isOk(prev)) {
prev = prev.getPrevSibling
}
if (breaks == 1 && prev != null) prev.getNode
else null
}
var prevChild: ASTNode = null
for (child <- children) {
val childAlignment = calcGtoupChildAlignment(node, child)(getPrevGroupNode)(FunctionTypeTokenSet)
subBlocks.add(subBlock(child, null, childAlignment))
prevChild = child
}
subBlocks
}
private def getFieldGroupSubBlocks(node: ASTNode): util.ArrayList[Block] = {
val children = node.getChildren(null).filter(isNotEmptyNode)
val subBlocks = new util.ArrayList[Block]
def getPrevGroupNode(nodePsi: PsiElement) = {
var prev = nodePsi.getPrevSibling
var breaks = 0
def isOk(psi: PsiElement): Boolean = psi match {
case ElementType(t) if t == tSEMICOLON =>
false
case _: ScVariableDeclaration | _: ScValueDeclaration if nodePsi.is[ScPatternDefinition, ScVariableDefinition] =>
breaks += 2
false
case _: ScVariableDefinition | _: ScPatternDefinition if nodePsi.is[ScValueDeclaration, ScValueDeclaration] =>
breaks += 2
false
case _: ScVariable | _: ScValue =>
def hasEmptyModifierList(psi: PsiElement): Boolean = psi match {
case mod: ScModifierListOwner if mod.getModifierList.getTextLength == 0 => true
case _ => false
}
if (hasEmptyModifierList(psi) != hasEmptyModifierList(nodePsi)) {
breaks += 2
false
} else {
true
}
case _: PsiComment =>
false
case _: PsiWhiteSpace =>
breaks += psi.getText.count(_ == '\\n')
false
case _ =>
breaks += 2
false
}
while (prev != null && breaks <= 1 && !isOk(prev)) {
prev = prev.getPrevSibling
}
if (breaks == 1 && prev != null) prev.getNode
else null
}
var prevChild: ASTNode = null
for (child <- children) {
//TODO process rare case of first-line comment before one of the fields for SCL-10000 here
val childAlignment = calcGtoupChildAlignment(node, child)(getPrevGroupNode)(FieldGroupSubBlocksTokenSet)
subBlocks.add(subBlock(child, null, childAlignment))
prevChild = child
}
subBlocks
}
@tailrec
private def calcGtoupChildAlignment(node: ASTNode, child: ASTNode)
(getPrevGroupNode: PsiElement => ASTNode)
(implicit tokenSet: TokenSet): Alignment = {
def createNewAlignment: Alignment = {
val alignment = Alignment.createAlignment(true)
child.getPsi.putUserData(fieldGroupAlignmentKey, alignment)
alignment
}
val prev = getPrevGroupNode(node.getPsi)
child.getElementType match {
case elementType if tokenSet.contains(elementType) =>
prev match {
case null => createNewAlignment
case _ =>
prev.findChildByType(elementType) match {
case null => calcGtoupChildAlignment(prev, child)(getPrevGroupNode)
case prevChild =>
val newAlignment = prevChild.getPsi.getUserData(fieldGroupAlignmentKey) match {
case null => createNewAlignment
case alignment => alignment
}
child.getPsi.putUserData(fieldGroupAlignmentKey, newAlignment)
newAlignment
}
}
case _ => null
}
}
private def getExtendsSubBlocks(node: ASTNode, extBlock: ScExtendsBlock): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]
val firstChild = extBlock.getFirstChild
if (firstChild == null) return subBlocks
val tempBody = extBlock.templateBody
val lastChild = tempBody.map(_.getPrevSiblingNotWhitespace).getOrElse(extBlock.getLastChild)
if (lastChild != null) {
val alignment =
if (ss.ALIGN_EXTENDS_WITH == ScalaCodeStyleSettings.ALIGN_TO_EXTENDS) Alignment.createAlignment(false)
else null
subBlocks.add(subBlock(firstChild.getNode, lastChild.getNode, alignment))
}
tempBody match {
case Some(x) =>
subBlocks.add(subBlock(x.getNode))
case _ =>
}
subBlocks
}
private def getForSubBlocks(node: ASTNode, children: Array[ASTNode]): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]()
var prevChild: ASTNode = null
def addTail(tail: List[ASTNode]): Unit = {
for (child <- tail) {
if (!isYieldOrDo(child))
if (prevChild != null && isYieldOrDo(prevChild))
subBlocks.add(subBlock(prevChild, child))
else
subBlocks.add(subBlock(child, null))
prevChild = child
}
if (prevChild != null && isYieldOrDo(prevChild)) {
//add a block for 'yield' in case of incomplete for statement (expression after yield is missing)
subBlocks.add(subBlock(prevChild, null))
}
}
@tailrec
def addFor(children: List[ASTNode]): Unit = children match {
case forWord :: tail if forWord.getElementType == kFOR =>
subBlocks.add(subBlock(forWord, null))
addFor(tail)
case lParen :: tail if LBRACE_LPARENT_TOKEN_SET.contains(lParen.getElementType) =>
val closingType =
if (lParen.getElementType == tLPARENTHESIS) tRPARENTHESIS
else tRBRACE
val afterClosingParent = tail.dropWhile(_.getElementType != closingType)
afterClosingParent match {
case Nil =>
addTail(children)
case rParent :: yieldNodes =>
val enumerators = tail.dropWhile(x => ScalaTokenTypes.COMMENTS_TOKEN_SET.contains(x.getElementType)).head
val context = if (commonSettings.ALIGN_MULTILINE_FOR && !enumerators.getPsi.startsFromNewLine()) {
val alignment = Alignment.createAlignment()
Some(SubBlocksContext(Map(rParent -> alignment, enumerators -> alignment)))
} else {
None
}
subBlocks.add(subBlock(lParen, rParent, context = context))
addTail(yieldNodes)
}
case _ =>
addTail(children)
}
addFor(children.filter(isNotEmptyNode).toList)
subBlocks
}
private def getIfSubBlocks(node: ASTNode, alignment: Alignment): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]
val firstChildFirstNode = node.getFirstChildNode // `if`
val firstChildLastNode = firstChildFirstNode
.treeNextNodes
.takeWhile(_.getElementType != kELSE)
.lastOption
.getOrElse(firstChildFirstNode)
val firstBlock = subBlock(firstChildFirstNode, firstChildLastNode, alignment)
subBlocks.add(firstBlock)
val elseFirstChild: ASTNode = {
val commentsHandled = firstChildLastNode
.treeNextNodes
.takeWhile(isComment)
.map { c =>
val next = c.getTreeNext
subBlocks.add(subBlock(c, next))
next
}
.lastOption
.getOrElse(firstChildLastNode)
commentsHandled.getTreeNext
}
if (elseFirstChild != null) {
val elseLastNode = elseFirstChild.treeNextNodes
.takeWhile(n => if (cs.SPECIAL_ELSE_IF_TREATMENT) n.getElementType != kIF else true)
.lastOption
.getOrElse(elseFirstChild)
subBlocks.add(subBlock(elseFirstChild, elseLastNode, alignment, Some(firstBlock.indent)))
val next = elseLastNode.getTreeNext
if (next != null && next.getElementType == kIF) {
subBlocks.addAll(getIfSubBlocks(next, alignment))
}
}
subBlocks
}
private def interpolatedRefLength(node: ASTNode): Int =
if (node.getElementType == tINTERPOLATED_MULTILINE_STRING) {
node.getPsi.getParent match {
case str: ScInterpolatedStringLiteral => str.referenceName.length
case _ => 0
}
} else 0
private def buildQuotesAndMarginAlignments: InterpolatedStringAlignments = {
val quotesAlignment = if (scalaSettings.MULTILINE_STRING_ALIGN_DANGLING_CLOSING_QUOTES) Alignment.createAlignment() else null
val marginAlignment = Alignment.createAlignment(true)
InterpolatedStringAlignments(quotesAlignment, marginAlignment)
}
private def getMultilineStringBlocks(node: ASTNode): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]
val interpolatedOpt = Option(PsiTreeUtil.getParentOfType(node.getPsi, classOf[ScInterpolatedStringLiteral]))
val InterpolatedStringAlignments(quotesAlignment, marginAlignment) =
interpolatedOpt
.flatMap(cachedAlignment)
.getOrElse(buildQuotesAndMarginAlignments)
val wrap = Wrap.createWrap(WrapType.NONE, true)
val marginChar = MultilineStringUtil.getMarginChar(node.getPsi)
val marginIndent = Indent.getSpaceIndent(ss.MULTILINE_STRING_MARGIN_INDENT + interpolatedRefLength(node), true)
def relativeRange(start: Int, end: Int, shift: Int = 0): TextRange =
TextRange.from(node.getStartOffset + shift + start, end - start)
val lines = node.getText.split("\\n")
var acc = 0
lines.foreach { line =>
val trimmedLine = line.trim()
val lineLength = line.length
val linePrefixLength = if (settings.useTabCharacter(ScalaFileType.INSTANCE)) {
val tabsCount = line.segmentLength(_ == '\\t')
tabsCount + line.substring(tabsCount).segmentLength(_ == ' ')
} else {
line.segmentLength(_ == ' ')
}
if (trimmedLine.startsWith(marginChar)) {
val marginRange = relativeRange(linePrefixLength, linePrefixLength + 1, acc)
subBlocks.add(new StringLineScalaBlock(marginRange, node, block, marginAlignment, marginIndent, null, settings))
val contentRange = relativeRange(linePrefixLength + 1, lineLength, acc)
subBlocks.add(new StringLineScalaBlock(contentRange, node, block, null, Indent.getNoneIndent, wrap, settings))
} else if (trimmedLine.nonEmpty) {
val (range, myIndent, myAlignment) =
if (trimmedLine.startsWith(MultilineQuotes)) {
if (acc == 0) {
val hasMarginOnFirstLine = trimmedLine.charAt(MultilineQuotes.length.min(trimmedLine.length - 1)) == '|'
if (hasMarginOnFirstLine && lineLength > 3) {
val range = relativeRange(0, 3)
val marginBlock = new StringLineScalaBlock(range, node, block, quotesAlignment, Indent.getNoneIndent, null, settings)
subBlocks.add(marginBlock)
//now, return block parameters for text after the opening quotes
(relativeRange(3, lineLength), Indent.getNoneIndent, marginAlignment)
} else {
(relativeRange(linePrefixLength, lineLength), Indent.getNoneIndent, quotesAlignment)
}
} else {
(relativeRange(linePrefixLength, lineLength, acc), Indent.getNoneIndent, quotesAlignment)
}
} else {
(relativeRange(0, lineLength, acc), Indent.getAbsoluteNoneIndent, null)
}
subBlocks.add(new StringLineScalaBlock(range, node, block, myAlignment, myIndent, null, settings))
}
acc += lineLength + 1
}
subBlocks
}
private def getInfixBlocks(node: ASTNode, parentAlignment: Alignment = null): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]
val children = node.getChildren(null)
val alignment =
if (parentAlignment != null) parentAlignment
else createAlignment(node)
for (child <- children) {
if (InfixElementsTokenSet.contains(child.getElementType) && infixPriority(node) == infixPriority(child)) {
subBlocks.addAll(getInfixBlocks(child, alignment))
} else if (isNotEmptyNode(child)) {
subBlocks.add(subBlock(child, null, alignment))
}
}
subBlocks
}
private def infixPriority(node: ASTNode): Int = node.getPsi match {
case inf: ScInfixExpr => ParserUtils.priority(inf.operation.getText, assignments = true)
case inf: ScInfixPattern => ParserUtils.priority(inf.operation.getText, assignments = false)
case inf: ScInfixTypeElement => ParserUtils.priority(inf.operation.getText, assignments = false)
case _ => 0
}
private def getMethodCallOrRefExprSubBlocks(node: ASTNode): util.ArrayList[Block] = {
val dotAlignment = if (cs.ALIGN_MULTILINE_CHAINED_METHODS) Alignment.createAlignment() else null
val dotWrap = block.suggestedWrap
val result = new util.ArrayList[Block]
@scala.annotation.tailrec
def collectChainedMethodCalls(
node: ASTNode,
dotFollowedByNewLine: Boolean = false,
delegatedChildren: List[ASTNode] = List(),
delegatedContext: Map[ASTNode, SubBlocksContext] = Map(),
): Unit = {
node.getPsi match {
        case _: ScLiteral | _: ScBlockExpr =>
result.add(subBlock(node, null))
for (child <- delegatedChildren.filter(isNotEmptyNode)) {
result.add(subBlock(child, null))
}
return
case _ =>
}
val alignment = createAlignment(node)
val childrenAll = node.getChildren(null).filter(isNotEmptyNode).toList
/**
* some edge cases with comments in the middle of a method call: {{{
* Seq(1) // comment
* .map(_ * 2)
*
* foo // comment
* {}
* }}}
*/
val (comments, children) = childrenAll.partition(isComment)
      //don't check for element types other than absolutely required - they do not matter
children match {
// foo(1, 2, 3)
case caller :: args :: Nil if args.getPsi.is[ScArgumentExprList] =>
collectChainedMethodCalls(
caller, dotFollowedByNewLine,
childrenAll.filter(it => !(it eq caller)) ::: delegatedChildren
)
// obj.foo
case expr :: dot :: id :: Nil if dot.getElementType == tDOT =>
// delegatedChildren can be args or typeArgs
val idAdditionalNodes = {
// using Set we imply that ASTNode equals and hashCode methods are lightweight (default implementation)
val filterOutNodes = delegatedContext.values.flatMap(_.additionalNodes).toSet
sorted(delegatedChildren.filterNot(filterOutNodes.contains))
}
val context = SubBlocksContext(id, idAdditionalNodes, Some(dotAlignment), delegatedContext)
result.add(subBlock(dot, lastNode(id :: delegatedChildren), dotAlignment, wrap = Some(dotWrap), context = Some(context)))
assert(childrenAll.head.eq(expr), "assuming that first child is expr and comments can't go before it")
val commentsBeforeDot = childrenAll.tail.takeWhile(isComment)
commentsBeforeDot.foreach { comment =>
val commentAlign = if (comment.getPsi.startsFromNewLine()) dotAlignment else null
result.add(subBlock(comment, comment, commentAlign, wrap = Some(dotWrap)))
}
val dotFollowedByNewLine = dot.getPsi.followedByNewLine()
collectChainedMethodCalls(expr, dotFollowedByNewLine)
// foo[String]
case expr :: typeArgs :: Nil if typeArgs.getPsi.is[ScTypeArgs] =>
if (expr.getChildren(null).length == 1) {
val actualAlignment = if (dotFollowedByNewLine) dotAlignment else alignment
val context = SubBlocksContext(typeArgs, sorted(delegatedChildren))
result.add(subBlock(expr, lastNode(typeArgs :: delegatedChildren), actualAlignment, context = Some(context)))
} else {
collectChainedMethodCalls(
expr, dotFollowedByNewLine,
typeArgs :: delegatedChildren ++ comments,
Map(typeArgs -> new SubBlocksContext(sorted(delegatedChildren)))
)
}
case expr :: Nil =>
val actualAlignment = if (dotFollowedByNewLine) dotAlignment else alignment
val context = SubBlocksContext(expr, delegatedChildren)
result.add(subBlock(expr, lastNode(delegatedChildren), actualAlignment, context = Some(context)))
case _ =>
val childrenWithDelegated = children ++ delegatedChildren
for (child <- childrenWithDelegated.filter(isNotEmptyNode)) {
result.add(subBlock(child, null))
}
}
}
collectChainedMethodCalls(node)
    // we need to sort blocks because we add them in the wrong order to keep the inner method tail-recursive
util.Collections.sort(result, util.Comparator.comparingInt[Block](_.getTextRange.getStartOffset))
result
}
@inline
private def lastNode(nodes: Seq[ASTNode]): ASTNode = sorted(nodes).lastOption.orNull
@inline
private def sorted(nodes: Seq[ASTNode]): Seq[ASTNode] = nodes.sortBy(_.getTextRange.getStartOffset)
@inline
private def isComment(node: ASTNode) = COMMENTS_TOKEN_SET.contains(node.getElementType)
private def createAlignment(node: ASTNode): Alignment = {
import commonSettings._
import Alignment.{createAlignment => create}
node.getPsi match {
case _: ScXmlStartTag => create //todo:
case _: ScXmlEmptyTag => create //todo:
case _: ScParameters if ALIGN_MULTILINE_PARAMETERS => create
case _: ScParameterClause if ALIGN_MULTILINE_PARAMETERS => create
case _: ScArgumentExprList if ALIGN_MULTILINE_PARAMETERS_IN_CALLS => create
case _: ScPatternArgumentList if ALIGN_MULTILINE_PARAMETERS_IN_CALLS => create
case _: ScEnumerators if ALIGN_MULTILINE_FOR => create
case _: ScParenthesisedExpr if ALIGN_MULTILINE_PARENTHESIZED_EXPRESSION => create
case _: ScParenthesisedTypeElement if ALIGN_MULTILINE_PARENTHESIZED_EXPRESSION => create
case _: ScParenthesisedPattern if ALIGN_MULTILINE_PARENTHESIZED_EXPRESSION => create
case _: ScInfixExpr if ALIGN_MULTILINE_BINARY_OPERATION => create
case _: ScInfixPattern if ALIGN_MULTILINE_BINARY_OPERATION => create
case _: ScInfixTypeElement if ALIGN_MULTILINE_BINARY_OPERATION => create
case _: ScCompositePattern if ss.ALIGN_COMPOSITE_PATTERN => create
case _: ScMethodCall |
_: ScReferenceExpression |
_: ScThisReference |
_: ScSuperReference if ALIGN_MULTILINE_CHAINED_METHODS => create
case _: ScDocListItem if ss.SD_ALIGN_LIST_ITEM_CONTENT => create(true)
case _ => null
}
}
private def applyInner(node: ASTNode, lastNode: ASTNode): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]
def childBlock(child: ASTNode): ScalaBlock = {
val lastNode = block.getChildBlockLastNode(child)
val alignment = block.getCustomAlignment(child).orNull
val context = block.subBlocksContext.flatMap(_.childrenAdditionalContexts.get(child))
subBlock(child, lastNode, alignment, context = context)
}
var child: ASTNode = node
do {
if (isNotEmptyNode(child)) {
if (child.getPsi.isInstanceOf[ScTemplateParents]) {
subBlocks.addAll(getTemplateParentsBlocks(child))
} else {
subBlocks.add(childBlock(child))
}
}
} while (child != lastNode && {
child = child.getTreeNext
child != null
})
//it is not used right now, but could come in handy later
for {
context <- block.subBlocksContext
additionalNode <- context.additionalNodes
} {
subBlocks.add(childBlock(additionalNode))
}
subBlocks
}
private def isScalaDocNode(node: ASTNode): Boolean = {
def isInsideIncompleteScalaDocTag = {
val parent = node.getTreeParent
      parent != null && parent.getElementType == ScalaDocElementTypes.DOC_TAG &&
node.getPsi.isInstanceOf[PsiErrorElement]
}
node.getElementType == ScalaDocElementTypes.SCALA_DOC_COMMENT ||
ScalaDocElementTypes.AllElementAndTokenTypes.contains(node.getElementType) ||
isInsideIncompleteScalaDocTag
}
private def applyInnerScaladoc(node: ASTNode): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]
val nodePsi = node.getPsi
nodePsi match {
case _: ScDocComment => addScalaDocCommentSubBlocks(node, subBlocks)
case docTag: ScDocTag => addScalaDocTagSubBlocks(docTag, subBlocks)
case _ =>
val sharedAlignment = createAlignment(node)
val children = node.getChildren(null)
for (child <- children if isNotEmptyDocNode(child)) {
val childAlignment = calcChildAlignment(node, child, sharedAlignment)
subBlocks.add(subBlock(child, null, childAlignment))
}
}
subBlocks
}
private def applyInnerScaladoc(node: ASTNode, lastNode: ASTNode): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]
val parent = node.getTreeParent
var scaladocNode = node.getElementType match {
case ScalaDocTokenType.DOC_TAG_VALUE_TOKEN =>
subBlocks.add(subBlock(node, indent = Some(Indent.getNoneIndent)))
node.getTreeNext
case _ =>
node
}
val children = ArrayBuffer[ASTNode]()
do {
if (needFlattenDocElementChildren(scaladocNode)) {
flattenChildren(scaladocNode, children)
} else {
children += scaladocNode
}
} while (scaladocNode != lastNode && { scaladocNode = scaladocNode.getTreeNext; true })
val normalAlignment =
block.parentBlock.subBlocksContext.flatMap(_.alignment)
.getOrElse(Alignment.createAlignment(true))
children.view.filter(isNotEmptyDocNode).foreach { child =>
import ScalaDocTokenType._
val childType = child.getElementType
val isDataInsideDocTag: Boolean =
parent.getElementType == ScalaDocElementTypes.DOC_TAG && (childType match {
case DOC_WHITESPACE | DOC_COMMENT_LEADING_ASTERISKS | DOC_TAG_NAME => false
case _ => true
})
val (childAlignment, childWrap) =
if (isDataInsideDocTag) {
val tagElement = parent.getPsi.asInstanceOf[ScDocTag]
val tagNameElement = tagElement.getNameElement
val tagName = tagNameElement.getText
val alignment = childType match {
case DOC_INNER_CODE |
DOC_INNER_CLOSE_CODE_TAG |
DOC_INNER_CODE_TAG |
ScalaDocElementTypes.DOC_LIST => null
case _ =>
tagName match {
case "@param" | "@tparam" => if (ss.SD_ALIGN_PARAMETERS_COMMENTS) normalAlignment else null
case "@return" => if (ss.SD_ALIGN_RETURN_COMMENTS) normalAlignment else null
case "@throws" => if (ss.SD_ALIGN_EXCEPTION_COMMENTS) normalAlignment else null
case _ => if (ss.SD_ALIGN_OTHER_TAGS_COMMENTS) normalAlignment else null
}
}
val noWrap = Wrap.createWrap(WrapType.NONE, false)
(alignment, noWrap)
} else {
(null, arrangeSuggestedWrapForChild(block, child, block.suggestedWrap))
}
subBlocks.add(subBlock(child, null, childAlignment, wrap = Some(childWrap)))
}
subBlocks
}
private def needFlattenDocElementChildren(node: ASTNode): Boolean = {
val check1 = node.getElementType match {
case ScalaDocElementTypes.DOC_PARAGRAPH => true
case ScalaDocElementTypes.DOC_LIST => false
case _ => node.textContains('\\n')
}
check1 && node.getFirstChildNode != null
}
private def flattenChildren(multilineNode: ASTNode, buffer: ArrayBuffer[ASTNode]): Unit =
for (nodeChild <- multilineNode.getChildren(null))
if (needFlattenDocElementChildren(nodeChild))
flattenChildren(nodeChild, buffer)
else
buffer += nodeChild
private def getTemplateParentsBlocks(node: ASTNode): util.ArrayList[Block] = {
val subBlocks = new util.ArrayList[Block]
import ScalaCodeStyleSettings._
val alignSetting = ss.ALIGN_EXTENDS_WITH
val alignment =
if (alignSetting == ALIGN_TO_EXTENDS) block.getAlignment
else Alignment.createAlignment(true)
val children = node.getChildren(null)
for (child <- children if isNotEmptyNode(child)) {
val actualAlignment = (child.getElementType, alignSetting) match {
case (_, DO_NOT_ALIGN) => null
case (`kWITH` | `kEXTENDS`, ON_FIRST_ANCESTOR) => null
case _ => alignment
}
val lastNode = block.getChildBlockLastNode(child)
val context = block.subBlocksContext.flatMap(_.childrenAdditionalContexts.get(child))
subBlocks.add(subBlock(child, lastNode, actualAlignment, context = context))
}
subBlocks
}
private def subBlock(node: ASTNode,
lastNode: ASTNode = null,
alignment: Alignment = null,
indent: Option[Indent] = None,
wrap: Option[Wrap] = None,
context: Option[SubBlocksContext] = None): ScalaBlock = {
val indentFinal = indent.getOrElse(ScalaIndentProcessor.getChildIndent(block, node))
val wrapFinal = wrap.getOrElse(arrangeSuggestedWrapForChild(block, node, block.suggestedWrap))
new ScalaBlock(block, node, lastNode, alignment, indentFinal, wrapFinal, settings, context)
}
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/formatting/getDummyBlocks.scala | Scala | apache-2.0 | 43,653 |
package skuber
/**
* @author David O'Riordan
*/
package object settings {
type PodPresetList=ListResource[PodPreset]
}
| doriordan/skuber | client/src/main/scala/skuber/settings/package.scala | Scala | apache-2.0 | 126 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.CATO04
import uk.gov.hmrc.ct.box.{CtBigDecimal, CtBoxIdentifier, Linked}
case class B64(value: BigDecimal) extends CtBoxIdentifier("Marginal Rate Relief") with CtBigDecimal
object B64 extends Linked[CATO04, B64] {
override def apply(source: CATO04): B64 = B64(source.value)
}
| keithhall/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B64.scala | Scala | apache-2.0 | 941 |
package coursier.cli.jvm
import caseapp._
// format: off
final case class SharedJavaOptions(
@Group("Java")
jvm: Option[String] = None,
@Group("Java")
@Hidden
systemJvm: Option[Boolean] = None,
@Group("Java")
@Hidden
update: Boolean = false,
@Group("Java")
jvmIndex: Option[String] = None
)
// format: on
| alexarchambault/coursier | modules/cli/src/main/scala/coursier/cli/jvm/SharedJavaOptions.scala | Scala | apache-2.0 | 339 |
package org.dita.dost.module
import scala.collection.JavaConversions._
import java.io.File
import org.dita.dost.util.FileUtils
class XHTML(ditaDir: File) extends XHTMLBase(ditaDir) {
$("ant.file.dita2xhtml") = new File("plugins/org.dita.xhtml/build_dita2xhtml.xml")
override val transtype = "xhtml"
// private def dita2html5Init() {
// logger.info("dita2html5.init:")
// $("html-version") = "html5"
// }
//
// def dita2html5() {
// logger.info("dita2html5:")
// dita2html5Init()
// buildInit()
// preprocess()
// xhtmlTopics()
// ditaMapXhtml()
// copyCss()
// }
def dita2xhtmlInit() {
logger.info("dita2xhtml.init:")
if (!$.contains("html-version")) {
$("html-version") = "xhtml"
}
}
override def run() {
dita2xhtmlInit()
buildInit()
preprocess()
xhtmlTopics()
ditaMapXhtml()
copyCss()
}
def ditaMapXhtml() {
logger.info("dita.map.xhtml:")
ditaMapXhtmlInit()
ditaMapXhtmlToc()
}
def ditaMapXhtmlInit() {
if (job.getFileInfo.find(_.format == "ditamap").isEmpty) {
return
}
logger.info("dita.map.xhtml.init:")
if (!$.contains("args.xhtml.toc.xsl")) {
$("args.xhtml.toc.xsl") = $("dita.plugin.org.dita.xhtml.dir") + "/xsl/map2" + $("html-version") + "toc.xsl"
}
if (!$.contains("args.xhtml.toc")) {
$("args.xhtml.toc") = "index"
}
}
/** Build HTML TOC file */
def ditaMapXhtmlToc() {
if (job.getFileInfo.find(_.format == "ditamap").isEmpty) {
return
}
logger.info("dita.map.xhtml.toc:")
val templates = compileTemplates(new File($("args.xhtml.toc.xsl")))
val transformer = templates.newTransformer()
transformer.setParameter("OUTEXT", $("out.ext"))
if ($.contains("args.xhtml.contenttarget")) {
transformer.setParameter("contenttarget", $("args.xhtml.contenttarget"))
}
if ($.contains("args.css.file")) {
transformer.setParameter("CSS", $("args.css.file"))
}
if ($.contains("user.csspath")) {
transformer.setParameter("CSSPATH", $("user.csspath"))
}
if ($.contains("args.xhtml.toc.class")) {
transformer.setParameter("OUTPUTCLASS", $("args.xhtml.toc.class"))
}
val l = new File(job.getInputMap)
val inFile = new File(ditaTempDir, l.getPath)
val outDir = if (oldTransform) {
new File(outputDir, l.getPath).getAbsoluteFile.getParent
} else {
new File(outputDir, l.getPath).getAbsoluteFile.getParent + File.separator + job.getProperty("uplevels")
}
val outFile = new File(outDir, $("args.xhtml.toc") + $("out.ext")).getCanonicalFile
if (!outFile.getParentFile.exists) {
outFile.getParentFile.mkdirs()
}
val source = getSource(inFile)
val result = getResult(outFile)
logger.info("Processing " + inFile + " to " + outFile)
transformer.transform(source, result)
}
def copyRevflag() {
if (!$.contains("dita.input.valfile")) {
return
}
logger.info("copy-revflag:")
logger.info(get_msg("DOTA069W"))
}
/** Copy CSS files */
def copyCss() {
if ($.contains("user.csspath.url")) {
return
}
logger.info("copy-css:")
val userCsspathReal = new File(outputDir, $("user.csspath"))
if (!userCsspathReal.exists) {
userCsspathReal.mkdirs()
}
copy(new File($("dita.plugin.org.dita.xhtml.dir"), "resource"),
userCsspathReal,
Set("*.css"))
if ($("args.copycss") == "yes" && $.contains("args.css.present")) {
FileUtils.copyFile(new File($("args.css.real")), new File(userCsspathReal, new File($("args.css.real")).getName))
}
}
}
| jelovirt/muuntaja | src/main/scala/org/dita/dost/module/XHTML.scala | Scala | apache-2.0 | 3,626 |
package com.giyeok.jparser.nparser
import com.giyeok.jparser.Inputs.Character
import com.giyeok.jparser.ParseResultTree.{BindNode, JoinNode, TerminalNode}
import com.giyeok.jparser.{ParseResultTree, Symbols}
import org.scalatest.matchers.{MatchResult, Matcher}
object ParseTreeMatchers {
sealed abstract class TreeMatcher extends Matcher[ParseResultTree.Node]
case class TermM(expectedChar: Char) extends TreeMatcher {
override def apply(left: ParseResultTree.Node): MatchResult = left match {
case TerminalNode(_, Character(actualChar)) if actualChar == expectedChar =>
MatchResult(matches = true, "", "")
case _ =>
MatchResult(matches = false, s"Term($expectedChar) did not match to $left", "error")
}
}
case class BindM(expectedSymbol: Option[Symbols.Symbol], expectedBody: TreeMatcher) extends TreeMatcher {
override def apply(left: ParseResultTree.Node): MatchResult = left match {
case BindNode(actualSymbol, actualBody) if expectedSymbol.isEmpty || actualSymbol.symbol == expectedSymbol.get =>
expectedBody(actualBody)
case BindNode(actualSymbol, _) => MatchResult(matches = false,
s"Bind did not match, expected=${expectedSymbol.get.toShortString}, actual=${actualSymbol.symbol.toShortString}",
"")
case actual => MatchResult(matches = false,
s"Bind did not match, expected=${expectedSymbol.get.toShortString}, actual=$actual",
"")
}
}
object BindM {
def apply(expectedSymbol: Symbols.Symbol, expectedBody: TreeMatcher): BindM =
new BindM(Some(expectedSymbol), expectedBody)
def apply(expectedBody: TreeMatcher): BindM =
new BindM(None, expectedBody)
}
case class JoinM(expectedBody: TreeMatcher, expectedJoin: TreeMatcher) extends TreeMatcher {
override def apply(left: ParseResultTree.Node): MatchResult = left match {
case JoinNode(_, actualBody, actualJoin)
if expectedBody(actualBody).matches && expectedJoin(actualJoin).matches =>
MatchResult(matches = true, "", "")
case _ => MatchResult(matches = false, "join not matched", "join not matched")
}
}
case class SeqM(expected: List[TreeMatcher]) extends TreeMatcher {
override def apply(left: ParseResultTree.Node): MatchResult = left match {
case actual: ParseResultTree.SequenceNode if actual.children.size != expected.size => MatchResult(matches = false,
s"Seq match failed, expectedLength=${expected.size}, actualLength=${actual.children.size}",
"")
case actual: ParseResultTree.SequenceNode =>
val matches = actual.children.zip(expected).map(pair => pair._2(pair._1)).zipWithIndex
val failed = matches.filter(!_._1.matches)
MatchResult(matches = failed.isEmpty,
s"Seq match failed, mismatched at ${failed.map(_._1.failureMessage)}",
"")
case actual => MatchResult(matches = false,
s"Seq match failed, actual=$actual",
"")
}
}
object SeqM {
def apply(expected: TreeMatcher*): SeqM = new SeqM(expected.toList)
}
object DontCare extends TreeMatcher {
override def apply(left: ParseResultTree.Node): MatchResult = MatchResult(matches = true, "", "")
}
}
| Joonsoo/moon-parser | naive/src/test/scala/com/giyeok/jparser/nparser/ParseTreeMatchers.scala | Scala | mit | 3,228 |
package org.jetbrains.plugins.scala.testingSupport.locationProvider
import com.intellij.execution.PsiLocation
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
case class PsiLocationWithName[T <: PsiElement](
project: Project,
element: T,
name: String
) extends PsiLocation[T](project, element)
object PsiLocationWithName {
def apply[T <: PsiElement](element: T, name: String): PsiLocationWithName[T] =
new PsiLocationWithName(element.getProject, element, name)
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/testingSupport/locationProvider/PsiLocationWithName.scala | Scala | apache-2.0 | 507 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.fetch
import io.gatling.BaseSpec
import io.gatling.http.fetch.InferredResourceNaming._
import org.asynchttpclient.uri.Uri
class InferredResourceNamingSpec extends BaseSpec {
"UrlTrailInferredResourceNaming" should "return the url trail, query included" in {
UrlTrailInferredResourceNaming(Uri.create("http://foo.com/bar?baz=qic")) shouldBe "bar?baz=qic"
}
"AbsoluteUrlInferredResourceNaming" should "return the absolute url, query included" in {
AbsoluteUrlInferredResourceNaming(Uri.create("http://foo.com/bar?baz=qic")) shouldBe "http://foo.com/bar?baz=qic"
}
"RelativeUrlInferredResourceNaming" should "return the relative url, query included" in {
RelativeUrlInferredResourceNaming(Uri.create("http://foo.com/bar?baz=qic")) shouldBe "/bar?baz=qic"
}
"PathInferredResourceNaming" should "return full path" in {
PathInferredResourceNaming(Uri.create("http://foo.com/bar")) shouldBe "/bar"
}
it should "ignore query" in {
PathInferredResourceNaming(Uri.create("http://foo.com/bar?baz=qic")) shouldBe "/bar"
}
it should "not drop trailing /" in {
PathInferredResourceNaming(Uri.create("http://foo.com/bar/")) shouldBe "/bar/"
}
"LastPathElementInferredResourceNaming" should "return last path element" in {
LastPathElementInferredResourceNaming(Uri.create("http://foo.com/bla/foo.png?bar=baz")) shouldBe "foo.png"
}
it should "handle empty path" in {
LastPathElementInferredResourceNaming(Uri.create("http://foo.com")) shouldBe "/"
}
it should "handle root path" in {
LastPathElementInferredResourceNaming(Uri.create("http://foo.com/")) shouldBe "/"
}
it should "handle directory" in {
LastPathElementInferredResourceNaming(Uri.create("http://foo.com/bar/")) shouldBe "bar/"
}
it should "handle sub directory" in {
LastPathElementInferredResourceNaming(Uri.create("http://foo.com/bar/baz/")) shouldBe "baz/"
}
}
| wiacekm/gatling | gatling-http/src/test/scala/io/gatling/http/fetch/InferredResourceNamingSpec.scala | Scala | apache-2.0 | 2,556 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import java.util.Properties
import java.util.concurrent.atomic.AtomicBoolean
import akka.actor.Cancellable
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.features.KMDeleteTopicFeature
import kafka.manager.model._
import kafka.manager.utils.CuratorAwareTest
import kafka.manager.model.ActorModel.{KafkaManagedConsumer, TopicList, ZKManagedConsumer}
import kafka.test.{HighLevelConsumer, NewKafkaManagedConsumer, SeededBroker, SimpleProducer}
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try
/**
* @author hiral
*/
class TestKafkaManager extends CuratorAwareTest with BaseTest {
private[this] val seededTopic = "km-api-test"
private[this] val broker = new SeededBroker(seededTopic,4)
private[this] val kafkaServerZkPath = broker.getZookeeperConnectionString
private[this] val akkaConfig: Properties = new Properties()
akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
akkaConfig.setProperty("cmak.zkhosts",testServer.getConnectString)
akkaConfig.setProperty("cmak.broker-view-update-seconds","1")
akkaConfig.setProperty("cmak.kafka-manager-update-seconds","1")
akkaConfig.setProperty("cmak.delete-cluster-update-seconds","1")
akkaConfig.setProperty("cmak.consumer.properties.file","conf/consumer.properties")
private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
private[this] val kafkaManager : KafkaManager = new KafkaManager(config)
private[this] val duration = FiniteDuration(10,SECONDS)
private[this] val createTopicNameA = "km-unit-test-a"
private[this] val createTopicNameB = "km-unit-test-b"
private[this] val createLogkafkaLogkafkaId = "km-unit-test-logkafka-logkafka_id"
private[this] val createLogkafkaLogPath = "/km-unit-test-logkafka-logpath"
private[this] val createLogkafkaTopic = "km-unit-test-logkafka-topic"
private[this] var hlConsumer : Option[HighLevelConsumer] = None
private[this] var hlConsumerThread : Option[Thread] = None
private[this] val hlShutdown = new AtomicBoolean(false)
private[this] var newConsumer : Option[NewKafkaManagedConsumer] = None
private[this] var newConsumerThread : Option[Thread] = None
private[this] val newShutdown = new AtomicBoolean(false)
private[this] var simpleProducer : Option[SimpleProducer] = None
private[this] var simpleProducerThread : Option[Thread] = None
override protected def beforeAll() : Unit = {
super.beforeAll()
hlConsumer = Option(broker.getHighLevelConsumer)
hlConsumerThread = Option(new Thread() {
override def run(): Unit = {
while(!hlShutdown.get()) {
hlConsumer.map(_.read { ba => {
println(s"Read ba: $ba")
Option(ba).map(asString).foreach( s => println(s"hl consumer read message : $s"))
}
})
Thread.sleep(500)
}
}
})
hlConsumerThread.foreach(_.start())
newConsumer = Option(broker.getNewConsumer)
newConsumerThread = Option(new Thread() {
override def run(): Unit = {
while(!newShutdown.get()) {
newConsumer.map(_.read { message =>
Option(message).foreach( s => println(s"new consumer read message : $s"))
})
Thread.sleep(500)
}
}
})
newConsumerThread.foreach(_.start())
simpleProducer = Option(broker.getSimpleProducer)
simpleProducerThread = Option(new Thread() {
override def run(): Unit = {
var count = 0
while(!hlShutdown.get()) {
simpleProducer.foreach { p =>
p.send(s"simple message $count", null)
count+=1
Thread.sleep(500)
}
}
}
})
simpleProducerThread.foreach(_.start())
Thread.sleep(1000)
//val future = kafkaManager.addCluster("dev","1.1.0",kafkaServerZkPath, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(kafkaManager.defaultTuning), securityProtocol = "PLAINTEXT", saslMechanism = "PLAIN", jaasConfig = None)
//val result = Await.result(future,duration)
//assert(result.isRight === true)
//Thread.sleep(2000)
}
override protected def afterAll(): Unit = {
Try(newShutdown.set(true))
Try(hlShutdown.set(true))
Try(simpleProducerThread.foreach(_.interrupt()))
Try(hlConsumerThread.foreach(_.interrupt()))
Try(hlConsumer.foreach(_.close()))
Try(newConsumerThread.foreach(_.interrupt()))
Try(newConsumer.foreach(_.close()))
if(kafkaManager!=null) {
kafkaManager.shutdown()
}
Try(broker.shutdown())
super.afterAll()
}
private[this] def getTopicList() : TopicList = {
val future = kafkaManager.getTopicList("dev")
val result = Await.result(future,duration)
result.toOption.get
}
test("add cluster") {
val future = kafkaManager.addCluster("dev","2.4.1",kafkaServerZkPath, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(kafkaManager.defaultTuning), securityProtocol="PLAINTEXT", saslMechanism = None, jaasConfig = None)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("create topic") {
val futureA = kafkaManager.createTopic("dev",createTopicNameA,4,1)
val resultA = Await.result(futureA,duration)
val futureB = kafkaManager.createTopic("dev",createTopicNameB,4,1)
val resultB = Await.result(futureB,duration)
assert(resultA.isRight === true)
assert(resultB.isRight === true)
Thread.sleep(2000)
}
test("fail to create topic again") {
val future = kafkaManager.createTopic("dev",createTopicNameA,4,1)
val result = Await.result(future,duration)
assert(result.isLeft === true)
Thread.sleep(2000)
}
test("get topic list") {
val future = kafkaManager.getTopicList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.list.nonEmpty === true)
}
test("query request for invalid cluster") {
val future = kafkaManager.getTopicList("blah")
val result = Await.result(future,duration)
assert(result.isLeft === true)
assert(result.swap.toOption.get.msg.contains("blah") === true)
}
test("get broker list") {
val future = kafkaManager.getBrokerList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.nonEmpty === true)
}
test("get topic identity") {
val future = kafkaManager.getTopicList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.list.nonEmpty === true)
result.toOption.get.list.foreach { topic =>
val future2 = kafkaManager.getTopicIdentity("dev",topic)
val result2 = Await.result(future2, duration)
assert(result2.isRight === true)
}
//seeded topic should have offsets
val future2 = kafkaManager.getTopicIdentity("dev",seededTopic)
val result2 = Await.result(future2, duration)
assert(result2.isRight === true)
assert(result2.toOption.get.summedTopicOffsets >= 0)
}
test("get cluster list") {
val future = kafkaManager.getClusterList
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.active.nonEmpty === true)
}
test("get cluster view") {
val future = kafkaManager.getClusterView("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
}
test("get cluster config") {
val future = kafkaManager.getClusterConfig("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
}
test("get cluster context") {
val future = kafkaManager.getClusterContext("dev")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterFeatures.features(KMDeleteTopicFeature))
}
test("get consumer list passive mode") {
//Thread.sleep(2000)
val future = kafkaManager.getConsumerListExtended("dev")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.list.map(_._1).contains((newConsumer.get.groupId, KafkaManagedConsumer)), s"Failed : ${result}")
//TODO: fix high level consumer test
//assert(result.toOption.get.list.map(_._1).contains((hlConsumer.get.groupId, KafkaManagedConsumer)), s"Failed : ${result}")
}
/*test("get consumer identity passive mode for old consumer") {
val future = kafkaManager.getConsumerIdentity("dev", hlConsumer.get.groupId, "ZK")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
}*/
test("get consumer identity passive mode for new consumer") {
val future = kafkaManager.getConsumerIdentity("dev", newConsumer.get.groupId, "KF")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
}
test("run preferred leader election") {
val topicList = getTopicList()
val future = kafkaManager.runPreferredLeaderElection("dev",topicList.list.toSet)
val result = Await.result(future,duration)
//TODO: this is a failure since there is nothing to do, need a better test
assert(result.isLeft === true)
Thread.sleep(2000)
}
test("get preferred leader election") {
val future = kafkaManager.getPreferredLeaderElection("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
println(result.toOption.get)
}
test("schedule preferred leader election") {
val topicList = getTopicList()
kafkaManager.schedulePreferredLeaderElection("dev",topicList.list.toSet, 1)
assert(
kafkaManager.pleCancellable.contains("dev"),
"Scheduler not being persisted against the cluster name in KafkaManager instance. Is the task even getting scheduled?"
)
assert(
kafkaManager.pleCancellable("dev")._1.isInstanceOf[Option[Cancellable]],
"Some(system.scheduler.schedule) instance not being stored in KafkaManager instance. This is required for cancelling."
)
}
test("cancel scheduled preferred leader election") {
// For cancelling it is necessary for the task to be scheduled
if(!(kafkaManager.pleCancellable.contains("dev") && kafkaManager.pleCancellable("dev")._1.isInstanceOf[Option[Cancellable]])){
kafkaManager.schedulePreferredLeaderElection("dev",getTopicList().list.toSet, 1)
}
kafkaManager.cancelPreferredLeaderElection("dev")
assert(
!kafkaManager.pleCancellable.contains("dev"),
"Scheduler cluster name is not being removed from KafkaManager instance. Is the task even getting cancelled?"
)
}
test("generate partition assignments") {
val topicList = getTopicList()
val future = kafkaManager.generatePartitionAssignments("dev",topicList.list.toSet,Set(0))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("generate partition assignments with replication factor") {
val topicList = getTopicList()
val future = kafkaManager.generatePartitionAssignments("dev", topicList.list.toSet, Set(0), Some(1))
val result = Await.result(future, duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("fail to generate partition assignments with replication factor larger than available brokers") {
val topicList = getTopicList()
val future = kafkaManager.generatePartitionAssignments("dev", topicList.list.toSet, Set(0), Some(2))
val result = Await.result(future, duration)
assert(result.isLeft === true)
Thread.sleep(2000)
}
test("run reassign partitions") {
val topicList = getTopicList()
val future = kafkaManager.runReassignPartitions("dev",topicList.list.toSet)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("get reassign partitions") {
val future = kafkaManager.getReassignPartitions("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
}
test("add topic partitions") {
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
val future = kafkaManager.addTopicPartitions("dev",createTopicNameA,Seq(0),ti.partitions + 1,ti.readVersion)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
//check new partition num
{
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
assert(ti.partitions === 5)
}
}
test("add multiple topics partitions") {
val tiFutureA = kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiFutureB = kafkaManager.getTopicIdentity("dev",createTopicNameB)
val tiOrErrorA = Await.result(tiFutureA,duration)
val tiOrErrorB = Await.result(tiFutureB,duration)
assert(tiOrErrorA.isRight, "Failed to get topic identity for topic A!")
assert(tiOrErrorB.isRight, "Failed to get topic identity for topic B!")
val tiA = tiOrErrorA.toOption.get
val tiB = tiOrErrorB.toOption.get
val newPartitionNum = tiA.partitions + 1
val future = kafkaManager.addMultipleTopicsPartitions("dev",Seq(createTopicNameA, createTopicNameB),Set(0),newPartitionNum,Map(createTopicNameA->tiA.readVersion,createTopicNameB->tiB.readVersion))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
{
val tiFutureA = kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiFutureB = kafkaManager.getTopicIdentity("dev",createTopicNameB)
val tiOrErrorA = Await.result(tiFutureA,duration)
val tiOrErrorB = Await.result(tiFutureB,duration)
assert(tiOrErrorA.isRight, "Failed to get topic identity for topic A!")
assert(tiOrErrorB.isRight, "Failed to get topic identity for topic B!")
val tiA = tiOrErrorA.toOption.get
val tiB = tiOrErrorB.toOption.get
assert(tiA.partitions === newPartitionNum)
assert(tiB.partitions === newPartitionNum)
}
}
test("update topic config") {
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
val config = new Properties()
config.put(kafka.manager.utils.zero82.LogConfig.RententionMsProp,"1800000")
val configReadVersion = ti.configReadVersion
val future = kafkaManager.updateTopicConfig("dev",createTopicNameA,config,configReadVersion)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
//check new topic config
{
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
assert(ti.configReadVersion > configReadVersion)
assert(ti.config.toMap.apply(kafka.manager.utils.zero82.LogConfig.RententionMsProp) === "1800000")
}
}
test("delete topic") {
val futureA = kafkaManager.deleteTopic("dev",createTopicNameA)
val resultA = Await.result(futureA,duration)
assert(resultA.isRight === true, resultA)
Thread.sleep(2000)
val futureA2 = kafkaManager.getTopicList("dev")
val resultA2 = Await.result(futureA2,duration)
assert(resultA2.isRight === true, resultA2)
assert(!resultA2.toOption.get.list.contains(createTopicNameA),"Topic not deleted")
val futureB = kafkaManager.deleteTopic("dev",createTopicNameB)
val resultB = Await.result(futureB,duration)
assert(resultB.isRight === true, resultB)
Thread.sleep(2000)
val futureB2 = kafkaManager.getTopicList("dev")
val resultB2 = Await.result(futureB2,duration)
assert(resultB2.isRight === true, resultB2)
assert(!resultB2.toOption.get.list.contains(createTopicNameB),"Topic not deleted")
}
test("fail to delete non-existent topic") {
val future = kafkaManager.deleteTopic("dev","delete_me")
val result = Await.result(future,duration)
assert(result.isLeft === true)
}
test("update cluster zkhost") {
val future = kafkaManager.updateCluster("dev","2.4.1",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxSsl = false, jmxPass = None, tuning = Option(defaultTuning), securityProtocol = "PLAINTEXT", saslMechanism = None, jaasConfig = None)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.pending.nonEmpty === true) ||
(result2.toOption.get.active.find(c => c.name == "dev").get.curatorConfig.zkConnect === testServer.getConnectString))
Thread.sleep(2000)
}
test("disable cluster") {
val future = kafkaManager.disableCluster("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.pending.nonEmpty === true) ||
(result2.toOption.get.active.find(c => c.name == "dev").get.enabled === false))
Thread.sleep(2000)
}
test("enable cluster") {
val future = kafkaManager.enableCluster("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("update cluster version") {
val future = kafkaManager.updateCluster("dev","0.8.1.1",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol = "PLAINTEXT", saslMechanism = None, jaasConfig = None)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.pending.nonEmpty === true) ||
(result2.toOption.get.active.find(c => c.name == "dev").get.version === Kafka_0_8_1_1))
Thread.sleep(2000)
}
test("delete topic not supported prior to 2.0.0") {
val future = kafkaManager.deleteTopic("dev",createTopicNameA)
val result = Await.result(future,duration)
assert(result.isLeft === true, result)
assert(result.swap.toOption.get.msg.contains("not supported"))
Thread.sleep(2000)
}
test("update cluster logkafka enabled and activeOffsetCache enabled") {
val future = kafkaManager.updateCluster("dev","2.4.1",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, logkafkaEnabled = true, activeOffsetCacheEnabled = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol = "PLAINTEXT", saslMechanism = None, jaasConfig = None)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.active.find(c => c.name == "dev").get.logkafkaEnabled === true) &&
(result2.toOption.get.active.find(c => c.name == "dev").get.activeOffsetCacheEnabled === true))
Thread.sleep(2000)
}
test("update cluster security protocol and sasl mechanism") {
val future = kafkaManager.updateCluster("dev","1.1.0",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, logkafkaEnabled = true, activeOffsetCacheEnabled = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol = "SASL_PLAINTEXT", saslMechanism = Option("PLAIN"), jaasConfig = Option("blah"))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.active.find(c => c.name == "dev").get.securityProtocol === SASL_PLAINTEXT) &&
(result2.toOption.get.active.find(c => c.name == "dev").get.saslMechanism === Option(SASL_MECHANISM_PLAIN)))
Thread.sleep(2000)
val future3 = kafkaManager.updateCluster("dev","1.1.0",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, logkafkaEnabled = true, activeOffsetCacheEnabled = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol = "PLAINTEXT", saslMechanism = None, jaasConfig = None)
val result3 = Await.result(future3,duration)
assert(result3.isRight === true)
Thread.sleep(2000)
val future4 = kafkaManager.getClusterList
val result4 = Await.result(future4,duration)
assert(result4.isRight === true)
assert((result4.toOption.get.active.find(c => c.name == "dev").get.securityProtocol === PLAINTEXT) &&
(result4.toOption.get.active.find(c => c.name == "dev").get.saslMechanism === None))
Thread.sleep(2000)
}
/*
test("get consumer list active mode") {
val future = kafkaManager.getConsumerListExtended("dev")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.list.head._1 === hlConsumer.get.groupId, s"Failed : ${result}")
}
test("get consumer identity active mode") {
val future = kafkaManager.getConsumerIdentity("dev", hlConsumer.get.groupId)
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
}*/
test("create logkafka") {
val config = new Properties()
config.put(kafka.manager.utils.logkafka82.LogConfig.TopicProp,createLogkafkaTopic)
val future = kafkaManager.createLogkafka("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath,config)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("get logkafka identity") {
val future = kafkaManager.getLogkafkaLogkafkaIdList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.list.nonEmpty === true)
result.toOption.get.list.foreach { logkafka_id =>
val future2 = kafkaManager.getLogkafkaIdentity("dev",logkafka_id)
val result2 = Await.result(future2, duration)
assert(result2.isRight === true)
}
}
test("update logkafka config") {
val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
val liOrError = Await.result(liFuture, duration)
assert(liOrError.isRight, "Failed to get logkafka identity!")
val li = liOrError.toOption.get
val config = new Properties()
config.put(kafka.manager.utils.logkafka82.LogConfig.TopicProp,createLogkafkaTopic)
config.put(kafka.manager.utils.logkafka82.LogConfig.PartitionProp,"1")
val future = kafkaManager.updateLogkafkaConfig("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath,config)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(1000)
//check new logkafka config
{
val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
val liOrError = Await.result(liFuture, duration)
assert(liOrError.isRight, "Failed to get logkafka identity!")
val li = liOrError.toOption.get
assert(li.identityMap.get(createLogkafkaLogPath).get._1.get.apply(kafka.manager.utils.logkafka82.LogConfig.PartitionProp) === "1")
}
}
test("delete logkafka") {
val future = kafkaManager.deleteLogkafka("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath)
val result = Await.result(future,duration)
assert(result.isRight === true, result)
Thread.sleep(2000)
val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
val liOrError = Await.result(liFuture, duration)
assert(liOrError.isRight, "Failed to get logkafka identity!")
val li = liOrError.toOption.get
assert(li.identityMap.get(createLogkafkaLogPath) === None)
Thread.sleep(2000)
}
test("delete cluster") {
//first have to disable in order to delete
{
val future = kafkaManager.disableCluster("dev")
val result = Await.result(future, duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
val future = kafkaManager.deleteCluster("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert(result2.toOption.get.pending.isEmpty === true)
assert(result2.toOption.get.active.isEmpty === true)
}
}
| yahoo/kafka-manager | test/kafka/manager/TestKafkaManager.scala | Scala | apache-2.0 | 26,729 |
package com.ing.baker.recipe.javadsl
import com.ing.baker.recipe.{common, scaladsl}
import scala.collection.immutable.Seq
object JavadslTestHelper {
//Ingredients
val initialIngredientCheck: common.Ingredient = scaladsl.Ingredient[String]("initialIngredient")
val recipeInstanceIdStringCheck: common.Ingredient = scaladsl.Ingredient[String]("RecipeInstanceId")
//Events
val interactionProvidedEventCheck: common.Event = new scaladsl.Event("InteractionProvidedEvent", Seq.empty, None)
val interactionProvidedEvent2Check: common.Event = new scaladsl.Event("InteractionProvidedEvent2", Seq.empty, None)
val sensoryEventWithIngredientCheck: common.Event = new scaladsl.Event("SensoryEventWithIngredient", Seq(initialIngredientCheck), Some(1))
val sensoryEventWithIngredientAndNoFiringLimitCheck: common.Event = new scaladsl.Event("SensoryEventWithIngredient", Seq(initialIngredientCheck), None)
val sensoryEventWithoutIngredientCheck: common.Event = new scaladsl.Event("SensoryEventWithoutIngredient", Seq.empty, Some(1))
val sensoryEventWithoutIngredientAndFiringLimit2Check: common.Event = new scaladsl.Event("SensoryEventWithoutIngredient", Seq.empty, Some(2))
val sensoryEventWithoutIngredientAndNoFiringLimitCheck: common.Event = new scaladsl.Event("SensoryEventWithoutIngredient", Seq.empty, None)
//Interactions
val requiresrecipeInstanceIdStringInteractionCheck: scaladsl.Interaction = scaladsl.Interaction("RequiresRecipeInstanceIdStringInteraction", Seq(recipeInstanceIdStringCheck, initialIngredientCheck), Seq.empty)
val firesEventInteractionCheck: scaladsl.Interaction = scaladsl.Interaction("FiresEventInteraction", Seq(initialIngredientCheck), Seq((interactionProvidedEventCheck)))
val firesTwoEventInteractionCheck: scaladsl.Interaction = scaladsl.Interaction("FiresTwoEventInteraction", Seq(initialIngredientCheck), Seq(interactionProvidedEventCheck, interactionProvidedEvent2Check))
}
| ing-bank/baker | core/recipe-dsl/src/test/scala/com/ing/baker/recipe/javadsl/JavadslTestHelper.scala | Scala | mit | 1,934 |
// This is a generator to forward to the generated java methods
// as a workaround for scala bug#11770
//
// Run this generator like a script:
// scala GeneratorWorkaroundScala.scala > ../src/main/scala-2.13+/com/twitter/bijection/clojure/GenertedIFnBijections.scala
val letters = (('A' to 'Z').toList.inits.toList.reverse.tail).take(23)
def rot(l: List[Char]) = l.tail :+ l.head
val methods = letters.zipWithIndex.map { case (range, i) => s"""implicit def function${i}ToIFn[${range.mkString(", ")}]:
Bijection[Function${i}[${rot(range).mkString(", ")}], IFn] =
Workaround11770.function${i}ToIFn[${range.mkString(", ")}]
""" }
println("// Autogenerated code DO NOT EDIT BY HAND")
println("// Generated by bijection-clojure/codegen/GeneratorWorkaroundScala.scala")
println("package com.twitter.bijection.clojure")
println("import clojure.lang.{ AFn, IFn }")
println("import com.twitter.bijection.{ AbstractBijection, Bijection, CastInjection }")
println("\\ntrait GeneratedIFnBijections {")
methods.foreach(method => {
println(method)
println
})
println("}") | twitter/bijection | bijection-clojure/codegen/GeneratorWorkaroundScala.scala | Scala | apache-2.0 | 1,069 |
package com.shorrockin.cascal.jmx
import management.ManagementFactory
import javax.management.ObjectName
import com.shorrockin.cascal.session.{Session, Host, SessionPool}
/**
* object used to capture various metrics related to cascal and expose them through
* a jmx interface.
*
* @author Chris Shorrock
*/
object CascalStatistics extends CascalStatistics$MBean {
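  /* Sketch of how these hooks are expected to be driven by the session layer (assumed call
   * sites for illustration only, not verified against the callers):
   *   CascalStatistics.creation(host)          // a connection to `host` was opened
   *   CascalStatistics.usage(host, elapsedMs)  // a session against `host` completed in `elapsedMs`
   *   CascalStatistics.usageError(host)        // a session against `host` failed
   */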
private val objectName = new ObjectName("com.shorrockin.cascal:name=Statistics")
private val mbeanServer = ManagementFactory.getPlatformMBeanServer
reinstallMBean()
private var pools = List[SessionPool]()
private var hosts = Map[Host, HostStatistics]()
/**
* normally this shouldn't be an issue, since this is an object. However if this library
* is loaded twice by different class-loaders (for example) we could run into a scenario
* where register throws an already registered exception. This scenario is likely to
* occur in situations where we're running Cassandra/Cascal using something like SBT where
* the JVM stays around between runs and each test is run in an isolated classloader. This
* fix DOES NOT address the situation where cascal is used in two separate classloaders
* concurrently - which would be a problem. (that is a TODO item).*
*/
def reinstallMBean() {
if (mbeanServer.isRegistered(objectName)) mbeanServer.unregisterMBean(objectName)
mbeanServer.registerMBean(this, objectName)
}
/**
* retrieves the stats for the specified host, creating and registering them if they don't
* exist.
*/
private def get(host:Host) = {
if (hosts.contains(host)) {
hosts(host)
} else {
this.synchronized {
if (hosts.contains(host)) {
hosts(host)
} else {
val hostObjectName = new ObjectName("com.shorrockin.cascal:name=Statistics-%s-%s-%s".format(host.address, host.port, host.timeout))
if (mbeanServer.isRegistered(hostObjectName)) mbeanServer.unregisterMBean(hostObjectName)
val stats = new HostStatistics(host)
hosts = hosts + (host -> stats)
mbeanServer.registerMBean(stats, hostObjectName)
stats
}
}
}
}
def register(pool:SessionPool) = pools = pool :: pools
def unregister(pool:SessionPool) = pools = pools - pool
def creation(host:Host) = get(host).creation
def creationError(host:Host) = get(host).creationError
def usage(host:Host, duration:Long) = get(host).usage(duration)
def usageError(host:Host) = get(host).usageError
def getNumberOfActiveConnections():Int = pools.foldLeft(0) { _ + _.active }
def getNumberOfIdleConnections():Int = pools.foldLeft(0) { _ + _.idle }
def getNumberOfConnectionsUsed():Long = hosts.foldLeft(0L) { _ + _._2.getNumberOfConnectionsUsed }
  def getAverageConnectionUsageTime():Long = if (getNumberOfConnectionsUsed() == 0L) 0L else getTotalUsageTime() / getNumberOfConnectionsUsed()
def getTotalUsageTime():Long = hosts.foldLeft(0L) { _ + _._2.getTotalUsageTime }
def getNumberOfCreationFailures():Long = hosts.foldLeft(0L) { _ + _._2.getNumberOfCreationFailures }
def getNumberOfUsageExceptions():Long = hosts.foldLeft(0L) { _ + _._2.getNumberOfUsageExceptions }
def getNumberOfSessionsCreated():Long = hosts.foldLeft(0L) { _ + _._2.getNumberOfSessionsCreated }
}
class HostStatistics(host:Host) extends HostStatisticsMBean {
var usedCount = 0L
var usageTime = 0L
var usageErrors = 0L
var created = 0L
var createFails = 0L
def creation = created = created + 1
def creationError = createFails = createFails + 1
def usage(duration:Long) = {usedCount = usedCount + 1 ; usageTime = usageTime + duration }
def usageError = usageErrors = usageErrors + 1
def getNumberOfConnectionsUsed() = usedCount
  def getAverageConnectionUsageTime() = if (usedCount == 0L) 0L else usageTime / usedCount
def getTotalUsageTime() = usageTime
def getNumberOfCreationFailures() = createFails
def getNumberOfUsageExceptions() = usageErrors
def getNumberOfSessionsCreated() = created
}
trait HostStatisticsMBean {
def getNumberOfConnectionsUsed():Long
def getAverageConnectionUsageTime():Long
def getTotalUsageTime():Long
def getNumberOfCreationFailures():Long
def getNumberOfUsageExceptions():Long
def getNumberOfSessionsCreated():Long
}
trait CascalStatistics$MBean extends HostStatisticsMBean {
def getNumberOfActiveConnections():Int
def getNumberOfIdleConnections():Int
} | shorrockin/cascal | src/main/scala/com/shorrockin/cascal/jmx/CascalStatistics.scala | Scala | apache-2.0 | 4,627 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package reflect
package macros
/**
* <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span>
* @see [[scala.reflect.api.Internals]]
*/
trait Internals {
self: blackbox.Context =>
/** @see [[scala.reflect.api.Internals]] */
val internal: ContextInternalApi
/** @see [[scala.reflect.api.Internals]] */
trait ContextInternalApi extends universe.MacroInternalApi {
/** Symbol associated with the innermost enclosing lexical context.
* Walking the owner chain of this symbol will reveal information about more and more enclosing contexts.
*/
def enclosingOwner: Symbol
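    /* For illustration only (assumed usage, not part of the original docs): for a macro
     * expanding inside a method body, `enclosingOwner` would be that method's symbol and
     * `enclosingOwner.owner` its enclosing class or object. */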
/** Functions that are available during [[transform]].
* @see [[transform]]
*/
trait TransformApi {
/** Calls the current transformer on the given tree.
* Current transformer = argument to the `transform` call.
*/
def recur(tree: Tree): Tree
/** Calls the default transformer on the given tree.
* Default transformer = recur into tree's children and assemble the results.
*/
def default(tree: Tree): Tree
}
/** Transforms a given tree using the provided function.
* @see [[TransformApi]]
*/
// TODO: explore a more concise notation that Denys and I discussed today
// when transformer is PartialFunction[Tree, Tree]] and TransformApi is passed magically
// also cf. https://github.com/dsl-paradise/dsl-paradise
def transform(tree: Tree)(transformer: (Tree, TransformApi) => Tree): Tree
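    // Illustrative sketch (not part of the original sources): a macro implementation with a
    // blackbox.Context `c` in scope might use `transform` roughly as follows; the string-literal
    // rewrite is a hypothetical example.
    //
    //   import c.universe._
    //   val rewritten = c.internal.transform(tree) { (t, api) =>
    //     t match {
    //       case Literal(Constant(s: String)) => Literal(Constant(s.toUpperCase)) // rewrite leaves
    //       case other                        => api.default(other)               // recurse into children
    //     }
    //   }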
/** Functions that are available during [[typingTransform]].
* @see [[typingTransform]]
*/
trait TypingTransformApi extends TransformApi {
/** Temporarily pushes the given symbol onto the owner stack, creating a new local typer,
       * invokes the given operation and then rolls back the changes to the owner stack.
*/
def atOwner[T](owner: Symbol)(op: => T): T
/** Temporarily pushes the given tree onto the recursion stack, and then calls `atOwner(symbol)(trans)`.
*/
def atOwner[T](tree: Tree, owner: Symbol)(op: => T): T
/** Returns the symbol currently on the top of the owner stack.
* If we're not inside any `atOwner` call, then macro application's context owner will be used.
*/
def currentOwner: Symbol
/** Typechecks the given tree using the local typer currently on the top of the owner stack.
* If we're not inside any `atOwner` call, then macro application's callsite typer will be used.
*/
def typecheck(tree: Tree): Tree
}
/** Transforms a given tree using the provided function.
* @see [[TypingTransformApi]]
*/
def typingTransform(tree: Tree)(transformer: (Tree, TypingTransformApi) => Tree): Tree
/** Transforms a given tree at a given owner using the provided function.
* @see [[TypingTransformApi]]
*/
def typingTransform(tree: Tree, owner: Symbol)(transformer: (Tree, TypingTransformApi) => Tree): Tree
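    // Illustrative sketch (assumption, not from the original sources): `typingTransform` exposes a
    // local typer, e.g. to re-typecheck synthesized trees under the current owner; the operand swap
    // below is a hypothetical rewrite and assumes `import c.universe._`.
    //
    //   c.internal.typingTransform(tree) { (t, api) =>
    //     t match {
    //       case q"$x + $y" => api.typecheck(q"$y + $x")
    //       case other      => api.default(other)
    //     }
    //   }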
}
}
| scala/scala | src/reflect/scala/reflect/macros/Internals.scala | Scala | apache-2.0 | 3,307 |
package slamdata.engine.std
import slamdata.engine.{Data, Func, Type, Mapping, SemanticError}
import scalaz._
import SemanticError._
import Validation.{success, failure}
import NonEmptyList.nel
// TODO: Cleanup!
trait RelationsLib extends Library {
private val BinaryAny: Func.Untyper = {
case Type.Const(Data.Bool(_)) => success(Type.Top :: Type.Top :: Nil)
case Type.Bool => success(Type.Top :: Type.Top :: Nil)
case t => failure(nel(TypeError(Type.Bool, t), Nil))
}
private val BinaryBool: Func.Untyper = {
case Type.Bool => success(Type.Bool :: Type.Bool :: Nil)
case t => failure(nel(TypeError(Type.Bool, t), Nil))
}
private val UnaryBool: Func.Untyper = {
case Type.Bool => success(Type.Bool :: Nil)
case t => failure(nel(TypeError(Type.Bool, t), Nil))
}
val Eq = Mapping("(=)", "Determines if two values are equal", Type.Top :: Type.Top :: Nil,
(partialTyper {
case Type.Const(Data.Number(v1)) :: Type.Const(Data.Number(v2)) :: Nil => Type.Const(Data.Bool(v1 == v2))
case Type.Const(data1) :: Type.Const(data2) :: Nil => Type.Const(Data.Bool(data1 == data2))
case type1 :: type2 :: Nil if Type.lub(type1, type2) == Type.Top && type1 != Type.Top => Type.Const(Data.Bool(false))
case _ => Type.Bool
}),
BinaryAny
)
val Neq = Mapping("(<>)", "Determines if two values are not equal", Type.Top :: Type.Top :: Nil,
(partialTyper {
case Type.Const(Data.Number(v1)) :: Type.Const(Data.Number(v2)) :: Nil => Type.Const(Data.Bool(v1 != v2))
case Type.Const(data1) :: Type.Const(data2) :: Nil => Type.Const(Data.Bool(data1 != data2))
case type1 :: type2 :: Nil if Type.lub(type1, type2) == Type.Top && type1 != Type.Top => Type.Const(Data.Bool(true))
case _ => Type.Bool
}),
BinaryAny
)
val Lt = Mapping("(<)", "Determines if one value is less than another value of the same type", Type.Top :: Type.Top :: Nil,
(partialTyper {
case Type.Const(Data.Bool(v1)) :: Type.Const(Data.Bool(v2)) :: Nil => Type.Const(Data.Bool(v1 < v2))
case Type.Const(Data.Number(v1)) :: Type.Const(Data.Number(v2)) :: Nil => Type.Const(Data.Bool(v1 < v2))
case Type.Const(Data.Str(v1)) :: Type.Const(Data.Str(v2)) :: Nil => Type.Const(Data.Bool(v1 < v2))
case Type.Const(Data.DateTime(v1)) :: Type.Const(Data.DateTime(v2)) :: Nil => Type.Const(Data.Bool(v1.compareTo(v2) < 0))
case Type.Const(Data.Interval(v1)) :: Type.Const(Data.Interval(v2)) :: Nil => Type.Const(Data.Bool(v1.compareTo(v2) < 0))
case _ => Type.Bool
}),
BinaryAny
)
val Lte = Mapping("(<=)", "Determines if one value is less than or equal to another value of the same type", Type.Top :: Type.Top :: Nil,
(partialTyper {
case Type.Const(Data.Bool(v1)) :: Type.Const(Data.Bool(v2)) :: Nil => Type.Const(Data.Bool(v1 <= v2))
case Type.Const(Data.Number(v1)) :: Type.Const(Data.Number(v2)) :: Nil => Type.Const(Data.Bool(v1 <= v2))
case Type.Const(Data.Str(v1)) :: Type.Const(Data.Str(v2)) :: Nil => Type.Const(Data.Bool(v1 <= v2))
case Type.Const(Data.DateTime(v1)) :: Type.Const(Data.DateTime(v2)) :: Nil => Type.Const(Data.Bool(v1.compareTo(v2) <= 0))
case Type.Const(Data.Interval(v1)) :: Type.Const(Data.Interval(v2)) :: Nil => Type.Const(Data.Bool(v1.compareTo(v2) <= 0))
case _ => Type.Bool
}),
BinaryAny
)
val Gt = Mapping("(>)", "Determines if one value is greater than another value of the same type", Type.Top :: Type.Top :: Nil,
(partialTyper {
case Type.Const(Data.Bool(v1)) :: Type.Const(Data.Bool(v2)) :: Nil => Type.Const(Data.Bool(v1 > v2))
case Type.Const(Data.Number(v1)) :: Type.Const(Data.Number(v2)) :: Nil => Type.Const(Data.Bool(v1 > v2))
case Type.Const(Data.Str(v1)) :: Type.Const(Data.Str(v2)) :: Nil => Type.Const(Data.Bool(v1 > v2))
case Type.Const(Data.DateTime(v1)) :: Type.Const(Data.DateTime(v2)) :: Nil => Type.Const(Data.Bool(v1.compareTo(v2) > 0))
case Type.Const(Data.Interval(v1)) :: Type.Const(Data.Interval(v2)) :: Nil => Type.Const(Data.Bool(v1.compareTo(v2) > 0))
case _ => Type.Bool
}),
BinaryAny
)
val Gte = Mapping("(>=)", "Determines if one value is greater than or equal to another value of the same type", Type.Top :: Type.Top :: Nil,
(partialTyper {
case Type.Const(Data.Bool(v1)) :: Type.Const(Data.Bool(v2)) :: Nil => Type.Const(Data.Bool(v1 >= v2))
case Type.Const(Data.Number(v1)) :: Type.Const(Data.Number(v2)) :: Nil => Type.Const(Data.Bool(v1 >= v2))
case Type.Const(Data.Str(v1)) :: Type.Const(Data.Str(v2)) :: Nil => Type.Const(Data.Bool(v1 >= v2))
case Type.Const(Data.DateTime(v1)) :: Type.Const(Data.DateTime(v2)) :: Nil => Type.Const(Data.Bool(v1.compareTo(v2) >= 0))
case Type.Const(Data.Interval(v1)) :: Type.Const(Data.Interval(v2)) :: Nil => Type.Const(Data.Bool(v1.compareTo(v2) >= 0))
case _ => Type.Bool
}),
BinaryAny
)
val Between = Mapping("(BETWEEN)", "Determines if a value is between two other values of the same type, inclusive", Type.Top :: Type.Top :: Nil,
(partialTyper {
// TODO: partial evaluation for Int and Dec and possibly other constants
case _ :: _ :: Nil => Type.Bool
case _ => Type.Bool
}),
BinaryAny
)
val Range = Mapping("RANGE", "Used with BETWEEN", Type.Top :: Type.Top :: Nil,
(partialTyper {
// TODO: partial evaluation for Int and Dec and possibly other constants
case _ :: _ :: Nil => Type.Top // HACK
}),
t => t match {
case _ => success(Type.Top :: Type.Top :: Nil)
}
)
val And = Mapping("(AND)", "Performs a logical AND of two boolean values", Type.Bool :: Type.Bool :: Nil,
partialTyper {
case Type.Const(Data.Bool(v1)) :: Type.Const(Data.Bool(v2)) :: Nil => Type.Const(Data.Bool(v1 && v2))
case Type.Const(Data.Bool(false)) :: _ :: Nil => Type.Const(Data.Bool(false))
case _ :: Type.Const(Data.Bool(false)) :: Nil => Type.Const(Data.Bool(false))
case Type.Const(Data.Bool(true)) :: x :: Nil => x
case x :: Type.Const(Data.Bool(true)) :: Nil => x
case _ => Type.Bool
},
BinaryBool
)
val Or = Mapping("(OR)", "Performs a logical OR of two boolean values", Type.Bool :: Type.Bool :: Nil,
partialTyper {
case Type.Const(Data.Bool(v1)) :: Type.Const(Data.Bool(v2)) :: Nil => Type.Const(Data.Bool(v1 || v2))
case Type.Const(Data.Bool(true)) :: _ :: Nil => Type.Const(Data.Bool(true))
      case _ :: Type.Const(Data.Bool(true)) :: Nil => Type.Const(Data.Bool(true))
case Type.Const(Data.Bool(false)) :: x :: Nil => x
case x :: Type.Const(Data.Bool(false)) :: Nil => x
case _ => Type.Bool
},
BinaryBool
)
val Not = Mapping("(NOT)", "Performs a logical negation of a boolean value", Type.Bool :: Nil,
partialTyper {
case Type.Const(Data.Bool(v)) :: Nil => Type.Const(Data.Bool(!v))
case _ => Type.Bool
},
UnaryBool
)
val Cond = Mapping("(IF_THEN_ELSE)", "Chooses between one of two cases based on the value of a boolean expression", Type.Bool :: Type.Top :: Type.Top :: Nil,
partialTyper {
case Type.Const(Data.Bool(true)) :: ifTrue :: ifFalse :: Nil => ifTrue
case Type.Const(Data.Bool(false)) :: ifTrue :: ifFalse :: Nil => ifFalse
    }, _ => success(Type.Bool :: Type.Top :: Type.Top :: Nil)
)
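  // Illustrative examples (comment only) of the constant folding performed by the typers above:
  //   Eq:  Type.Const(Data.Bool(true))  :: Type.Const(Data.Bool(true)) :: Nil => Type.Const(Data.Bool(true))
  //   And: Type.Const(Data.Bool(false)) :: Type.Bool :: Nil                   => Type.Const(Data.Bool(false))
  //   Not: Type.Const(Data.Bool(true))  :: Nil                                => Type.Const(Data.Bool(false))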
def functions = Eq :: Neq :: Lt :: Lte :: Gt :: Gte :: Between :: Range :: And :: Or :: Not :: Cond :: Nil
}
object RelationsLib extends RelationsLib | sellout/slamengine-old | src/main/scala/slamdata/engine/std/relations.scala | Scala | agpl-3.0 | 7,537 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.datastream
import java.lang.{Boolean => JBoolean, Long => JLong}
import java.util
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelFieldCollation.Direction
import org.apache.calcite.rel._
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rex._
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.sql.SqlMatchRecognize.AfterOption
import org.apache.calcite.sql.`type`.SqlTypeFamily
import org.apache.calcite.sql.fun.SqlStdOperatorTable._
import org.apache.flink.annotation.VisibleForTesting
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.runtime.RowComparator
import org.apache.flink.cep.nfa.aftermatch.AfterMatchSkipStrategy
import org.apache.flink.cep.nfa.compiler.NFACompiler
import org.apache.flink.cep.pattern.Pattern
import org.apache.flink.cep.pattern.Quantifier.QuantifierProperty
import org.apache.flink.cep.pattern.conditions.BooleanConditions
import org.apache.flink.cep.{CEP, PatternStream}
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.windowing.time.Time
import scala.collection.JavaConverters._
import org.apache.flink.table.api._
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.MatchCodeGenerator
import org.apache.flink.table.plan.logical.MatchRecognize
import org.apache.flink.table.plan.nodes.CommonMatchRecognize
import org.apache.flink.table.plan.rules.datastream.DataStreamRetractionRules
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.plan.util.RexDefaultVisitor
import org.apache.flink.table.runtime.`match`._
import org.apache.flink.table.runtime.aggregate.SortUtil
import org.apache.flink.table.runtime.conversion.CRowToRowMapFunction
import org.apache.flink.table.runtime.types.{CRow, CRowTypeInfo}
import org.apache.flink.table.runtime.{RowKeySelector, RowtimeProcessFunction}
import org.apache.flink.types.Row
import org.apache.flink.util.MathUtils
/**
* Flink RelNode which matches along with LogicalMatch.
*/
class DataStreamMatch(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputNode: RelNode,
logicalMatch: MatchRecognize,
schema: RowSchema,
inputSchema: RowSchema)
extends SingleRel(cluster, traitSet, inputNode)
with CommonMatchRecognize
with DataStreamRel {
override def needsUpdatesAsRetraction = true
override def consumesRetractions = true
override def deriveRowType(): RelDataType = schema.relDataType
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new DataStreamMatch(
cluster,
traitSet,
inputs.get(0),
logicalMatch,
schema,
inputSchema)
}
override def toString: String = {
matchToString(logicalMatch, inputSchema.fieldNames, getExpressionString)
}
override def explainTerms(pw: RelWriter): RelWriter = {
explainMatch(super.explainTerms(pw), logicalMatch, inputSchema.fieldNames, getExpressionString)
}
private def translateTimeBound(interval: RexNode): Time = {
interval match {
case x: RexLiteral if x.getTypeName.getFamily == SqlTypeFamily.INTERVAL_DAY_TIME =>
Time.milliseconds(x.getValueAs(classOf[JLong]))
case _ =>
throw new TableException("Only constant intervals with millisecond resolution " +
"are supported as time constraints of patterns.")
}
}
@VisibleForTesting
private[flink] def translatePattern(
config: TableConfig,
inputTypeInfo: TypeInformation[Row]
): (Pattern[Row, Row], Iterable[String]) = {
val patternVisitor = new PatternVisitor(config, inputTypeInfo, logicalMatch)
val cepPattern = if (logicalMatch.interval != null) {
val interval = translateTimeBound(logicalMatch.interval)
logicalMatch.pattern.accept(patternVisitor).within(interval)
} else {
logicalMatch.pattern.accept(patternVisitor)
}
(cepPattern, patternVisitor.names)
}
override def translateToPlan(
tableEnv: StreamTableEnvironment,
queryConfig: StreamQueryConfig)
: DataStream[CRow] = {
val inputIsAccRetract = DataStreamRetractionRules.isAccRetract(getInput)
val config = tableEnv.config
val inputTypeInfo = inputSchema.typeInfo
val crowInput: DataStream[CRow] = getInput
.asInstanceOf[DataStreamRel]
.translateToPlan(tableEnv, queryConfig)
if (inputIsAccRetract) {
throw new TableException(
"Retraction on match recognize is not supported. " +
"Note: Match recognize should not follow a non-windowed GroupBy aggregation.")
}
val (timestampedInput, rowComparator) = translateOrder(tableEnv,
crowInput,
logicalMatch.orderKeys)
val (cepPattern, patternNames) = translatePattern(config, inputTypeInfo)
//TODO remove this once it is supported in CEP library
if (NFACompiler.canProduceEmptyMatches(cepPattern)) {
throw new TableException(
"Patterns that can produce empty matches are not supported. There must be at least one " +
"non-optional state.")
}
//TODO remove this once it is supported in CEP library
if (cepPattern.getQuantifier.hasProperty(QuantifierProperty.GREEDY)) {
throw new TableException(
"Greedy quantifiers are not allowed as the last element of a Pattern yet. Finish your " +
"pattern with either a simple variable or reluctant quantifier.")
}
val inputDS: DataStream[Row] = timestampedInput
.map(new CRowToRowMapFunction)
.setParallelism(timestampedInput.getParallelism)
.name("ConvertToRow")
.returns(inputTypeInfo)
val partitionKeys = logicalMatch.partitionKeys
val partitionedStream = applyPartitioning(partitionKeys, inputDS)
val patternStream: PatternStream[Row] = if (rowComparator.isDefined) {
CEP.pattern[Row](partitionedStream, cepPattern, new EventRowComparator(rowComparator.get))
} else {
CEP.pattern[Row](partitionedStream, cepPattern)
}
val measures = logicalMatch.measures
val outTypeInfo = CRowTypeInfo(schema.typeInfo)
if (logicalMatch.allRows) {
throw new TableException("All rows per match mode is not supported yet.")
} else {
val generator = new MatchCodeGenerator(config, inputTypeInfo, patternNames.toSeq)
val patternSelectFunction = generator.generateOneRowPerMatchExpression(
schema,
partitionKeys,
measures)
patternStream.process[CRow](patternSelectFunction, outTypeInfo)
}
}
private def translateOrder(
tableEnv: StreamTableEnvironment,
crowInput: DataStream[CRow],
orderKeys: RelCollation)
: (DataStream[CRow], Option[RowComparator]) = {
if (orderKeys.getFieldCollations.size() == 0) {
throw new ValidationException("You must specify either rowtime or proctime for order by.")
}
    // need to identify the time attribute among the other order fields. Time needs to be the first sort element
val timeOrderField = SortUtil.getFirstSortField(orderKeys, inputSchema.relDataType)
if (!FlinkTypeFactory.isTimeIndicatorType(timeOrderField.getType)) {
throw new ValidationException(
"You must specify either rowtime or proctime for order by as the first one.")
}
// time ordering needs to be ascending
if (SortUtil.getFirstSortDirection(orderKeys) != Direction.ASCENDING) {
throw new ValidationException(
"Primary sort order of a streaming table must be ascending on time.")
}
val rowComparator = if (orderKeys.getFieldCollations.size() > 1) {
Some(SortUtil
.createRowComparator(inputSchema.relDataType,
orderKeys.getFieldCollations.asScala.tail,
tableEnv.execEnv.getConfig))
} else {
None
}
timeOrderField.getType match {
case _ if FlinkTypeFactory.isRowtimeIndicatorType(timeOrderField.getType) =>
(crowInput.process(
new RowtimeProcessFunction(timeOrderField.getIndex, CRowTypeInfo(inputSchema.typeInfo))
).setParallelism(crowInput.getParallelism),
rowComparator)
case _ =>
(crowInput, rowComparator)
}
}
private def applyPartitioning(partitionKeys: util.List[RexNode], inputDs: DataStream[Row])
: DataStream[Row] = {
if (partitionKeys.size() > 0) {
val keys = partitionKeys.asScala.map {
case ref: RexInputRef => ref.getIndex
}.toArray
val keySelector = new RowKeySelector(keys, inputSchema.projectedTypeInfo(keys))
inputDs.keyBy(keySelector)
} else {
inputDs
}
}
}
private class PatternVisitor(
config: TableConfig,
inputTypeInfo: TypeInformation[Row],
logicalMatch: MatchRecognize)
extends RexDefaultVisitor[Pattern[Row, Row]] {
private var pattern: Pattern[Row, Row] = _
val names = new collection.mutable.LinkedHashSet[String]()
override def visitLiteral(literal: RexLiteral): Pattern[Row, Row] = {
val patternName = literal.getValueAs(classOf[String])
pattern = translateSingleVariable(Option.apply(pattern), patternName)
val patternDefinition = logicalMatch.patternDefinitions.get(patternName)
if (patternDefinition != null) {
val generator = new MatchCodeGenerator(config, inputTypeInfo, names.toSeq, Some(patternName))
val condition = generator.generateIterativeCondition(patternDefinition)
pattern.where(condition)
} else {
pattern.where(BooleanConditions.trueFunction())
}
}
override def visitCall(call: RexCall): Pattern[Row, Row] = {
call.getOperator match {
case PATTERN_CONCAT =>
val left = call.operands.get(0)
val right = call.operands.get(1)
pattern = left.accept(this)
pattern = right.accept(this)
pattern
case PATTERN_QUANTIFIER =>
val name = call.operands.get(0) match {
case c: RexLiteral => c
case x => throw new TableException(s"Expression not supported: $x Group patterns are " +
s"not supported yet.")
}
pattern = name.accept(this)
val startNum = MathUtils.checkedDownCast(call.operands.get(1).asInstanceOf[RexLiteral]
.getValueAs(classOf[JLong]))
val endNum = MathUtils.checkedDownCast(call.operands.get(2).asInstanceOf[RexLiteral]
.getValueAs(classOf[JLong]))
val isGreedy = !call.operands.get(3).asInstanceOf[RexLiteral]
.getValueAs(classOf[JBoolean])
applyQuantifier(pattern, startNum, endNum, isGreedy)
case PATTERN_ALTER =>
throw new TableException(
s"Expression not supported: $call. Currently, CEP doesn't support branching patterns.")
case PATTERN_PERMUTE =>
throw new TableException(
s"Expression not supported: $call. Currently, CEP doesn't support PERMUTE patterns.")
case PATTERN_EXCLUDE =>
throw new TableException(
s"Expression not supported: $call. Currently, CEP doesn't support '{-' '-}' patterns.")
}
}
override def visitNode(rexNode: RexNode): Pattern[Row, Row] = throw new TableException(
s"Unsupported expression within Pattern: [$rexNode]")
private def translateSkipStrategy = {
val getPatternTarget = () => logicalMatch.after.asInstanceOf[RexCall].getOperands.get(0)
.asInstanceOf[RexLiteral].getValueAs(classOf[String])
logicalMatch.after.getKind match {
case SqlKind.LITERAL =>
logicalMatch.after.asInstanceOf[RexLiteral].getValueAs(classOf[AfterOption]) match {
case AfterOption.SKIP_PAST_LAST_ROW => AfterMatchSkipStrategy.skipPastLastEvent()
case AfterOption.SKIP_TO_NEXT_ROW => AfterMatchSkipStrategy.skipToNext()
}
case SqlKind.SKIP_TO_FIRST =>
AfterMatchSkipStrategy.skipToFirst(getPatternTarget()).throwExceptionOnMiss()
case SqlKind.SKIP_TO_LAST =>
AfterMatchSkipStrategy.skipToLast(getPatternTarget()).throwExceptionOnMiss()
case _ => throw new IllegalStateException(s"Corrupted query tree. Unexpected " +
s"${logicalMatch.after} for after match strategy.")
}
}
private def translateSingleVariable(
previousPattern: Option[Pattern[Row, Row]],
patternName: String)
: Pattern[Row, Row] = {
if (names.contains(patternName)) {
throw new TableException("Pattern variables must be unique. That might change in the future.")
} else {
names.add(patternName)
}
previousPattern match {
case Some(p) => p.next(patternName)
case None =>
Pattern.begin(patternName, translateSkipStrategy)
}
}
private def applyQuantifier(
pattern: Pattern[Row, Row],
startNum: Int,
endNum: Int,
greedy: Boolean)
: Pattern[Row, Row] = {
val isOptional = startNum == 0 && endNum == 1
val newPattern = if (startNum == 0 && endNum == -1) { // zero or more
pattern.oneOrMore().optional().consecutive()
} else if (startNum == 1 && endNum == -1) { // one or more
pattern.oneOrMore().consecutive()
} else if (isOptional) { // optional
pattern.optional()
} else if (endNum != -1) { // times
pattern.times(startNum, endNum).consecutive()
} else { // times or more
pattern.timesOrMore(startNum).consecutive()
}
if (greedy && isOptional) {
newPattern
} else if (greedy) {
newPattern.greedy()
} else if (isOptional) {
throw new TableException("Reluctant optional variables are not supported yet.")
} else {
newPattern
}
}
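  // Illustrative mapping (comment only) of MATCH_RECOGNIZE quantifiers to the CEP calls chosen by
  // applyQuantifier, based on the (startNum, endNum) pairs handled above:
  //   A*     -> (0, -1) -> pattern.oneOrMore().optional().consecutive()
  //   A+     -> (1, -1) -> pattern.oneOrMore().consecutive()
  //   A?     -> (0, 1)  -> pattern.optional()
  //   A{2,4} -> (2, 4)  -> pattern.times(2, 4).consecutive()
  //   A{2,}  -> (2, -1) -> pattern.timesOrMore(2).consecutive()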
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/nodes/datastream/DataStreamMatch.scala | Scala | apache-2.0 | 14,421 |
package oose.sbtjavaapigen
import oose.sbtjavaapigen.generator._
import org.specs2.mutable._
class JavaApiGenSpec extends SpecificationWithJUnit {
def wrapInPackage(packageName: String)(c: String) = (packageName + "." + c)
"The API Generator for java.io.File" should {
val io = Set(
"File") map wrapInPackage("java.io")
val (warnings, output) = Generator(Seq.empty,
io)
"run without warnings" in {
warnings must beEmpty
}
"create output" in {
output must not beEmpty
}
"create the correct package" in {
output must contain("package genjava.io")
}
"import java.io" in {
output must contain("import java.io")
}
"contain an api object" in {
output must contain("object api")
}
"create the correct class" in {
output must contain("implicit class ScalaFile")
}
"treats getClass correctly" in {
output must contain("def Class =")
}
}
"The API Generator for java.lang.Package" should {
val io = Set(
"Package") map wrapInPackage("java.lang")
val (warnings, output) = Generator(Seq.empty,
io)
"treat static methods correctly" in {
output must not contain ("def packages")
}
}
"The API Generator for java.lang.Thread" should {
val io = Set(
"Thread") map wrapInPackage("java.lang")
val (warnings, output) = Generator(Seq.empty,
io)
"import java.lang" in {
output must contain("import java.lang")
}
"create package genjava.lang" in {
output must contain("package genjava.lang")
}
}
"The API Generator for java.lang.Class" should {
val io = Set(
"Class") map wrapInPackage("java.lang")
val (warnings, output) = Generator(Seq.empty,
io)
"ignore that class because it has type parameters" in {
//warnings must not beEmpty
warnings(0) must contain("Class has type parameters.")
}
}
} | oose/sbt-javaapi-gen | src/test/scala/oose/sbtjavaapigen/javaapigenspec.scala | Scala | apache-2.0 | 1,950 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.params.{ClassConstr, TraitConstr, TypeParamClause}
sealed abstract class TemplateDef extends ParsingRule {
protected def parseConstructor()(implicit builder: ScalaPsiBuilder): Unit = {}
protected def extendsBlockRule: Template
override final def parse(implicit builder: ScalaPsiBuilder): Boolean =
builder.getTokenType match {
case ScalaTokenTypes.tIDENTIFIER =>
builder.advanceLexer() // Ate identifier
parseConstructor()
extendsBlockRule()
true
case _ =>
builder.error(ScalaBundle.message("identifier.expected"))
false
}
}
/**
 * [[ClassDef]] ::= id [[ClassConstr]] [ [[ClassTemplate]] ]
*/
object ClassDef extends TemplateDef {
override protected def parseConstructor()(implicit builder: ScalaPsiBuilder): Unit =
ClassConstr()
override protected def extendsBlockRule: ClassTemplate.type = ClassTemplate
}
/**
* [[TraitDef]] ::= id [ [[TypeParamClause]] ] [ [[TraitTemplate]] ]
*/
object TraitDef extends TemplateDef {
override protected def parseConstructor()(implicit builder: ScalaPsiBuilder): Unit =
TraitConstr()
override protected def extendsBlockRule: TraitTemplate.type = TraitTemplate
}
/**
* [[ObjectDef]] ::= id [ [[ClassTemplate]] ]
*/
object ObjectDef extends TemplateDef {
override protected def extendsBlockRule: ClassTemplate.type = ClassTemplate
}
/**
* [[EnumDef]] ::= id [[ClassConstr]] [[EnumTemplate]]
*/
object EnumDef extends TemplateDef {
override protected def parseConstructor()(implicit builder: ScalaPsiBuilder): Unit =
ClassConstr()
override protected def extendsBlockRule: EnumTemplate.type = EnumTemplate
}
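// Note (illustrative, not from the original sources): these rules start at the identifier, since
// the introducing `class`/`trait`/`object`/`enum` keyword is consumed by the calling rule. For
// `class Foo[T](x: Int) extends Bar`, ClassDef therefore parses `Foo[T](x: Int) extends Bar`:
// the identifier, then ClassConstr, then the extends block.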
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/parser/parsing/top/TemplateDef.scala | Scala | apache-2.0 | 1,955 |
package com.enkidu.lignum.parsers.ast.statement.declaration.initializers
import com.enkidu.lignum.parsers.ast.statement.Block
case class InstanceInitializerDeclaration(block: Block) extends InitializerDeclaration {
override def dispatch(visitor: Visitor): Unit = {
block.dispatch(visitor)
apply(visitor)
}
}
| marek1840/java-parser | src/main/scala/com/enkidu/lignum/parsers/ast/statement/declaration/initializers/InstanceInitializerDeclaration.scala | Scala | mit | 322 |
package rescala.extra.lattices.delta.crdt.basic
import kofre.decompose.{CRDTInterface, Delta, UIJDLattice}
import rescala.extra.replication.AntiEntropy
/** BasicCRDTs are Delta CRDTs that use [[AntiEntropy]] and [[Network]] as Middleware for exchanging deltas between replicas.
* They cannot actually be used on multiple connected replicas, but are useful for locally testing the behavior of
* Delta CRDTs.
*
* Generated deltas are automatically propagated to the registered [[AntiEntropy]] instance, but to apply deltas received
* by the AntiEntropy instance you need to explicitly call processReceivedDeltas on the CRDT.
*/
trait BasicCRDT[State, Wrapper] extends CRDTInterface[State, Wrapper] {
protected val antiEntropy: AntiEntropy[State]
override val replicaID: String = antiEntropy.replicaID
protected def copy(state: State = state): Wrapper
override def applyDelta(delta: Delta[State])(implicit u: UIJDLattice[State]): Wrapper = delta match {
case Delta(origin, deltaState) =>
UIJDLattice[State].diff(state, deltaState) match {
case Some(stateDiff) =>
val stateMerged = UIJDLattice[State].merge(state, stateDiff)
antiEntropy.recordChange(Delta(origin, stateDiff), stateMerged)
copy(state = stateMerged)
case None => this.asInstanceOf[Wrapper]
}
}
def processReceivedDeltas()(implicit u: UIJDLattice[State]): Wrapper = antiEntropy.getReceivedDeltas.foldLeft(this) {
(crdt, delta) => crdt.applyDelta(delta).asInstanceOf[BasicCRDT[State, Wrapper]]
}.asInstanceOf[Wrapper]
}
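// Illustrative usage sketch (comment only; the concrete wrapper type, `localDelta` and the implicit
// UIJDLattice instance are hypothetical, not part of this file):
//
//   val crdt: SomeBasicCRDT = ...                                       // wrapper backed by an AntiEntropy instance
//   val updated = crdt.applyDelta(Delta(crdt.replicaID, localDelta))    // apply a locally generated delta
//   // ... AntiEntropy exchanges deltas with other replicas over the Network ...
//   val synced = updated.processReceivedDeltas()                        // fold in deltas received from peers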
| guidosalva/REScala | Code/Extensions/Replication/src/main/scala/rescala/extra/lattices/delta/crdt/basic/BasicCRDT.scala | Scala | apache-2.0 | 1,577 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.ui
import java.util.Properties
import scala.collection.mutable.ListBuffer
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.json4s.jackson.JsonMethods._
import org.scalatest.BeforeAndAfter
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.LocalSparkContext._
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.internal.config
import org.apache.spark.internal.config.Status._
import org.apache.spark.rdd.RDD
import org.apache.spark.resource.ResourceProfile
import org.apache.spark.scheduler._
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.catalyst.util.quietly
import org.apache.spark.sql.connector.{CSVDataWriter, CSVDataWriterFactory, RangeInputPartition, SimpleScanBuilder, SimpleWritableDataSource}
import org.apache.spark.sql.connector.catalog.Table
import org.apache.spark.sql.connector.metric.{CustomMetric, CustomTaskMetric}
import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader, PartitionReaderFactory}
import org.apache.spark.sql.connector.write.{BatchWrite, DataWriter, DataWriterFactory, LogicalWriteInfo, PhysicalWriteInfo, Write, WriteBuilder}
import org.apache.spark.sql.execution.{LeafExecNode, QueryExecution, SparkPlanInfo, SQLExecution}
import org.apache.spark.sql.execution.adaptive.DisableAdaptiveExecution
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.sql.functions.count
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.UI_RETAINED_EXECUTIONS
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.status.ElementTrackingStore
import org.apache.spark.util.{AccumulatorMetadata, JsonProtocol, LongAccumulator, SerializableConfiguration}
import org.apache.spark.util.kvstore.InMemoryStore
class SQLAppStatusListenerSuite extends SharedSparkSession with JsonTestUtils
with BeforeAndAfter {
import testImplicits._
override protected def sparkConf = {
super.sparkConf.set(LIVE_ENTITY_UPDATE_PERIOD, 0L).set(ASYNC_TRACKING_ENABLED, false)
}
private var kvstore: ElementTrackingStore = _
after {
if (kvstore != null) {
kvstore.close()
kvstore = null
}
}
private def createTestDataFrame: DataFrame = {
Seq(
(1, 1),
(2, 2)
).toDF().filter("_1 > 1")
}
private def createProperties(executionId: Long): Properties = {
val properties = new Properties()
properties.setProperty(SQLExecution.EXECUTION_ID_KEY, executionId.toString)
properties
}
private def createStageInfo(stageId: Int, attemptId: Int): StageInfo = {
new StageInfo(stageId = stageId,
attemptId = attemptId,
numTasks = 8,
// The following fields are not used in tests
name = "",
rddInfos = Nil,
parentIds = Nil,
details = "",
resourceProfileId = ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID)
}
private def createTaskInfo(
taskId: Int,
attemptNumber: Int,
accums: Map[Long, Long] = Map.empty): TaskInfo = {
val info = new TaskInfo(
taskId = taskId,
attemptNumber = attemptNumber,
index = taskId.toInt,
// The following fields are not used in tests
launchTime = 0,
executorId = "",
host = "",
taskLocality = null,
speculative = false)
info.markFinished(TaskState.FINISHED, 1L)
info.setAccumulables(createAccumulatorInfos(accums))
info
}
private def createAccumulatorInfos(accumulatorUpdates: Map[Long, Long]): Seq[AccumulableInfo] = {
accumulatorUpdates.map { case (id, value) =>
val acc = new LongAccumulator
acc.metadata = AccumulatorMetadata(id, None, false)
acc.toInfo(Some(value), None)
}.toSeq
}
private def assertJobs(
exec: Option[SQLExecutionUIData],
running: Seq[Int] = Nil,
completed: Seq[Int] = Nil,
failed: Seq[Int] = Nil): Unit = {
val actualRunning = new ListBuffer[Int]()
val actualCompleted = new ListBuffer[Int]()
val actualFailed = new ListBuffer[Int]()
exec.get.jobs.foreach { case (jobId, jobStatus) =>
jobStatus match {
case JobExecutionStatus.RUNNING => actualRunning += jobId
case JobExecutionStatus.SUCCEEDED => actualCompleted += jobId
case JobExecutionStatus.FAILED => actualFailed += jobId
case _ => fail(s"Unexpected status $jobStatus")
}
}
assert(actualRunning.sorted === running)
assert(actualCompleted.sorted === completed)
assert(actualFailed.sorted === failed)
}
private def createStatusStore(): SQLAppStatusStore = {
val conf = sparkContext.conf
kvstore = new ElementTrackingStore(new InMemoryStore, conf)
val listener = new SQLAppStatusListener(conf, kvstore, live = true)
new SQLAppStatusStore(kvstore, Some(listener))
}
test("basic") {
def checkAnswer(actual: Map[Long, String], expected: Map[Long, Long]): Unit = {
assert(actual.size == expected.size)
expected.foreach { case (id, value) =>
// The values in actual can be SQL metrics meaning that they contain additional formatting
// when converted to string. Verify that they start with the expected value.
assert(actual.contains(id))
val v = actual(id).trim
if (v.contains("\\n")) {
// The actual value can be "total (max, ...)\\n6 ms (5 ms, ...)".
assert(v.split("\\n")(1).startsWith(value.toString), s"Wrong value for accumulator $id")
} else {
assert(v.startsWith(value.toString), s"Wrong value for accumulator $id")
}
}
}
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
val accumulatorIds =
SparkPlanGraph(SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan))
.allNodes.flatMap(_.metrics.map(_.accumulatorId))
// Assume all accumulators are long
var accumulatorValue = 0L
val accumulatorUpdates = accumulatorIds.map { id =>
accumulatorValue += 1L
(id, accumulatorValue)
}.toMap
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Seq(
createStageInfo(0, 0),
createStageInfo(1, 0)
),
createProperties(executionId)))
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 0)))
listener.onTaskStart(SparkListenerTaskStart(0, 0, createTaskInfo(0, 0)))
listener.onTaskStart(SparkListenerTaskStart(0, 0, createTaskInfo(1, 0)))
assert(statusStore.executionMetrics(executionId).isEmpty)
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
// (task id, stage id, stage attempt, accum updates)
(0L, 0, 0, createAccumulatorInfos(accumulatorUpdates)),
(1L, 0, 0, createAccumulatorInfos(accumulatorUpdates))
)))
checkAnswer(statusStore.executionMetrics(executionId),
accumulatorUpdates.mapValues(_ * 2).toMap)
    // Driver accumulator updates that don't belong to this execution should be filtered out and
    // no exception will be thrown.
listener.onOtherEvent(SparkListenerDriverAccumUpdates(0, Seq((999L, 2L))))
checkAnswer(statusStore.executionMetrics(executionId),
accumulatorUpdates.mapValues(_ * 2).toMap)
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
// (task id, stage id, stage attempt, accum updates)
(0L, 0, 0, createAccumulatorInfos(accumulatorUpdates)),
(1L, 0, 0, createAccumulatorInfos(accumulatorUpdates.mapValues(_ * 2).toMap))
)))
checkAnswer(statusStore.executionMetrics(executionId),
accumulatorUpdates.mapValues(_ * 3).toMap)
// Retrying a stage should reset the metrics
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 1)))
listener.onTaskStart(SparkListenerTaskStart(0, 1, createTaskInfo(0, 0)))
listener.onTaskStart(SparkListenerTaskStart(0, 1, createTaskInfo(1, 0)))
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
// (task id, stage id, stage attempt, accum updates)
(0L, 0, 1, createAccumulatorInfos(accumulatorUpdates)),
(1L, 0, 1, createAccumulatorInfos(accumulatorUpdates))
)))
checkAnswer(statusStore.executionMetrics(executionId),
accumulatorUpdates.mapValues(_ * 2).toMap)
// Ignore the task end for the first attempt
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 0,
stageAttemptId = 0,
taskType = "",
reason = null,
createTaskInfo(0, 0, accums = accumulatorUpdates.mapValues(_ * 100).toMap),
new ExecutorMetrics,
null))
checkAnswer(statusStore.executionMetrics(executionId),
accumulatorUpdates.mapValues(_ * 2).toMap)
// Finish two tasks
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 0,
stageAttemptId = 1,
taskType = "",
reason = null,
createTaskInfo(0, 0, accums = accumulatorUpdates.mapValues(_ * 2).toMap),
new ExecutorMetrics,
null))
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 0,
stageAttemptId = 1,
taskType = "",
reason = null,
createTaskInfo(1, 0, accums = accumulatorUpdates.mapValues(_ * 3).toMap),
new ExecutorMetrics,
null))
checkAnswer(statusStore.executionMetrics(executionId),
accumulatorUpdates.mapValues(_ * 5).toMap)
    // Submit a new stage
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(1, 0)))
listener.onTaskStart(SparkListenerTaskStart(1, 0, createTaskInfo(0, 0)))
listener.onTaskStart(SparkListenerTaskStart(1, 0, createTaskInfo(1, 0)))
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
// (task id, stage id, stage attempt, accum updates)
(0L, 1, 0, createAccumulatorInfos(accumulatorUpdates)),
(1L, 1, 0, createAccumulatorInfos(accumulatorUpdates))
)))
checkAnswer(statusStore.executionMetrics(executionId),
accumulatorUpdates.mapValues(_ * 7).toMap)
// Finish two tasks
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 1,
stageAttemptId = 0,
taskType = "",
reason = null,
createTaskInfo(0, 0, accums = accumulatorUpdates.mapValues(_ * 3).toMap),
new ExecutorMetrics,
null))
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 1,
stageAttemptId = 0,
taskType = "",
reason = null,
createTaskInfo(1, 0, accums = accumulatorUpdates.mapValues(_ * 3).toMap),
new ExecutorMetrics,
null))
checkAnswer(statusStore.executionMetrics(executionId),
accumulatorUpdates.mapValues(_ * 11).toMap)
assertJobs(statusStore.execution(executionId), running = Seq(0))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobSucceeded
))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
assertJobs(statusStore.execution(executionId), completed = Seq(0))
checkAnswer(statusStore.executionMetrics(executionId),
accumulatorUpdates.mapValues(_ * 11).toMap)
}
test("control a plan explain mode in listeners via SQLConf") {
def checkPlanDescription(mode: String, expected: Seq[String]): Unit = {
var checkDone = false
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLExecutionStart(_, _, _, planDescription, _, _) =>
assert(expected.forall(planDescription.contains))
checkDone = true
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
withSQLConf(SQLConf.UI_EXPLAIN_MODE.key -> mode) {
createTestDataFrame.collect()
try {
spark.sparkContext.listenerBus.waitUntilEmpty()
assert(checkDone)
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
}
Seq(("simple", Seq("== Physical Plan ==")),
("extended", Seq("== Parsed Logical Plan ==", "== Analyzed Logical Plan ==",
"== Optimized Logical Plan ==", "== Physical Plan ==")),
("codegen", Seq("WholeStageCodegen subtrees")),
("cost", Seq("== Optimized Logical Plan ==", "Statistics(sizeInBytes")),
("formatted", Seq("== Physical Plan ==", "Output", "Arguments"))).foreach {
case (mode, expected) =>
checkPlanDescription(mode, expected)
}
}
test("onExecutionEnd happens before onJobEnd(JobSucceeded)") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Nil,
createProperties(executionId)))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobSucceeded
))
assertJobs(statusStore.execution(executionId), completed = Seq(0))
}
test("onExecutionEnd happens before multiple onJobEnd(JobSucceeded)s") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Nil,
createProperties(executionId)))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobSucceeded
))
listener.onJobStart(SparkListenerJobStart(
jobId = 1,
time = System.currentTimeMillis(),
stageInfos = Nil,
createProperties(executionId)))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 1,
time = System.currentTimeMillis(),
JobSucceeded
))
assertJobs(statusStore.execution(executionId), completed = Seq(0, 1))
}
test("onExecutionEnd happens before onJobEnd(JobFailed)") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Seq.empty,
createProperties(executionId)))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobFailed(new RuntimeException("Oops"))
))
assertJobs(statusStore.execution(executionId), failed = Seq(0))
}
test("onJobStart happens after onExecutionEnd shouldn't overwrite kvstore") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Seq(createStageInfo(0, 0)),
createProperties(executionId)))
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 0)))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobFailed(new RuntimeException("Oops"))))
assert(listener.noLiveData())
assert(statusStore.execution(executionId).get.completionTime.nonEmpty)
}
test("handle one execution with multiple jobs") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
System.currentTimeMillis()))
var stageId = 0
def twoStageJob(jobId: Int): Unit = {
val stages = Seq(stageId, stageId + 1).map { id => createStageInfo(id, 0)}
stageId += 2
listener.onJobStart(SparkListenerJobStart(
jobId = jobId,
time = System.currentTimeMillis(),
stageInfos = stages,
createProperties(executionId)))
stages.foreach { s =>
listener.onStageSubmitted(SparkListenerStageSubmitted(s))
listener.onStageCompleted(SparkListenerStageCompleted(s))
}
listener.onJobEnd(SparkListenerJobEnd(
jobId = jobId,
time = System.currentTimeMillis(),
JobSucceeded
))
}
// submit two jobs with the same executionId
twoStageJob(0)
twoStageJob(1)
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
assertJobs(statusStore.execution(0), completed = 0 to 1)
assert(statusStore.execution(0).get.stages === (0 to 3).toSet)
}
test("SPARK-11126: no memory leak when running non SQL jobs") {
val listener = spark.sharedState.statusStore.listener.get
// At the beginning of this test case, there should be no live data in the listener.
assert(listener.noLiveData())
spark.sparkContext.parallelize(1 to 10).foreach(i => ())
spark.sparkContext.listenerBus.waitUntilEmpty()
// Listener should ignore the non-SQL stages, as the stage data are only removed when SQL
// execution ends, which will not be triggered for non-SQL jobs.
assert(listener.noLiveData())
}
test("driver side SQL metrics") {
val statusStore = spark.sharedState.statusStore
val oldCount = statusStore.executionsList().size
val expectedAccumValue = 12345L
val expectedAccumValue2 = 54321L
val physicalPlan = MyPlan(sqlContext.sparkContext, expectedAccumValue, expectedAccumValue2)
val dummyQueryExecution = new QueryExecution(spark, LocalRelation()) {
override lazy val sparkPlan = physicalPlan
override lazy val executedPlan = physicalPlan
}
SQLExecution.withNewExecutionId(dummyQueryExecution) {
physicalPlan.execute().collect()
}
// Wait until the new execution is started and being tracked.
while (statusStore.executionsCount() < oldCount) {
Thread.sleep(100)
}
// Wait for listener to finish computing the metrics for the execution.
while (statusStore.executionsList().isEmpty ||
statusStore.executionsList().last.metricValues == null) {
Thread.sleep(100)
}
val execId = statusStore.executionsList().last.executionId
val metrics = statusStore.executionMetrics(execId)
val driverMetric = physicalPlan.metrics("dummy")
val driverMetric2 = physicalPlan.metrics("dummy2")
val expectedValue = SQLMetrics.stringValue(driverMetric.metricType,
Array(expectedAccumValue), Array.empty[Long])
val expectedValue2 = SQLMetrics.stringValue(driverMetric2.metricType,
Array(expectedAccumValue2), Array.empty[Long])
assert(metrics.contains(driverMetric.id))
assert(metrics(driverMetric.id) === expectedValue)
assert(metrics.contains(driverMetric2.id))
assert(metrics(driverMetric2.id) === expectedValue2)
}
test("roundtripping SparkListenerDriverAccumUpdates through JsonProtocol (SPARK-18462)") {
val event = SparkListenerDriverAccumUpdates(1L, Seq((2L, 3L)))
val json = JsonProtocol.sparkEventToJson(event)
assertValidDataInJson(json,
parse("""
|{
| "Event": "org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates",
| "executionId": 1,
| "accumUpdates": [[2,3]]
|}
""".stripMargin))
JsonProtocol.sparkEventFromJson(json) match {
case SparkListenerDriverAccumUpdates(executionId, accums) =>
assert(executionId == 1L)
accums.foreach { case (a, b) =>
assert(a == 2L)
assert(b == 3L)
}
}
// Test a case where the numbers in the JSON can only fit in longs:
val longJson = parse(
"""
|{
| "Event": "org.apache.spark.sql.execution.ui.SparkListenerDriverAccumUpdates",
| "executionId": 4294967294,
| "accumUpdates": [[4294967294,3]]
|}
""".stripMargin)
JsonProtocol.sparkEventFromJson(longJson) match {
case SparkListenerDriverAccumUpdates(executionId, accums) =>
assert(executionId == 4294967294L)
accums.foreach { case (a, b) =>
assert(a == 4294967294L)
assert(b == 3L)
}
}
}
test("eviction should respect execution completion time") {
val conf = sparkContext.conf.clone().set(UI_RETAINED_EXECUTIONS.key, "2")
kvstore = new ElementTrackingStore(new InMemoryStore, conf)
val listener = new SQLAppStatusListener(conf, kvstore, live = true)
val statusStore = new SQLAppStatusStore(kvstore, Some(listener))
var time = 0
val df = createTestDataFrame
// Start execution 1 and execution 2
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionStart(
1,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
time))
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionStart(
2,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
time))
// Stop execution 2 before execution 1
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionEnd(2, time))
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionEnd(1, time))
    // Start execution 3; execution 2 should be evicted.
time += 1
listener.onOtherEvent(SparkListenerSQLExecutionStart(
3,
"test",
"test",
df.queryExecution.toString,
SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan),
time))
assert(statusStore.executionsCount === 2)
assert(statusStore.execution(2) === None)
}
test("SPARK-29894 test Codegen Stage Id in SparkPlanInfo",
DisableAdaptiveExecution("WSCG rule is applied later in AQE")) {
// with AQE on, the WholeStageCodegen rule is applied when running QueryStageExec.
val df = createTestDataFrame.select(count("*"))
val sparkPlanInfo = SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan)
assert(sparkPlanInfo.nodeName === "WholeStageCodegen (2)")
}
test("SPARK-32615,SPARK-33016: SQLMetrics validation after sparkPlanInfo updated in AQE") {
val statusStore = createStatusStore()
val listener = statusStore.listener.get
val executionId = 0
val df = createTestDataFrame
// oldPlan SQLMetrics
// SQLPlanMetric(duration,0,timing)
// SQLPlanMetric(number of output rows,1,sum)
// SQLPlanMetric(number of output rows,2,sum)
val oldPlan = SparkPlanInfo.fromSparkPlan(df.queryExecution.executedPlan)
val oldAccumulatorIds =
SparkPlanGraph(oldPlan)
.allNodes.flatMap(_.metrics.map(_.accumulatorId))
listener.onOtherEvent(SparkListenerSQLExecutionStart(
executionId,
"test",
"test",
df.queryExecution.toString,
oldPlan,
System.currentTimeMillis()))
listener.onJobStart(SparkListenerJobStart(
jobId = 0,
time = System.currentTimeMillis(),
stageInfos = Seq(createStageInfo(0, 0)),
createProperties(executionId)))
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(0, 0)))
listener.onTaskStart(SparkListenerTaskStart(0, 0, createTaskInfo(0, 0)))
assert(statusStore.executionMetrics(executionId).isEmpty)
    // update old metrics with ids 1 & 2; id 0 is a timing metric, and timing metrics have a
    // complicated string presentation, so we don't test them here.
val oldMetricsValueMap = oldAccumulatorIds.sorted.tail.map(id => (id, 100L)).toMap
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
(0L, 0, 0, createAccumulatorInfos(oldMetricsValueMap))
)))
assert(statusStore.executionMetrics(executionId).size == 2)
statusStore.executionMetrics(executionId).foreach { m =>
assert(m._2 == "100")
}
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 0,
stageAttemptId = 0,
taskType = "",
reason = null,
createTaskInfo(0, 0),
new ExecutorMetrics,
null))
listener.onStageCompleted(SparkListenerStageCompleted(createStageInfo(0, 0)))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 0,
time = System.currentTimeMillis(),
JobSucceeded
))
val df2 = createTestDataFrame.filter("_2 > 2")
// newPlan SQLMetrics
// SQLPlanMetric(duration,3,timing)
// SQLPlanMetric(number of output rows,4,sum)
// SQLPlanMetric(number of output rows,5,sum)
val newPlan = SparkPlanInfo.fromSparkPlan(df2.queryExecution.executedPlan)
val newAccumulatorIds =
SparkPlanGraph(newPlan)
.allNodes.flatMap(_.metrics.map(_.accumulatorId))
    // Assume that AQE updates sparkPlanInfo with newPlan.
    // ExecutionMetrics will be appended using newPlan's SQLMetrics.
listener.onOtherEvent(SparkListenerSQLAdaptiveExecutionUpdate(
executionId,
"test",
newPlan))
listener.onJobStart(SparkListenerJobStart(
jobId = 1,
time = System.currentTimeMillis(),
stageInfos = Seq(createStageInfo(1, 0)),
createProperties(executionId)))
listener.onStageSubmitted(SparkListenerStageSubmitted(createStageInfo(1, 0)))
listener.onTaskStart(SparkListenerTaskStart(1, 0, createTaskInfo(0, 0)))
    // historical metrics are kept even though the plan has been updated to newPlan.
assert(statusStore.executionMetrics(executionId).size == 2)
    // update new metrics with ids 4 & 5; id 3 is a timing metric, and timing metrics have a
    // complicated string presentation, so we don't test them here.
val newMetricsValueMap = newAccumulatorIds.sorted.tail.map(id => (id, 500L)).toMap
listener.onExecutorMetricsUpdate(SparkListenerExecutorMetricsUpdate("", Seq(
(0L, 1, 0, createAccumulatorInfos(newMetricsValueMap))
)))
assert(statusStore.executionMetrics(executionId).size == 4)
statusStore.executionMetrics(executionId).foreach { m =>
assert(m._2 == "100" || m._2 == "500")
}
listener.onTaskEnd(SparkListenerTaskEnd(
stageId = 1,
stageAttemptId = 0,
taskType = "",
reason = null,
createTaskInfo(0, 0),
new ExecutorMetrics,
null))
listener.onStageCompleted(SparkListenerStageCompleted(createStageInfo(1, 0)))
listener.onJobEnd(SparkListenerJobEnd(
jobId = 1,
time = System.currentTimeMillis(),
JobSucceeded
))
    // aggregateMetrics should contain all metrics from job 0 and job 1
val aggregateMetrics = listener.liveExecutionMetrics(executionId)
if (aggregateMetrics.isDefined) {
assert(aggregateMetrics.get.keySet.size == 4)
}
listener.onOtherEvent(SparkListenerSQLExecutionEnd(
executionId, System.currentTimeMillis()))
}
test("SPARK-34338: Report metrics from Datasource v2 scan") {
val statusStore = spark.sharedState.statusStore
val oldCount = statusStore.executionsList().size
val schema = new StructType().add("i", "int").add("j", "int")
val physicalPlan = BatchScanExec(schema.toAttributes, new CustomMetricScanBuilder(), Seq.empty)
val dummyQueryExecution = new QueryExecution(spark, LocalRelation()) {
override lazy val sparkPlan = physicalPlan
override lazy val executedPlan = physicalPlan
}
SQLExecution.withNewExecutionId(dummyQueryExecution) {
physicalPlan.execute().collect()
}
// Wait until the new execution is started and being tracked.
while (statusStore.executionsCount() < oldCount) {
Thread.sleep(100)
}
// Wait for listener to finish computing the metrics for the execution.
while (statusStore.executionsList().isEmpty ||
statusStore.executionsList().last.metricValues == null) {
Thread.sleep(100)
}
val execId = statusStore.executionsList().last.executionId
val metrics = statusStore.executionMetrics(execId)
val expectedMetric = physicalPlan.metrics("custom_metric")
val expectedValue = "custom_metric: 12345, 12345"
val innerMetric = physicalPlan.metrics("inner_metric")
val expectedInnerValue = "inner_metric: 54321, 54321"
assert(metrics.contains(expectedMetric.id))
assert(metrics(expectedMetric.id) === expectedValue)
assert(metrics.contains(innerMetric.id))
assert(metrics(innerMetric.id) === expectedInnerValue)
}
test("SPARK-36030: Report metrics from Datasource v2 write") {
withTempDir { dir =>
val statusStore = spark.sharedState.statusStore
val oldCount = statusStore.executionsList().size
val cls = classOf[CustomMetricsDataSource].getName
spark.range(10).select('id as 'i, -'id as 'j).write.format(cls)
.option("path", dir.getCanonicalPath).mode("append").save()
// Wait until the new execution is started and being tracked.
eventually(timeout(10.seconds), interval(10.milliseconds)) {
assert(statusStore.executionsCount() >= oldCount)
}
// Wait for listener to finish computing the metrics for the execution.
eventually(timeout(10.seconds), interval(10.milliseconds)) {
assert(statusStore.executionsList().nonEmpty &&
statusStore.executionsList().last.metricValues != null)
}
val execId = statusStore.executionsList().last.executionId
val metrics = statusStore.executionMetrics(execId)
val customMetric = metrics.find(_._2 == "custom_metric: 12345, 12345")
val innerMetric = metrics.find(_._2 == "inner_metric: 54321, 54321")
assert(customMetric.isDefined)
assert(innerMetric.isDefined)
}
}
}
/**
* A dummy [[org.apache.spark.sql.execution.SparkPlan]] that updates a [[SQLMetrics]]
* on the driver.
*/
private case class MyPlan(sc: SparkContext, expectedValue: Long, expectedValue2: Long)
extends LeafExecNode {
override def sparkContext: SparkContext = sc
override def output: Seq[Attribute] = Seq()
override val metrics: Map[String, SQLMetric] = Map(
"dummy" -> SQLMetrics.createMetric(sc, "dummy"),
"dummy2" -> SQLMetrics.createMetric(sc, "dummy2"))
override def doExecute(): RDD[InternalRow] = {
longMetric("dummy") += expectedValue
longMetric("dummy2") += expectedValue2
    // postDriverMetricUpdates may happen multiple times in a query.
// (normally from different operators, but for the sake of testing, from one operator)
SQLMetrics.postDriverMetricUpdates(
sc,
sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY),
Seq(metrics("dummy")))
SQLMetrics.postDriverMetricUpdates(
sc,
sc.getLocalProperty(SQLExecution.EXECUTION_ID_KEY),
Seq(metrics("dummy2")))
sc.emptyRDD
}
}
class SQLAppStatusListenerMemoryLeakSuite extends SparkFunSuite {
test("no memory leak") {
val conf = new SparkConf()
.setMaster("local")
.setAppName("test")
.set(config.TASK_MAX_FAILURES, 1) // Don't retry the tasks to run this test quickly
.set(UI_RETAINED_EXECUTIONS.key, "50") // Set it to 50 to run this test quickly
.set(ASYNC_TRACKING_ENABLED, false)
withSpark(new SparkContext(conf)) { sc =>
quietly {
val spark = new SparkSession(sc)
import spark.implicits._
// Run 100 successful executions and 100 failed executions.
// Each execution only has one job and one stage.
for (i <- 0 until 100) {
val df = Seq(
(1, 1),
(2, 2)
).toDF()
df.collect()
try {
df.foreach(_ => throw new RuntimeException("Oops"))
} catch {
case e: SparkException => // This is expected for a failed job
}
}
sc.listenerBus.waitUntilEmpty()
val statusStore = spark.sharedState.statusStore
assert(statusStore.executionsCount() <= 50)
assert(statusStore.planGraphCount() <= 50)
// No live data should be left behind after all executions end.
assert(statusStore.listener.get.noLiveData())
}
}
}
}
object Outer {
class InnerCustomMetric extends CustomMetric {
override def name(): String = "inner_metric"
override def description(): String = "a simple custom metric in an inner class"
override def aggregateTaskMetrics(taskMetrics: Array[Long]): String = {
s"inner_metric: ${taskMetrics.mkString(", ")}"
}
}
}
class SimpleCustomMetric extends CustomMetric {
override def name(): String = "custom_metric"
override def description(): String = "a simple custom metric"
override def aggregateTaskMetrics(taskMetrics: Array[Long]): String = {
s"custom_metric: ${taskMetrics.mkString(", ")}"
}
}
// The following classes are for custom metrics of the V2 data source.
object CustomMetricReaderFactory extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val RangeInputPartition(start, end) = partition
new PartitionReader[InternalRow] {
private var current = start - 1
override def next(): Boolean = {
current += 1
current < end
}
override def get(): InternalRow = InternalRow(current, -current)
override def close(): Unit = {}
override def currentMetricsValues(): Array[CustomTaskMetric] = {
val metric = new CustomTaskMetric {
override def name(): String = "custom_metric"
override def value(): Long = 12345
}
val innerMetric = new CustomTaskMetric {
override def name(): String = "inner_metric"
          override def value(): Long = 54321
}
Array(metric, innerMetric)
}
}
}
}
class CustomMetricScanBuilder extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
Array(RangeInputPartition(0, 5), RangeInputPartition(5, 10))
}
override def supportedCustomMetrics(): Array[CustomMetric] = {
Array(new SimpleCustomMetric, new Outer.InnerCustomMetric)
}
override def createReaderFactory(): PartitionReaderFactory = CustomMetricReaderFactory
}
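// Note (added for clarity, not part of the original file): with the two RangeInputPartition
// splits above, the scan runs two tasks, each reporting a CustomTaskMetric value of 12345 for
// "custom_metric" (and 54321 for "inner_metric"); SimpleCustomMetric.aggregateTaskMetrics then
// joins the per-task values, which is why the test earlier expects the display strings
// "custom_metric: 12345, 12345" and "inner_metric: 54321, 54321".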
class CustomMetricsCSVDataWriter(fs: FileSystem, file: Path) extends CSVDataWriter(fs, file) {
override def currentMetricsValues(): Array[CustomTaskMetric] = {
val metric = new CustomTaskMetric {
override def name(): String = "custom_metric"
override def value(): Long = 12345
}
val innerMetric = new CustomTaskMetric {
override def name(): String = "inner_metric"
      override def value(): Long = 54321
}
Array(metric, innerMetric)
}
}
class CustomMetricsWriterFactory(path: String, jobId: String, conf: SerializableConfiguration)
extends CSVDataWriterFactory(path, jobId, conf) {
override def createWriter(partitionId: Int, taskId: Long): DataWriter[InternalRow] = {
val jobPath = new Path(new Path(path, "_temporary"), jobId)
val filePath = new Path(jobPath, s"$jobId-$partitionId-$taskId")
val fs = filePath.getFileSystem(conf.value)
new CustomMetricsCSVDataWriter(fs, filePath)
}
}
class CustomMetricsDataSource extends SimpleWritableDataSource {
class CustomMetricBatchWrite(queryId: String, path: String, conf: Configuration)
extends MyBatchWrite(queryId, path, conf) {
override def createBatchWriterFactory(info: PhysicalWriteInfo): DataWriterFactory = {
new CustomMetricsWriterFactory(path, queryId, new SerializableConfiguration(conf))
}
}
class CustomMetricWriteBuilder(path: String, info: LogicalWriteInfo)
extends MyWriteBuilder(path, info) {
override def build(): Write = {
new Write {
override def toBatch: BatchWrite = {
val hadoopPath = new Path(path)
val hadoopConf = SparkContext.getActive.get.hadoopConfiguration
val fs = hadoopPath.getFileSystem(hadoopConf)
if (needTruncate) {
fs.delete(hadoopPath, true)
}
val pathStr = hadoopPath.toUri.toString
new CustomMetricBatchWrite(queryId, pathStr, hadoopConf)
}
override def supportedCustomMetrics(): Array[CustomMetric] = {
Array(new SimpleCustomMetric, new Outer.InnerCustomMetric)
}
}
}
}
class CustomMetricTable(options: CaseInsensitiveStringMap) extends MyTable(options) {
override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
new CustomMetricWriteBuilder(path, info)
}
}
override def getTable(options: CaseInsensitiveStringMap): Table = {
new CustomMetricTable(options)
}
}
| nchammas/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/ui/SQLAppStatusListenerSuite.scala | Scala | apache-2.0 | 39,582 |
package com.chaos.pingplusplus
/**
* Created by zcfrank1st on 11/14/14.
*/
object Pingpp {
final val LIVE_API_BASE: String = "https://api.pingplusplus.com"
val VERSION: String = "1.0.3"
@volatile
var apiKey: String = "sk_test_LGyXvHaDaDOSTOizTG5GOqPO"
@volatile
var apiVersion: String = "2014-10-10"
@volatile
var verifySSL: Boolean = true
@volatile
var apiBase: String = LIVE_API_BASE
}
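// Note (added for clarity, not part of the original file): the @volatile vars above act as
// process-wide client configuration; callers typically overwrite the defaults once at startup,
// e.g. `Pingpp.apiKey = "sk_live_..."` (placeholder value), before issuing requests.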
| zcfrank1st/chaos-pingapp-scala | src/main/scala/com/chaos/pingplusplus/Pingpp.scala | Scala | mit | 431 |
package edu.berkeley.cs.amplab.mlmatrix
import java.util.concurrent.ThreadLocalRandom
import breeze.linalg._
import edu.berkeley.cs.amplab.mlmatrix.util.QRUtils
import edu.berkeley.cs.amplab.mlmatrix.util.Utils
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkException
import org.apache.spark.scheduler.StatsReportListener
object StabilityChecker extends Logging {
def main(args: Array[String]) {
if (args.length < 6) {
println("Usage: StabilityChecker <master> <sparkHome> <numRows> <numCols> <numParts> " +
"<numClasses> <solver: tsqr|normal|sgd|local> [<stepsize> <numIters> <miniBatchFraction>]")
System.exit(0)
}
val sparkMaster = args(0)
val numRows = args(1).toInt
val numCols = args(2).toInt
val numParts = args(3).toInt
val numClasses = args(4).toInt
val solver = args(5)
var stepSize = 0.1
var numIterations = 10
var miniBatchFraction = 1.0
if (solver == "sgd") {
if (args.length < 9) {
println("Usage: StabilityChecker <master> <sparkHome> <numRows> <numCols> <numParts> " +
"<numClasses> <solver: tsqr|normal|sgd|local> [<stepsize> <numIters> <miniBatchFraction>]")
System.exit(0)
} else {
stepSize = args(6).toDouble
numIterations = args(7).toInt
miniBatchFraction = args(8).toDouble
}
}
val sc = new SparkContext(sparkMaster, "StabilityChecker",
jars=SparkContext.jarOfClass(this.getClass).toSeq)
sc.addSparkListener(new StatsReportListener)
val thetas = Seq(3.74e-6)
val condNumbers = Seq(1.0, 10, 1e3, 1e6, 1e9, 1e12, 1e15)
val lss = LinearSystem.createLinearSystems(sc, numRows, numCols, numClasses,
numParts, condNumbers, thetas)
lss.foreach { ls =>
var begin = System.nanoTime()
val xComputed = solver.toLowerCase match {
case "normal" =>
new NormalEquations().solveLeastSquares(ls.A, ls.b)
case "sgd" =>
new LeastSquaresGradientDescent(numIterations, stepSize, miniBatchFraction).solveLeastSquares(ls.A, ls.b)
case "tsqr" =>
new TSQR().solveLeastSquares(ls.A, ls.b)
case "local" =>
val (r, qtb) = QRUtils.qrSolve(ls.A.collect(), ls.b.collect())
r \\ qtb
case _ =>
logError("Invalid Solver " + solver + " should be one of tsqr|normal|sgd")
logError("Using TSQR")
new TSQR().solveLeastSquares(ls.A, ls.b)
}
var end = System.nanoTime()
ls.computeResidualNorm(xComputed)
ls.computeRelativeError(xComputed)
logInfo("Solver: " + solver + " of " + numRows + " x " + ls.A.numCols + " took " +
(end - begin)/1e6 + "ms")
val R = ls.A.qrR()
val svd.SVD(uR,sR,vR) = svd(R)
val conditionNumberR = sR.data.max / sR.data.min
logInfo("Actual condition number " + conditionNumberR + ", Estimate: " + ls.A.condEst(Some(R)))
}
}
}
| amplab/ml-matrix | src/main/scala/edu/berkeley/cs/amplab/mlmatrix/StabilityChecker.scala | Scala | apache-2.0 | 3,014 |
package actors
import actors.SteamGameDetailsActor.RunRefresh
import akka.actor.Actor
import model.Tables
import play.api.libs.ws.WSClient
import services.{SteamEntry, SteamStorePageRetriever}
import scala.concurrent.{ExecutionContext, Future}
object SteamGameDetailsActor {
case class RunRefresh()
}
class SteamGameDetailsActor(steamIds: Seq[Long], client: WSClient, tables: Tables, implicit val exec: ExecutionContext) extends Actor {
val steamRetriever = new SteamStorePageRetriever(client)
override def receive: Receive = {
{
case _: RunRefresh =>
val categoriesAndTags = for {
detailPages <- Future.sequence(steamIds.map(id => steamRetriever.retrieve(id.toString).map((id, _))))
} yield {
detailPages.map({
case (steamId: Long, page: String) =>
val parsed = SteamEntry.parseGenresAndTags(page)
(steamId, parsed._1, parsed._2)
})
}
categoriesAndTags.map(tables.updateSteamGenresAndTags)
}
}
}
| kongus99/Aggregator | app/actors/SteamGameDetailsActor.scala | Scala | gpl-3.0 | 1,029 |
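// Project Euler problem 5: find the smallest positive number evenly divisible by all of
// 1..20, i.e. lcm(1..20). For each factor i in 2..20 the maximum multiplicity of i across the
// prime factorizations is taken, and the product of i raised to those multiplicities is
// printed. (Comment added for clarity; the Factorization class is defined elsewhere in this
// repository.)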
object Prob5 {
def main(args: Array[String]) {
val max = 20
val factorization = new Factorization()
val facts = (2 to max).map { i => factorization(i) }
val result = (2 to max).map { i =>
val num = facts.map { _.count(_ == i) }.max
Iterator.fill(num)(i).product
}.product
println(result)
}
}
| ponkotuy/ProjectEular | src/main/scala/Prob5.scala | Scala | mit | 333 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.cache.caffeine
import java.util.concurrent.TimeUnit
import javax.inject.{ Inject, Provider, Singleton }
import javax.cache.CacheException
import akka.Done
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.google.common.primitives.Primitives
import play.cache.caffeine.NamedCaffeineCache
import play.api.cache._
import play.api.inject._
import play.api.Configuration
import play.cache.{ NamedCacheImpl, SyncCacheApiAdapter, AsyncCacheApi => JavaAsyncCacheApi, DefaultAsyncCacheApi => JavaDefaultAsyncCacheApi, SyncCacheApi => JavaSyncCacheApi }
import scala.concurrent.duration.{ Duration, FiniteDuration }
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
/**
* CaffeineCache components for compile time injection
*/
trait CaffeineCacheComponents {
def configuration: Configuration
def actorSystem: ActorSystem
implicit def executionContext: ExecutionContext
lazy val caffeineCacheManager: CaffeineCacheManager = new CaffeineCacheManager(configuration.underlying.getConfig("play.cache.caffeine"))
/**
   * Use this to create a cache with the given name.
*/
def cacheApi(name: String): AsyncCacheApi = {
val ec = configuration.get[Option[String]]("play.cache.dispatcher")
.fold(executionContext)(actorSystem.dispatchers.lookup(_))
new CaffeineCacheApi(NamedCaffeineCacheProvider.getNamedCache(name, caffeineCacheManager, configuration))(ec)
}
lazy val defaultCacheApi: AsyncCacheApi = cacheApi(configuration.underlying.getString("play.cache.defaultCache"))
}
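// Illustrative sketch (added for clarity, not part of the original file): an application using
// compile-time dependency injection would typically mix this trait into its components cake and
// obtain named caches from it, roughly like the hypothetical outline below (the concrete class
// also has to provide the router and any other members Play requires):
//
//   class MyComponents(context: ApplicationLoader.Context)
//       extends BuiltInComponentsFromContext(context)
//       with CaffeineCacheComponents {
//     lazy val sessionCache: AsyncCacheApi = cacheApi("session-cache")
//   }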
/**
* CaffeineCache implementation.
*/
class CaffeineCacheModule extends SimpleModule((environment, configuration) => {
import scala.collection.JavaConverters._
val defaultCacheName = configuration.underlying.getString("play.cache.defaultCache")
val bindCaches = configuration.underlying.getStringList("play.cache.bindCaches").asScala
// Creates a named cache qualifier
def named(name: String): NamedCache = {
new NamedCacheImpl(name)
}
// bind wrapper classes
def wrapperBindings(cacheApiKey: BindingKey[AsyncCacheApi], namedCache: NamedCache): Seq[Binding[_]] = Seq(
bind[JavaAsyncCacheApi].qualifiedWith(namedCache).to(new NamedJavaAsyncCacheApiProvider(cacheApiKey)),
bind[Cached].qualifiedWith(namedCache).to(new NamedCachedProvider(cacheApiKey)),
bind[SyncCacheApi].qualifiedWith(namedCache).to(new NamedSyncCacheApiProvider(cacheApiKey)),
bind[JavaSyncCacheApi].qualifiedWith(namedCache).to(new NamedJavaSyncCacheApiProvider(cacheApiKey))
)
// bind a cache with the given name
def bindCache(name: String) = {
val namedCache = named(name)
val caffeineCacheKey = bind[NamedCaffeineCache[Any, Any]].qualifiedWith(namedCache)
val cacheApiKey = bind[AsyncCacheApi].qualifiedWith(namedCache)
Seq(
caffeineCacheKey.to(new NamedCaffeineCacheProvider(name, configuration)),
cacheApiKey.to(new NamedAsyncCacheApiProvider(caffeineCacheKey))
) ++ wrapperBindings(cacheApiKey, namedCache)
}
def bindDefault[T: ClassTag]: Binding[T] = {
bind[T].to(bind[T].qualifiedWith(named(defaultCacheName)))
}
Seq(
bind[CaffeineCacheManager].toProvider[CacheManagerProvider],
// alias the default cache to the unqualified implementation
bindDefault[AsyncCacheApi],
bindDefault[JavaAsyncCacheApi],
bindDefault[SyncCacheApi],
bindDefault[JavaSyncCacheApi]
) ++ bindCache(defaultCacheName) ++ bindCaches.flatMap(bindCache)
})
@Singleton
class CacheManagerProvider @Inject() (configuration: Configuration) extends Provider[CaffeineCacheManager] {
lazy val get: CaffeineCacheManager = {
val cacheManager: CaffeineCacheManager = new CaffeineCacheManager(configuration.underlying.getConfig("play.cache.caffeine"))
cacheManager
}
}
private[play] class NamedCaffeineCacheProvider(name: String, configuration: Configuration) extends Provider[NamedCaffeineCache[Any, Any]] {
@Inject private var manager: CaffeineCacheManager = _
lazy val get: NamedCaffeineCache[Any, Any] = NamedCaffeineCacheProvider.getNamedCache(name, manager, configuration)
}
private[play] object NamedCaffeineCacheProvider {
def getNamedCache(name: String, manager: CaffeineCacheManager, configuration: Configuration) = try {
manager.getCache(name).asInstanceOf[NamedCaffeineCache[Any, Any]]
} catch {
case e: CacheException =>
throw new CaffeineCacheExistsException(
s"""A CaffeineCache instance with name '$name' already exists.
|
|This usually indicates that multiple instances of a dependent component (e.g. a Play application) have been started at the same time.
""".stripMargin, e)
}
}
private[play] class NamedAsyncCacheApiProvider(key: BindingKey[NamedCaffeineCache[Any, Any]]) extends Provider[AsyncCacheApi] {
@Inject private var injector: Injector = _
@Inject private var defaultEc: ExecutionContext = _
@Inject private var configuration: Configuration = _
@Inject private var actorSystem: ActorSystem = _
private lazy val ec: ExecutionContext = configuration.get[Option[String]]("play.cache.dispatcher").map(actorSystem.dispatchers.lookup(_)).getOrElse(defaultEc)
lazy val get: AsyncCacheApi =
new CaffeineCacheApi(injector.instanceOf(key))(ec)
}
private[play] class NamedSyncCacheApiProvider(key: BindingKey[AsyncCacheApi])
extends Provider[SyncCacheApi] {
@Inject private var injector: Injector = _
lazy val get: SyncCacheApi = {
val async = injector.instanceOf(key)
async.sync match {
case sync: SyncCacheApi => sync
case _ => new DefaultSyncCacheApi(async)
}
}
}
private[play] class NamedJavaAsyncCacheApiProvider(key: BindingKey[AsyncCacheApi]) extends Provider[JavaAsyncCacheApi] {
@Inject private var injector: Injector = _
lazy val get: JavaAsyncCacheApi = {
new JavaDefaultAsyncCacheApi(injector.instanceOf(key))
}
}
private[play] class NamedJavaSyncCacheApiProvider(key: BindingKey[AsyncCacheApi])
extends Provider[JavaSyncCacheApi] {
@Inject private var injector: Injector = _
lazy val get: JavaSyncCacheApi =
new SyncCacheApiAdapter(injector.instanceOf(key).sync)
}
private[play] class NamedCachedProvider(key: BindingKey[AsyncCacheApi]) extends Provider[Cached] {
@Inject private var injector: Injector = _
lazy val get: Cached =
new Cached(injector.instanceOf(key))(injector.instanceOf[Materializer])
}
private[play] case class CaffeineCacheExistsException(msg: String, cause: Throwable) extends RuntimeException(msg, cause)
class SyncCaffeineCacheApi @Inject() (val cache: NamedCaffeineCache[Any, Any]) extends SyncCacheApi {
override def set(key: String, value: Any, expiration: Duration): Unit = {
expiration match {
case infinite: Duration.Infinite => cache.policy().expireVariably().get().put(key, value, Long.MaxValue, TimeUnit.DAYS)
case finite: FiniteDuration =>
val seconds = finite.toSeconds
if (seconds <= 0) {
cache.policy().expireVariably().get().put(key, value, 1, TimeUnit.SECONDS)
} else {
cache.policy().expireVariably().get().put(key, value, seconds.toInt, TimeUnit.SECONDS)
}
}
Done
}
override def remove(key: String): Unit = cache.invalidate(key)
override def getOrElseUpdate[A: ClassTag](key: String, expiration: Duration)(orElse: => A): A = {
get[A](key) match {
case Some(value) => value
case None =>
val value = orElse
set(key, value, expiration)
value
}
}
override def get[T](key: String)(implicit ct: ClassTag[T]): Option[T] = {
Option(cache.getIfPresent(key)).filter { v =>
Primitives.wrap(ct.runtimeClass).isInstance(v) ||
ct == ClassTag.Nothing || (ct == ClassTag.Unit && v == ((): Unit))
}.asInstanceOf[Option[T]]
}
}
/**
* Cache implementation of [[AsyncCacheApi]]. Since Cache is synchronous by default, this uses [[SyncCaffeineCacheApi]].
*/
class CaffeineCacheApi @Inject() (val cache: NamedCaffeineCache[Any, Any])(implicit context: ExecutionContext) extends AsyncCacheApi {
override lazy val sync: SyncCaffeineCacheApi = new SyncCaffeineCacheApi(cache)
def set(key: String, value: Any, expiration: Duration): Future[Done] = Future {
sync.set(key, value, expiration)
Done
}
def get[T: ClassTag](key: String): Future[Option[T]] = Future {
sync.get(key)
}
def remove(key: String): Future[Done] = Future {
sync.remove(key)
Done
}
def getOrElseUpdate[A: ClassTag](key: String, expiration: Duration)(orElse: => Future[A]): Future[A] = {
get[A](key).flatMap {
case Some(value) => Future.successful(value)
case None => orElse.flatMap(value => set(key, value, expiration).map(_ => value))
}
}
def removeAll(): Future[Done] = Future {
cache.invalidateAll()
Done
}
}
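// Illustrative usage sketch (added for clarity, not part of the original file): with an injected
// `cache: AsyncCacheApi`, an implicit ExecutionContext and scala.concurrent.duration._ in scope,
// a caller could compute-if-absent like this:
//
//   cache.getOrElseUpdate[Int]("answer", 10.minutes) {
//     Future.successful(42)
//   }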
| Shenker93/playframework | framework/src/play-caffeine-cache/src/main/scala/play/api/cache/caffeine/CaffeineCacheApi.scala | Scala | apache-2.0 | 8,928 |
package com.timushev.sbt.updates
import com.timushev.sbt.updates.metadata.MetadataLoader
import com.timushev.sbt.updates.versions._
import sbt.ModuleID
import scala.collection.immutable.SortedSet
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
object UpdatesFinder {
import scala.Ordered._
def findUpdates(loaders: Seq[MetadataLoader], allowPreRelease: Boolean)(
module: ModuleID
): Future[SortedSet[Version]] = {
val current = Version(module.revision)
val versionSets = loaders.map(_.getVersions(module).recover(withEmpty))
val versions = Future.sequence(versionSets).map(v => SortedSet(v.flatten: _*))
versions.map(_.filter(isUpdate(current)).filterNot(lessStable(current, allowPreRelease)))
}
private def lessStable(current: Version, allowPreRelease: Boolean)(another: Version): Boolean =
(current, another) match {
case (ReleaseVersion(_), ReleaseVersion(_)) => false
case (SnapshotVersion(_, _, _), _) => false
case (_, SnapshotVersion(_, _, _)) => true
case (ReleaseVersion(_), PreReleaseVersion(_, _)) => !allowPreRelease
case (ReleaseVersion(_), PreReleaseBuildVersion(_, _, _)) => !allowPreRelease
case (ReleaseVersion(_), _) => true
case (_, _) => false
}
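  // Note (added for clarity, not part of the original file): `lessStable` drops candidate
  // versions that are less stable than the current one. For example, when the current version
  // is a release, snapshot candidates are always dropped and pre-release candidates are dropped
  // unless allowPreRelease is set; when the current version is itself a snapshot, no candidate
  // is considered less stable.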
private def isUpdate(current: Version) = current < _
private val withEmpty: PartialFunction[Throwable, Seq[Version]] = { case _ =>
Seq.empty
}
}
| rtimush/sbt-updates | src/main/scala/com/timushev/sbt/updates/UpdatesFinder.scala | Scala | bsd-3-clause | 1,602 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.cli
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.projections.{ AlignmentRecordField, Projection }
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.rdd.read.{ DuplicateMetrics, FlagStatMetrics }
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.formats.avro.AlignmentRecord
import org.bdgenomics.adam.rdd.read.FlagStat._
class FlagStatTest extends ADAMFunSuite {
sparkTest("Standard FlagStat test") {
val inputpath = ClassLoader.getSystemClassLoader.getResource("features/NA12878.sam").getFile
val argLine = "%s".format(inputpath).split("\\\\s+")
val args: FlagStatArgs = Args4j.apply[FlagStatArgs](argLine)
val projection = Projection(
AlignmentRecordField.readMapped,
AlignmentRecordField.mateMapped,
AlignmentRecordField.readPaired,
AlignmentRecordField.contig,
AlignmentRecordField.mateContig,
AlignmentRecordField.primaryAlignment,
AlignmentRecordField.duplicateRead,
AlignmentRecordField.readMapped,
AlignmentRecordField.mateMapped,
AlignmentRecordField.firstOfPair,
AlignmentRecordField.secondOfPair,
AlignmentRecordField.properPair,
AlignmentRecordField.mapq,
AlignmentRecordField.failedVendorQualityChecks)
val adamFile: RDD[AlignmentRecord] = sc.loadAlignments(args.inputPath, projection = Some(projection))
val (failedVendorQuality, passedVendorQuality) = apply(adamFile)
def percent(fraction: Long, total: Long) = if (total == 0) 0.0 else 100.00 * fraction.toFloat / total
assert(passedVendorQuality.total == 529 && failedVendorQuality.total == 36,
": The values of total passed and vendor quality were not the expected values")
assert(passedVendorQuality.duplicatesPrimary.total == 59 &&
failedVendorQuality.duplicatesPrimary.total == 16,
": The values of passed and failed vendor quality primary duplicates (total) were not the expected values")
assert(passedVendorQuality.duplicatesPrimary.bothMapped == 58 &&
failedVendorQuality.duplicatesPrimary.bothMapped == 15,
": The values of passed and failed vendor quality primary duplicates (both mapped) were not the expected values")
assert(passedVendorQuality.duplicatesPrimary.onlyReadMapped == 1 &&
failedVendorQuality.duplicatesPrimary.onlyReadMapped == 1,
": The values of passed and failed vendor quality primary duplicates (only read mapped) were not the expected values")
assert(passedVendorQuality.duplicatesPrimary.crossChromosome == 0 &&
failedVendorQuality.duplicatesPrimary.crossChromosome == 0,
": The values of passed and failed vendor quality primary duplicates were not the expected values")
assert(passedVendorQuality.duplicatesSecondary.total == 0 &&
failedVendorQuality.duplicatesSecondary.total == 0,
": The values of passed and failed vendor quality secondary duplicates (total) were not the expected values")
assert(passedVendorQuality.duplicatesSecondary.bothMapped == 0 &&
failedVendorQuality.duplicatesSecondary.bothMapped == 0,
": The values of passed and failed vendor quality secondary duplicates (both mapped) were not the expected values")
assert(passedVendorQuality.duplicatesSecondary.onlyReadMapped == 0 &&
failedVendorQuality.duplicatesSecondary.onlyReadMapped == 0,
": The values of passed and failed vendor quality secondary duplicates (only read mapped) were not the expected values")
assert(passedVendorQuality.duplicatesSecondary.crossChromosome == 0 &&
failedVendorQuality.duplicatesSecondary.crossChromosome == 0,
": The values of passed and failed vendor quality (cross chromosome) were not the expected values")
assert(passedVendorQuality.mapped == 529 && failedVendorQuality.mapped == 36,
": The values of passed and failed vendor quality (mapped) were not the expected values")
assert(percent(passedVendorQuality.mapped, passedVendorQuality.total) == 100.00,
": The values of percent passed vendor quality (mapped/total) were not the expected values")
assert(percent(failedVendorQuality.mapped, failedVendorQuality.total) == 100.00,
": The values of percent failed vendor quality (mapped/total) were not the expected values")
assert(passedVendorQuality.pairedInSequencing == 529 && failedVendorQuality.pairedInSequencing == 36,
": The values of passed and failed vendor quality (paired sequencing) were not the expected values")
assert(passedVendorQuality.read1 == 258 && failedVendorQuality.read1 == 13,
": The values of passed and failed vendor quality (read1) were not the expected values")
assert(passedVendorQuality.read2 == 271 && failedVendorQuality.read2 == 23,
": The values of passed and failed vendor quality (read2) were not the expected values")
assert(passedVendorQuality.properlyPaired == 524 && failedVendorQuality.properlyPaired == 32,
": The values of passed and failed vendor quality (properly paired) were not the expected values")
assert("%.2f".format(percent(passedVendorQuality.properlyPaired, passedVendorQuality.total)).toDouble == 99.05,
": The values of percent passed vendor quality (properly paired) were not the expected values")
assert("%.2f".format(percent(failedVendorQuality.properlyPaired, failedVendorQuality.total)).toDouble == 88.89,
": The values of percent passed vendor quality (properly paired) were not the expected values")
assert(passedVendorQuality.withSelfAndMateMapped == 524 && failedVendorQuality.withSelfAndMateMapped == 32,
": The values of passed and failed vendor quality (itself & mate mapped) were not the expected values")
assert(passedVendorQuality.singleton == 5 && failedVendorQuality.singleton == 4,
": The values of passed and failed vendor quality (singletons) were not the expected values")
assert("%.2f".format(percent(passedVendorQuality.singleton, passedVendorQuality.total)).toDouble == .95,
": The values of percent passed vendor quality (singletons) were not the expected values")
assert("%.2f".format(percent(failedVendorQuality.singleton, failedVendorQuality.total)).toDouble == 11.11,
": The values of percent failed vendor quality (singletons) were not the expected values")
assert(passedVendorQuality.withMateMappedToDiffChromosome == 0 && failedVendorQuality.withMateMappedToDiffChromosome == 0,
": The values of passed and failed vendor quality (mate mapped to a different chromosome) were not the expected values")
assert(passedVendorQuality.withMateMappedToDiffChromosomeMapQ5 == 0 && failedVendorQuality.withMateMappedToDiffChromosomeMapQ5 == 0,
": The values of passed and failed vendor quality (mate mapped to a different chromosome, mapQ>=5) were not the expected values")
}
}
| tomwhite/adam | adam-cli/src/test/scala/FlagStatTest.scala | Scala | apache-2.0 | 7,677 |
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
package scala
package collection
/** This trait implements a proxy for sequence objects. It forwards
* all calls to a different sequence object.
*
* @author Martin Odersky
* @version 2.8
* @since 2.8
*/
@deprecated("proxying is deprecated due to lack of use and compiler-level support", "2.11.0")
trait SeqProxy[+A] extends Seq[A] with SeqProxyLike[A, Seq[A]]
| felixmulder/scala | src/library/scala/collection/SeqProxy.scala | Scala | bsd-3-clause | 902 |
package dawn.flow.trajectory
import dawn.flow._
import spire.math.{Real => _, _ => _}
import spire.implicits._
import breeze.linalg._
trait ParticleFilter {
def N: Int
def covGyro: Real
def source1: Source[_]
type Weight = Real
type State <: {
def x: MatrixR
def cov: MatrixR
def p: Position
def predict(q: Quat, a: Acceleration, dt: Time): State
}
case class Particle(w: Weight, q: Attitude, s: State, lastA: Acceleration, lastQ: Quat)
case class Particles(sp: Seq[Particle], lastO: Omega)
type Combined
def update(x: (Timestamped[Combined], Timestamped[Particles])): Particles
def updateAttitude(ps: Particles, dt: Time) =
ps.copy(sp = ps.sp.map(x => x.copy(q = sampleAtt(x.q, ps.lastO, dt))))
def updateAcceleration(ps: Particles, acc: Acceleration) =
ps.copy(sp = ps.sp.map(x => x.copy(lastA = x.q.rotate(acc), lastQ = x.q)))
def kalmanPredict(ps: Particles, dt: Time) =
ps.copy(sp = ps.sp.map(x => x.copy(s = x.s.predict(x.lastQ, x.lastA, dt))))
def sampleAtt(q: Quat, om: Omega, dt: Time): Quat = {
val withNoise = Rand.gaussian(om, eye(3)*covGyro)
val integrated = withNoise * dt
val lq = Quat.localAngleToLocalQuat(integrated)
lq.rotateBy(q)
}
//http://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/
def normWeights(ps: Particles) = {
val ws = ps.sp.map(_.w)
val b = ws.max
val sum = b + log(ws.map(x => exp(x - b)).sum)
ps.copy(
sp = ps.sp.map(p => p.copy(w = p.w - sum))
)
}
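  // Illustrative helper (added for clarity, not part of the original source): the
  // exp-normalize / log-sum-exp trick referenced above subtracts the maximum log-weight before
  // exponentiating, so normalisation stays numerically stable even for large-magnitude
  // log-weights. A minimal standalone sketch over plain Doubles:
  private def expNormalizeSketch(logWs: Seq[Double]): Seq[Double] = {
    val b = logWs.max
    val unnormalized = logWs.map(w => math.exp(w - b))
    val z = unnormalized.sum
    unnormalized.map(_ / z)
  }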
//http://users.isy.liu.se/rt/schon/Publications/HolSG2006.pdf
//See systematic resampling
def resample(ps: Particles) = {
if (tooLowEffective(ps)) {
val u = Rand.uniform()
val us = Array.tabulate(N)(i => (i + u) / N)
val ws = ps.sp.map(x => exp(x.w)).scanLeft(0.0)(_ + _).drop(1)
val ns = Array.fill(N)(0)
var cu = 0
var wu = 0
for (w <- ws) {
while (cu < N && us(cu) <= w) {
ns(wu) += 1
cu += 1
}
wu += 1
}
ps.copy(sp = ps.sp.zip(ns).flatMap(x => List.fill(x._2)(x._1)).map(_.copy(w = log(1.0 / N))))
} else
ps
}
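  // Note on the systematic resampling above (added for clarity, not part of the original
  // source): a single uniform draw u is spread into N evenly spaced points (i + u) / N, which
  // are matched against the cumulative (exponentiated) weights ws; each particle is replicated
  // once per point falling inside its weight interval, and all survivors are reset to weight
  // 1/N in log space. For example (illustrative numbers), with N = 4, three particles of
  // weights 0.5, 0.3, 0.2 and u = 0.1, the points are 0.025, 0.275, 0.525, 0.775 against
  // cumulative sums 0.5, 0.8, 1.0, so the first two particles are copied twice each and the
  // third is dropped.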
def tooLowEffective(ps: Particles) = {
val effective = ps.sp.count(x => exp(x.w) >= 1.0/N)
val ratio = (effective / N.toDouble)
ratio <= 0.1
}
def averagePosition(ps: Particles): Position = {
var r = Vec3()
ps.sp
.map(x => x.s.p * exp(x.w))
.foreach(x => r += x)
r
}
//https://stackoverflow.com/questions/12374087/average-of-multiple-quaternions
def averageQuaternion(ps: Particles): Quat = {
Quat.averageQuaternion(ps.sp.map(x => (exp(x.w), x.q)))
}
def updateFromIMU(ps: Particles, acc: Acceleration, om: Omega, dt: Time) = {
val psO = ps.copy(lastO = om)
val psUA = updateAttitude(psO, dt)
val psA = updateAcceleration(psUA, acc)
kalmanPredict(psA, dt)
}
def updateWeightPos(ps: Particles, pos: Position, cov: MatrixR) =
ps.copy(
sp = ps.sp.map(x =>
x.copy(w =
x.w + posLogLikelihood(pos, x.s.p, cov))))
def updateWeightAtt(ps: Particles, att: Attitude, cov: MatrixR) =
ps.copy(
sp = ps.sp.map(x =>
x.copy(w =
x.w + attLogLikelihood(att, x.q, cov))))
def posLogLikelihood(measurement: Position, state: Position, cov: MatrixR) = {
Rand.gaussianLogPdf(measurement, state, cov)
}
def attLogLikelihood(measurement: Attitude, state: Attitude, cov: MatrixR) = {
val error = measurement.rotateBy(state.reciprocal)
val rad = Quat.quatToAngle(error)
Rand.gaussianLogPdf(rad, Vec3(), cov)
}
def fused: Source[Combined]
lazy val process: Source[Particles] =
fused
.zipLastT(buffer)
.map(update, "Update")
.map(normWeights, "NormWeight")
.map(resample, "Resample")
lazy val out: Source[(Position, Quat)] =
process
.map(x =>
(
averagePosition(x),
averageQuaternion(x)
),
"Average")
def initP: Particle
lazy val buffer: Source[Particles] = {
Buffer(process, Particles(Seq.fill(N)(initP), Vec3()), source1)
}
}
| rubenfiszel/scala-flow | core/src/main/scala/trajectory/ParticleFilter.scala | Scala | mit | 4,230 |
package com.azavea.rasterfoundry
import org.apache.spark.serializer.{ KryoRegistrator => SparkKryoRegistrator }
import com.esotericsoftware.kryo.Kryo
class KryoRegistrator extends SparkKryoRegistrator {
override def registerClasses(kryo: Kryo): Unit = {
(new geotrellis.spark.io.hadoop.KryoRegistrator).registerClasses(kryo)
kryo.register(classOf[OrderedImage])
}
}
| kdeloach/raster-foundry-tiler | mosaic/src/main/scala/com/azavea/rasterfoundry/KryoRegistrator.scala | Scala | apache-2.0 | 382 |
/*
* Tests for forgetful resolution.
*
*/
package at.logic.gapt.proofs.lk.algorithms.cutIntroduction
import at.logic.gapt.proofs.lk.algorithms.cutIntroduction.MinimizeSolution._
import at.logic.gapt.expr._
import org.specs2.mutable._
class ForgetfulResolutionTest extends Specification {
"Forgetful Paramodulation Should" should {
"successfully paramodulate a=b into f(a,a)" in {
val a = FOLConst( "a" )
val b = FOLConst( "b" )
val fs = "f"
val faa = FOLFunction( fs, a :: a :: Nil )
val realab = Set( FOLFunction( fs, a :: a :: Nil ), FOLFunction( fs, a :: b :: Nil ), FOLFunction( fs, b :: a :: Nil ), FOLFunction( fs, b :: b :: Nil ) )
val realba = Set( FOLFunction( fs, a :: a :: Nil ) )
val parasab = Paramodulants( a, b, faa )
val parasba = Paramodulants( b, a, faa )
parasab must beEqualTo( realab )
parasba must beEqualTo( realba )
}
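    // Note (added for clarity, not part of the original test): paramodulating the equation
    // a = b into f(a, a) rewrites every subset (possibly empty) of the occurrences of a by b,
    // which is why `realab` contains all four of f(a,a), f(a,b), f(b,a), f(b,b); used in the
    // other direction (b = a) there is no occurrence of b to rewrite, leaving only f(a,a).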
"successfully apply forgetful paramodulation to { :- a = b; :- P(a, a); :- Q } " in {
val a = FOLConst( "a" )
val b = FOLConst( "b" )
val ps = "P"
val paa = FOLAtom( ps, a :: a :: Nil )
val pab = FOLAtom( ps, a :: b :: Nil )
val pba = FOLAtom( ps, b :: a :: Nil )
val pbb = FOLAtom( ps, b :: b :: Nil )
val q = FOLAtom( "Q", Nil )
val cq = new MyFClause( Nil, q :: Nil )
val cpaa = new MyFClause( Nil, paa :: Nil )
val cpab = new MyFClause( Nil, pab :: Nil )
val cpba = new MyFClause( Nil, pba :: Nil )
val cpbb = new MyFClause( Nil, pbb :: Nil )
val r1 = Set( cpab, cq )
val r2 = Set( cpba, cq )
val r3 = Set( cpbb, cq )
val real = Set( r1, r2, r3 )
val res = ForgetfulParamodulateCNF( And( Eq( a, b ) :: paa :: q :: Nil ) )
val setres = res.map( cnf => cnf.toSet ).toSet
setres must beEqualTo( real )
}
/*
"improve the solution correctly" in {
val p = at.logic.testing.LinearExampleProof(8)
val ts = new FlatTermSet(TermsExtraction(p))
val g = ComputeGrammars(ts)
val grm = g(2)
val ehs = new ExtendedHerbrandSequent(p.root, grm, ts)
val improv = improveSolution(ehs.canonicalSol, ehs)
// TODO: type the expected value correctly
//val expected =
//improv must
success
}
*/
}
"Forgetful Resolution Should" should {
"compute a single resolvent successfully" in {
val a = FOLAtom( "A" )
val b = FOLAtom( "B" )
val c = FOLAtom( "C" )
val d = FOLAtom( "D" )
val e = FOLAtom( "E" )
val f = And( And( Or( a, Or( b, c ) ), Or( Neg( b ), d ) ), e )
val res = ForgetfulResolve( f )
//println("Formula (in CNF): " + f)
//println("Resolvent: " + res)
res.size must beEqualTo( 1 )
}
/*
"improve the solution correctly" in {
val p = at.logic.testing.LinearExampleProof(8)
val ts = new FlatTermSet(TermsExtraction(p))
val g = ComputeGrammars(ts)
val grm = g(2)
val ehs = new ExtendedHerbrandSequent(p.root, grm, ts)
val improv = improveSolution(ehs.canonicalSol, ehs)
// TODO: type the expected value correctly
//val expected =
//improv must
success
}
*/
}
}
| gisellemnr/gapt | src/test/scala/at/logic/gapt/proofs/lk/algorithms/cutIntroduction/ForgetfulResolutionTest.scala | Scala | gpl-3.0 | 3,228 |
/* Code Pulse: a real-time code coverage tool, for more information, see <http://code-pulse.com/>
*
* Copyright (C) 2014-2017 Code Dx, Inc. <https://codedx.com/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.codedx.codepulse.hq.monitor
/** Describes an aspect of the trace by providing the name of a component.
* This data object is part of a [[TraceComponentHealth]] used by the HealthMonitors.
*
* @param componentName The name of the trace component
*/
sealed abstract class TraceComponent(val componentName: String)
case object AgentComponent extends TraceComponent("Agent")
case object DataRouterComponent extends TraceComponent("Data Processing")
case object FilesystemComponent extends TraceComponent("File System")
| secdec/codepulse | hq/src/main/scala/com/secdec/bytefrog/hq/monitor/TraceComponent.scala | Scala | apache-2.0 | 1,264 |
//scalac -Xlog-implicit-conversions flatmap-study.scala
//
//flatmap-study.scala:3: inferred view from Some[String] to scala.collection.GenTraversableOnce[?] = scala.this.Option.option2Iterable[String]:(xo: Option[String])Iterable[String]
// xs.flatMap(x => Some(x))
//
class Hoge {
def test(xs: List[String]): List[String] = {
xs.flatMap(x => Some(x))
}
}
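// Note (added for clarity, not part of the original file): List#flatMap expects a function
// returning a GenTraversableOnce, so the Some(x) above is adapted through the implicit view
// scala.Option.option2Iterable -- exactly what the -Xlog-implicit-conversions output quoted in
// the header shows. Writing `xs.flatMap(x => List(x))` (or `Some(x).toList`) would express the
// same thing without relying on the implicit conversion.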
| backpaper0/sandbox | scripts/flatmap-study.scala | Scala | mit | 367 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.concurrent.atomic.AtomicReference
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import org.apache.spark.{ExecutorAllocationClient, SparkConf, SparkContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config
import org.apache.spark.util.{Clock, SystemClock, Utils}
/**
* HealthTracker is designed to track problematic executors and nodes. It supports excluding
* executors and nodes across an entire application (with a periodic expiry). TaskSetManagers add
* additional logic for exclusion of executors and nodes for individual tasks and stages which
* works in concert with the logic here.
*
* The tracker needs to deal with a variety of workloads, e.g.:
*
* * bad user code -- this may lead to many task failures, but that should not count against
* individual executors
* * many small stages -- this may prevent a bad executor for having many failures within one
* stage, but still many failures over the entire application
 * * "flaky" executors -- they don't fail every task, but are still faulty enough to merit
 *    exclusion
* * missing shuffle files -- may trigger fetch failures on healthy executors.
*
* See the design doc on SPARK-8425 for a more in-depth discussion. Note SPARK-32037 renamed
* the feature.
*
* THREADING: As with most helpers of TaskSchedulerImpl, this is not thread-safe. Though it is
* called by multiple threads, callers must already have a lock on the TaskSchedulerImpl. The
* one exception is [[excludedNodeList()]], which can be called without holding a lock.
*/
private[scheduler] class HealthTracker (
private val listenerBus: LiveListenerBus,
conf: SparkConf,
allocationClient: Option[ExecutorAllocationClient],
clock: Clock = new SystemClock()) extends Logging {
def this(sc: SparkContext, allocationClient: Option[ExecutorAllocationClient]) = {
this(sc.listenerBus, sc.conf, allocationClient)
}
HealthTracker.validateExcludeOnFailureConfs(conf)
private val MAX_FAILURES_PER_EXEC = conf.get(config.MAX_FAILURES_PER_EXEC)
private val MAX_FAILED_EXEC_PER_NODE = conf.get(config.MAX_FAILED_EXEC_PER_NODE)
  val EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS = HealthTracker.getExcludeOnFailureTimeout(conf)
private val EXCLUDE_FETCH_FAILURE_ENABLED =
conf.get(config.EXCLUDE_ON_FAILURE_FETCH_FAILURE_ENABLED)
private val EXCLUDE_ON_FAILURE_DECOMMISSION_ENABLED =
conf.get(config.EXCLUDE_ON_FAILURE_DECOMMISSION_ENABLED)
/**
* A map from executorId to information on task failures. Tracks the time of each task failure,
* so that we can avoid excluding executors due to failures that are very far apart. We do not
* actively remove from this as soon as tasks hit their timeouts, to avoid the time it would take
* to do so. But it will not grow too large, because as soon as an executor gets too many
* failures, we exclude the executor and remove its entry here.
*/
private val executorIdToFailureList = new HashMap[String, ExecutorFailureList]()
val executorIdToExcludedStatus = new HashMap[String, ExcludedExecutor]()
val nodeIdToExcludedExpiryTime = new HashMap[String, Long]()
/**
* An immutable copy of the set of nodes that are currently excluded. Kept in an
* AtomicReference to make [[excludedNodeList()]] thread-safe.
*/
private val _excludedNodeList = new AtomicReference[Set[String]](Set())
/**
* Time when the next excluded node will expire. Used as a shortcut to
* avoid iterating over all entries in the excludedNodeList when none will have expired.
*/
var nextExpiryTime: Long = Long.MaxValue
/**
* Mapping from nodes to all of the executors that have been excluded on that node. We do *not*
* remove from this when executors are removed from spark, so we can track when we get multiple
* successive excluded executors on one node. Nonetheless, it will not grow too large because
* there cannot be many excluded executors on one node, before we stop requesting more
   * executors on that node, and we clean up the list of excluded executors once an executor has
* been excluded for EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS.
*/
val nodeToExcludedExecs = new HashMap[String, HashSet[String]]()
/**
* Include executors and nodes that have been excluded for at least
* EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS
*/
def applyExcludeOnFailureTimeout(): Unit = {
val now = clock.getTimeMillis()
// quickly check if we've got anything to expire that is excluded -- if not,
// avoid doing any work
if (now > nextExpiryTime) {
// Apply the timeout to excluded nodes and executors
val execsToInclude = executorIdToExcludedStatus.filter(_._2.expiryTime < now).keys
if (execsToInclude.nonEmpty) {
        // Include any executors that have been excluded longer than the excludeOnFailure timeout.
        logInfo(s"Removing executors $execsToInclude from exclude list because the " +
          s"executors have reached the timeout")
execsToInclude.foreach { exec =>
val status = executorIdToExcludedStatus.remove(exec).get
val failedExecsOnNode = nodeToExcludedExecs(status.node)
// post both to keep backwards compatibility
listenerBus.post(SparkListenerExecutorUnblacklisted(now, exec))
listenerBus.post(SparkListenerExecutorUnexcluded(now, exec))
failedExecsOnNode.remove(exec)
if (failedExecsOnNode.isEmpty) {
nodeToExcludedExecs.remove(status.node)
}
}
}
val nodesToInclude = nodeIdToExcludedExpiryTime.filter(_._2 < now).keys
if (nodesToInclude.nonEmpty) {
// Include any nodes that have been excluded longer than the excludeOnFailure timeout.
        logInfo(s"Removing nodes $nodesToInclude from exclude list because the " +
          s"nodes have reached the timeout")
nodesToInclude.foreach { node =>
nodeIdToExcludedExpiryTime.remove(node)
// post both to keep backwards compatibility
listenerBus.post(SparkListenerNodeUnblacklisted(now, node))
listenerBus.post(SparkListenerNodeUnexcluded(now, node))
}
_excludedNodeList.set(nodeIdToExcludedExpiryTime.keySet.toSet)
}
updateNextExpiryTime()
}
}
private def updateNextExpiryTime(): Unit = {
val execMinExpiry = if (executorIdToExcludedStatus.nonEmpty) {
executorIdToExcludedStatus.map{_._2.expiryTime}.min
} else {
Long.MaxValue
}
val nodeMinExpiry = if (nodeIdToExcludedExpiryTime.nonEmpty) {
nodeIdToExcludedExpiryTime.values.min
} else {
Long.MaxValue
}
nextExpiryTime = math.min(execMinExpiry, nodeMinExpiry)
}
private def killExecutor(exec: String, msg: String): Unit = {
val fullMsg = if (EXCLUDE_ON_FAILURE_DECOMMISSION_ENABLED) {
s"${msg} (actually decommissioning)"
} else {
msg
}
allocationClient match {
case Some(a) =>
logInfo(fullMsg)
if (EXCLUDE_ON_FAILURE_DECOMMISSION_ENABLED) {
a.decommissionExecutor(exec, ExecutorDecommissionInfo(fullMsg),
adjustTargetNumExecutors = false)
} else {
a.killExecutors(Seq(exec), adjustTargetNumExecutors = false, countFailures = false,
force = true)
}
case None =>
logInfo(s"Not attempting to kill excluded executor id $exec " +
s"since allocation client is not defined.")
}
}
private def killExcludedExecutor(exec: String): Unit = {
if (conf.get(config.EXCLUDE_ON_FAILURE_KILL_ENABLED)) {
killExecutor(exec, s"Killing excluded executor id $exec since " +
s"${config.EXCLUDE_ON_FAILURE_KILL_ENABLED.key} is set.")
}
}
private[scheduler] def killExcludedIdleExecutor(exec: String): Unit = {
killExecutor(exec,
s"Killing excluded idle executor id $exec because of task unschedulability and trying " +
"to acquire a new executor.")
}
private def killExecutorsOnExcludedNode(node: String): Unit = {
if (conf.get(config.EXCLUDE_ON_FAILURE_KILL_ENABLED)) {
allocationClient match {
case Some(a) =>
if (EXCLUDE_ON_FAILURE_DECOMMISSION_ENABLED) {
logInfo(s"Decommissioning all executors on excluded host $node " +
s"since ${config.EXCLUDE_ON_FAILURE_KILL_ENABLED.key} is set.")
if (!a.decommissionExecutorsOnHost(node)) {
logError(s"Decommissioning executors on $node failed.")
}
} else {
logInfo(s"Killing all executors on excluded host $node " +
s"since ${config.EXCLUDE_ON_FAILURE_KILL_ENABLED.key} is set.")
if (!a.killExecutorsOnHost(node)) {
logError(s"Killing executors on node $node failed.")
}
}
case None =>
logWarning(s"Not attempting to kill executors on excluded host $node " +
s"since allocation client is not defined.")
}
}
}
def updateExcludedForFetchFailure(host: String, exec: String): Unit = {
if (EXCLUDE_FETCH_FAILURE_ENABLED) {
// If we exclude on fetch failures, we are implicitly saying that we believe the failure is
// non-transient, and can't be recovered from (even if this is the first fetch failure,
// stage is retried after just one failure, so we don't always get a chance to collect
// multiple fetch failures).
// If the external shuffle-service is on, then every other executor on this node would
// be suffering from the same issue, so we should exclude (and potentially kill) all
// of them immediately.
val now = clock.getTimeMillis()
val expiryTimeForNewExcludes = now + EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS
if (conf.get(config.SHUFFLE_SERVICE_ENABLED)) {
if (!nodeIdToExcludedExpiryTime.contains(host)) {
logInfo(s"excluding node $host due to fetch failure of external shuffle service")
nodeIdToExcludedExpiryTime.put(host, expiryTimeForNewExcludes)
// post both to keep backwards compatibility
listenerBus.post(SparkListenerNodeBlacklisted(now, host, 1))
listenerBus.post(SparkListenerNodeExcluded(now, host, 1))
_excludedNodeList.set(nodeIdToExcludedExpiryTime.keySet.toSet)
killExecutorsOnExcludedNode(host)
updateNextExpiryTime()
}
} else if (!executorIdToExcludedStatus.contains(exec)) {
logInfo(s"Excluding executor $exec due to fetch failure")
executorIdToExcludedStatus.put(exec, ExcludedExecutor(host, expiryTimeForNewExcludes))
        // We hardcode the number of failed tasks to 1 for a fetch failure, because there's no
        // reattempt for such a failure.
// post both to keep backwards compatibility
listenerBus.post(SparkListenerExecutorBlacklisted(now, exec, 1))
listenerBus.post(SparkListenerExecutorExcluded(now, exec, 1))
updateNextExpiryTime()
killExcludedExecutor(exec)
val excludedExecsOnNode = nodeToExcludedExecs.getOrElseUpdate(host, HashSet[String]())
excludedExecsOnNode += exec
}
}
}
def updateExcludedForSuccessfulTaskSet(
stageId: Int,
stageAttemptId: Int,
failuresByExec: HashMap[String, ExecutorFailuresInTaskSet]): Unit = {
// if any tasks failed, we count them towards the overall failure count for the executor at
// this point.
val now = clock.getTimeMillis()
failuresByExec.foreach { case (exec, failuresInTaskSet) =>
val appFailuresOnExecutor =
executorIdToFailureList.getOrElseUpdate(exec, new ExecutorFailureList)
appFailuresOnExecutor.addFailures(stageId, stageAttemptId, failuresInTaskSet)
appFailuresOnExecutor.dropFailuresWithTimeoutBefore(now)
val newTotal = appFailuresOnExecutor.numUniqueTaskFailures
val expiryTimeForNewExcludes = now + EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS
// If this pushes the total number of failures over the threshold, exclude the executor.
// If its already excluded, we avoid "re-excluding" (which can happen if there were
// other tasks already running in another taskset when it got excluded), because it makes
// some of the logic around expiry times a little more confusing. But it also wouldn't be a
// problem to re-exclude, with a later expiry time.
if (newTotal >= MAX_FAILURES_PER_EXEC && !executorIdToExcludedStatus.contains(exec)) {
logInfo(s"Excluding executor id: $exec because it has $newTotal" +
s" task failures in successful task sets")
val node = failuresInTaskSet.node
executorIdToExcludedStatus.put(exec, ExcludedExecutor(node, expiryTimeForNewExcludes))
// post both to keep backwards compatibility
listenerBus.post(SparkListenerExecutorBlacklisted(now, exec, newTotal))
listenerBus.post(SparkListenerExecutorExcluded(now, exec, newTotal))
executorIdToFailureList.remove(exec)
updateNextExpiryTime()
killExcludedExecutor(exec)
// In addition to excluding the executor, we also update the data for failures on the
// node, and potentially exclude the entire node as well.
val excludedExecsOnNode = nodeToExcludedExecs.getOrElseUpdate(node, HashSet[String]())
excludedExecsOnNode += exec
// If the node is already excluded, we avoid adding it again with a later expiry
// time.
if (excludedExecsOnNode.size >= MAX_FAILED_EXEC_PER_NODE &&
!nodeIdToExcludedExpiryTime.contains(node)) {
logInfo(s"Excluding node $node because it has ${excludedExecsOnNode.size} " +
s"executors excluded: ${excludedExecsOnNode}")
nodeIdToExcludedExpiryTime.put(node, expiryTimeForNewExcludes)
// post both to keep backwards compatibility
listenerBus.post(SparkListenerNodeBlacklisted(now, node, excludedExecsOnNode.size))
listenerBus.post(SparkListenerNodeExcluded(now, node, excludedExecsOnNode.size))
_excludedNodeList.set(nodeIdToExcludedExpiryTime.keySet.toSet)
killExecutorsOnExcludedNode(node)
}
}
}
}
def isExecutorExcluded(executorId: String): Boolean = {
executorIdToExcludedStatus.contains(executorId)
}
/**
* Get the full set of nodes that are excluded. Unlike other methods in this class, this *IS*
* thread-safe -- no lock required on a taskScheduler.
*/
def excludedNodeList(): Set[String] = {
_excludedNodeList.get()
}
def isNodeExcluded(node: String): Boolean = {
nodeIdToExcludedExpiryTime.contains(node)
}
def handleRemovedExecutor(executorId: String): Unit = {
// We intentionally do not clean up executors that are already excluded in
// nodeToExcludedExecs, so that if another executor on the same node gets excluded, we can
// exclude the entire node. We also can't clean up executorIdToExcludedStatus, so we can
// eventually remove the executor after the timeout. Despite not clearing those structures
// here, we don't expect they will grow too big since you won't get too many executors on one
// node, and the timeout will clear it up periodically in any case.
executorIdToFailureList -= executorId
}
/**
* Tracks all failures for one executor (that have not passed the timeout).
*
* In general we actually expect this to be extremely small, since it won't contain more than the
* maximum number of task failures before an executor is failed (default 2).
*/
private[scheduler] final class ExecutorFailureList extends Logging {
private case class TaskId(stage: Int, stageAttempt: Int, taskIndex: Int)
/**
* All failures on this executor in successful task sets.
*/
private var failuresAndExpiryTimes = ArrayBuffer[(TaskId, Long)]()
/**
* As an optimization, we track the min expiry time over all entries in failuresAndExpiryTimes
* so its quick to tell if there are any failures with expiry before the current time.
*/
private var minExpiryTime = Long.MaxValue
def addFailures(
stage: Int,
stageAttempt: Int,
failuresInTaskSet: ExecutorFailuresInTaskSet): Unit = {
failuresInTaskSet.taskToFailureCountAndFailureTime.foreach {
case (taskIdx, (_, failureTime)) =>
val expiryTime = failureTime + EXCLUDE_ON_FAILURE_TIMEOUT_MILLIS
failuresAndExpiryTimes += ((TaskId(stage, stageAttempt, taskIdx), expiryTime))
if (expiryTime < minExpiryTime) {
minExpiryTime = expiryTime
}
}
}
/**
* The number of unique tasks that failed on this executor. Only counts failures within the
* timeout, and in successful tasksets.
*/
def numUniqueTaskFailures: Int = failuresAndExpiryTimes.size
def isEmpty: Boolean = failuresAndExpiryTimes.isEmpty
/**
* Apply the timeout to individual tasks. This is to prevent one-off failures that are very
* spread out in time (and likely have nothing to do with problems on the executor) from
     * triggering exclusion. However, note that we do *not* remove executors and nodes from
* being excluded as we expire individual task failures -- each have their own timeout. E.g.,
* suppose:
* * timeout = 10, maxFailuresPerExec = 2
* * Task 1 fails on exec 1 at time 0
* * Task 2 fails on exec 1 at time 5
* --> exec 1 is excluded from time 5 - 15.
* This is to simplify the implementation, as well as keep the behavior easier to understand
* for the end user.
*/
def dropFailuresWithTimeoutBefore(dropBefore: Long): Unit = {
if (minExpiryTime < dropBefore) {
var newMinExpiry = Long.MaxValue
val newFailures = new ArrayBuffer[(TaskId, Long)]
failuresAndExpiryTimes.foreach { case (task, expiryTime) =>
if (expiryTime >= dropBefore) {
newFailures += ((task, expiryTime))
if (expiryTime < newMinExpiry) {
newMinExpiry = expiryTime
}
}
}
failuresAndExpiryTimes = newFailures
minExpiryTime = newMinExpiry
}
}
override def toString(): String = {
s"failures = $failuresAndExpiryTimes"
}
}
}
private[spark] object HealthTracker extends Logging {
private val DEFAULT_TIMEOUT = "1h"
/**
* Returns true if the excludeOnFailure is enabled, based on checking the configuration
* in the following order:
* 1. Is it specifically enabled or disabled?
* 2. Is it enabled via the legacy timeout conf?
* 3. Default is off
*/
def isExcludeOnFailureEnabled(conf: SparkConf): Boolean = {
conf.get(config.EXCLUDE_ON_FAILURE_ENABLED) match {
case Some(enabled) =>
enabled
case None =>
// if they've got a non-zero setting for the legacy conf, always enable it,
// otherwise, use the default.
val legacyKey = config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF.key
conf.get(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF).exists { legacyTimeout =>
if (legacyTimeout == 0) {
logWarning(s"Turning off excludeOnFailure due to legacy configuration: $legacyKey == 0")
false
} else {
logWarning(s"Turning on excludeOnFailure due to legacy configuration: $legacyKey > 0")
true
}
}
}
}
  def getExcludeOnFailureTimeout(conf: SparkConf): Long = {
conf.get(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF).getOrElse {
conf.get(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF).getOrElse {
Utils.timeStringAsMs(DEFAULT_TIMEOUT)
}
}
}
/**
* Verify that exclude on failure configurations are consistent; if not, throw an exception.
* Should only be called if excludeOnFailure is enabled.
*
* The configuration is expected to adhere to a few invariants. Default values
* follow these rules of course, but users may unwittingly change one configuration
* without making the corresponding adjustment elsewhere. This ensures we fail-fast when
* there are such misconfigurations.
*/
def validateExcludeOnFailureConfs(conf: SparkConf): Unit = {
def mustBePos(k: String, v: String): Unit = {
throw new IllegalArgumentException(s"$k was $v, but must be > 0.")
}
Seq(
config.MAX_TASK_ATTEMPTS_PER_EXECUTOR,
config.MAX_TASK_ATTEMPTS_PER_NODE,
config.MAX_FAILURES_PER_EXEC_STAGE,
config.MAX_FAILED_EXEC_PER_NODE_STAGE,
config.MAX_FAILURES_PER_EXEC,
config.MAX_FAILED_EXEC_PER_NODE
).foreach { config =>
val v = conf.get(config)
if (v <= 0) {
mustBePos(config.key, v.toString)
}
}
    val timeout = getExcludeOnFailureTimeout(conf)
if (timeout <= 0) {
// first, figure out where the timeout came from, to include the right conf in the message.
conf.get(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF) match {
case Some(t) =>
mustBePos(config.EXCLUDE_ON_FAILURE_TIMEOUT_CONF.key, timeout.toString)
case None =>
mustBePos(config.EXCLUDE_ON_FAILURE_LEGACY_TIMEOUT_CONF.key, timeout.toString)
}
}
val maxTaskFailures = conf.get(config.TASK_MAX_FAILURES)
val maxNodeAttempts = conf.get(config.MAX_TASK_ATTEMPTS_PER_NODE)
if (maxNodeAttempts >= maxTaskFailures) {
throw new IllegalArgumentException(s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key} " +
s"( = ${maxNodeAttempts}) was >= ${config.TASK_MAX_FAILURES.key} " +
s"( = ${maxTaskFailures} ). Though excludeOnFailure is enabled, with this configuration, " +
s"Spark will not be robust to one bad node. Decrease " +
s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key}, increase ${config.TASK_MAX_FAILURES.key}, " +
s"or disable excludeOnFailure with ${config.EXCLUDE_ON_FAILURE_ENABLED.key}")
}
}
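  // Illustrative example (not in the original source): with the default spark.task.maxFailures = 4,
  // configuring the max task attempts per node to 4 or more fails this validation, because a single
  // bad node could then exhaust every allowed attempt for a task on its own.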
}
private final case class ExcludedExecutor(node: String, expiryTime: Long)
| maropu/spark | core/src/main/scala/org/apache/spark/scheduler/HealthTracker.scala | Scala | apache-2.0 | 22,869 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.model
sealed trait ConsolidationFunction {
def name: String
def compute(b: Block, pos: Int, aggr: Int, multiple: Int): Double
override def toString: String = s":cf-$name"
}
object ConsolidationFunction {
import java.lang.{Double => JDouble}
sealed trait SumOrAvgCf extends ConsolidationFunction
case object Avg extends SumOrAvgCf {
def name: String = "avg"
def compute(b: Block, pos: Int, aggr: Int, multiple: Int): Double = {
val end = pos + multiple
var total = 0.0
var count = 0
var nanCount = 0
var i = pos
while (i < end) {
val v = b.get(i, aggr)
if (!JDouble.isNaN(v)) {
total += v
count += 1
} else {
nanCount += 1
}
i += 1
}
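      // Note that the divisor includes the NaN slots, so the average is taken over the whole
      // consolidation window rather than only over the defined datapoints.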
if (count == 0) Double.NaN else total / (count + nanCount)
}
}
case object Sum extends SumOrAvgCf {
def name: String = "sum"
def compute(b: Block, pos: Int, aggr: Int, multiple: Int): Double = {
val end = pos + multiple
var total = 0.0
var count = 0
var i = pos
while (i < end) {
val v = b.get(i, aggr)
if (!JDouble.isNaN(v)) {
total += v
count += 1
}
i += 1
}
if (count == 0) Double.NaN else total
}
}
case object Min extends ConsolidationFunction {
def name: String = "min"
def compute(b: Block, pos: Int, aggr: Int, multiple: Int): Double = {
val end = pos + multiple
var min = JDouble.MAX_VALUE
var count = 0
var i = pos
while (i < end) {
val v = b.get(i, aggr)
if (!JDouble.isNaN(v)) {
min = math.min(min, v)
count += 1
}
i += 1
}
if (count == 0) Double.NaN else min
}
}
case object Max extends ConsolidationFunction {
def name: String = "max"
def compute(b: Block, pos: Int, aggr: Int, multiple: Int): Double = {
val end = pos + multiple
var max = -JDouble.MAX_VALUE
var count = 0
var i = pos
while (i < end) {
val v = b.get(i, aggr)
if (!JDouble.isNaN(v)) {
max = math.max(max, v)
count += 1
}
i += 1
}
if (count == 0) Double.NaN else max
}
}
}
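// Illustrative sketch (not part of the original file): collapsing a consolidation window of
// three raw datapoints into one value. `block` and the aggregation index `aggr` are assumed
// to be supplied by the surrounding storage layer; only the call shape is shown here.
//
//   val consolidated: Double = ConsolidationFunction.Avg.compute(block, pos = 0, aggr = aggr, multiple = 3)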
| rspieldenner/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/model/ConsolidationFunction.scala | Scala | apache-2.0 | 2,898 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.yggdrasil
package table
import com.precog.common._
import com.precog.common.Path
import com.precog.util.VectorCase
import com.precog.bytecode.{ JType, JBooleanT, JObjectUnfixedT, JArrayUnfixedT }
import com.precog.yggdrasil.jdbm3._
import com.precog.yggdrasil.util._
import com.precog.util._
import blueeyes.bkka._
import blueeyes.json._
import org.apache.commons.collections.primitives.ArrayIntList
import org.joda.time.DateTime
import com.google.common.io.Files
import com.weiglewilczek.slf4s.Logging
import org.apache.jdbm.DBMaker
import java.io.File
import java.util.SortedMap
import com.precog.util.{BitSet, BitSetUtil, Loop}
import com.precog.util.BitSetUtil.Implicits._
import scala.annotation.tailrec
import scalaz._
import scalaz.Ordering._
import scalaz.std.function._
import scalaz.std.list._
import scalaz.std.tuple._
//import scalaz.std.iterable._
import scalaz.std.option._
import scalaz.std.map._
import scalaz.std.set._
import scalaz.std.stream._
import scalaz.syntax.arrow._
import scalaz.syntax.monad._
import scalaz.syntax.traverse._
import scalaz.syntax.std.boolean._
import scalaz.syntax.bifunctor._
trait SliceTransforms[M[+_]] extends TableModule[M]
with ColumnarTableTypes[M]
with ObjectConcatHelpers
with ArrayConcatHelpers
with MapUtils {
import TableModule._
import trans._
import trans.constants._
protected object SliceTransform {
def identity[A](initial: A) = SliceTransform1.liftM[A](initial, (a: A, s: Slice) => (a, s))
def left[A](initial: A) = SliceTransform2.liftM[A](initial, (a: A, sl: Slice, sr: Slice) => (a, sl))
def right[A](initial: A) = SliceTransform2.liftM[A](initial, (a: A, sl: Slice, sr: Slice) => (a, sr))
def liftM(f: Slice => Slice): SliceTransform1[Unit] =
SliceTransform1.liftM[Unit]((), { (u, s) => (u, f(s)) })
def lift(f: Slice => M[Slice]): SliceTransform1[Unit] =
SliceTransform1[Unit]((), { (_, s) => f(s) map { ((), _) } })
def composeSliceTransform(spec: TransSpec1): SliceTransform1[_] = {
composeSliceTransform2(spec).parallel
}
// No transform defined herein may reduce the size of a slice. Be it known!
def composeSliceTransform2(spec: TransSpec[SourceType]): SliceTransform2[_] = {
//todo missing case WrapObjectDynamic
val result = spec match {
case Leaf(source) if source == Source || source == SourceLeft =>
SliceTransform.left(())
case Leaf(source) if source == SourceRight =>
SliceTransform.right(())
case Map1(source, f) =>
composeSliceTransform2(source) map {
_ mapRoot f
}
case DeepMap1(source, f) =>
composeSliceTransform2(source) map {
_ mapColumns f
}
case Map2(left, right, f) =>
val l0 = composeSliceTransform2(left)
val r0 = composeSliceTransform2(right)
l0.zip(r0) { (sl, sr) =>
new Slice {
val size = sl.size
val columns: Map[ColumnRef, Column] = {
val resultColumns = for {
cl <- sl.columns collect { case (ref, col) if ref.selector == CPath.Identity => col }
cr <- sr.columns collect { case (ref, col) if ref.selector == CPath.Identity => col }
result <- f(cl, cr)
} yield result
resultColumns.groupBy(_.tpe) map {
case (tpe, cols) =>
val col = cols reduceLeft { (c1, c2) =>
Column.unionRightSemigroup.append(c1, c2)
}
(ColumnRef(CPath.Identity, tpe), col)
}
}
}
}
case Filter(source, predicate) =>
val typed = Typed(predicate, JBooleanT)
composeSliceTransform2(source).zip(composeSliceTransform2(typed)) { (s: Slice, filter: Slice) =>
assert(filter.size == s.size)
if (s.columns.isEmpty) {
s
} else {
val definedAt = new BitSet
filter.columns.values.foreach {
case col: BoolColumn => {
cf.util.isSatisfied(col).foreach {
c => definedAt.or(c.definedAt(0, s.size))
}
}
}
s mapColumns { cf.util.filter(0, s.size, definedAt) }
}
}
case Equal(left, right) =>
val l0 = composeSliceTransform2(left)
val r0 = composeSliceTransform2(right)
/*
* 1. split each side into numeric and non-numeric columns
* 2. cogroup non-numerics by ColumnRef
* 3. for each pair of matched columns, compare by cf.std.Eq
* 4. when matched column is undefined, return true iff other side is undefined
* 5. reduce non-numeric matches by And
* 6. analogously for numeric columns, save for the following
* 7. for numeric columns with multiple possible types, consider all
* matches and reduce the results by And
* 8. reduce numeric and non-numeric results by And
* 9. mask definedness on output column by definedness on input columns
*
* Generally speaking, for each pair of columns cogrouped by path, we
* compute a single equality column which is defined everywhere and
* definedness behaves according to the following truth table:
*
* - v1 = v2 == <equality>
* - v1 = undefined = false
* - undefined = v2 = false
* - undefined = undefined = true
*
* The undefined comparison is required because we are comparing columns
* that may be nested inside a larger object. We don't have the information
* at the pair level to determine whether the answer should be
* undefined for the entire row, or if the current column pair simply
* does not contribute to the final truth value. Thus, we return a
* "non-contributing" value (the zero for boolean And) and mask definedness
* as a function of all columns. A computed result will be overridden
* by undefined (masked off) if *either* the LHS or the RHS is fully
* undefined (for all columns) at a particular row.
*/
l0.zip(r0) { (sl, sr) =>
new Slice {
val size = sl.size
val columns: Map[ColumnRef, Column] = {
val (leftNonNum, leftNum) = sl.columns partition {
case (ColumnRef(_, CLong | CDouble | CNum), _) => false
case _ => true
}
val (rightNonNum, rightNum) = sr.columns partition {
case (ColumnRef(_, CLong | CDouble | CNum), _) => false
case _ => true
}
val groupedNonNum = (leftNonNum mapValues { _ :: Nil }) cogroup (rightNonNum mapValues { _ :: Nil })
val simplifiedGroupNonNum = groupedNonNum map {
case (_, Left3(column)) => Left(column)
case (_, Right3(column)) => Left(column)
case (_, Middle3((left :: Nil, right :: Nil))) => Right((left, right))
}
class FuzzyEqColumn(left: Column, right: Column) extends BoolColumn {
val equality = cf.std.Eq(left, right).get.asInstanceOf[BoolColumn] // yay!
def isDefinedAt(row: Int) = (left isDefinedAt row) || (right isDefinedAt row)
def apply(row: Int) = equality.isDefinedAt(row) && equality(row)
}
val testedNonNum: Array[BoolColumn] = simplifiedGroupNonNum.map({
case Left(column) => new BoolColumn {
def isDefinedAt(row: Int) = column.isDefinedAt(row)
def apply(row: Int) = false
}
case Right((left, right)) =>
new FuzzyEqColumn(left, right)
})(collection.breakOut)
// numeric stuff
def stripTypes(cols: Map[ColumnRef, Column]) = {
cols.foldLeft(Map[CPath, Set[Column]]()) {
case (acc, (ColumnRef(path, _), column)) => {
val set = acc get path map { _ + column } getOrElse Set(column)
acc.updated(path, set)
}
}
}
val leftNumMulti = stripTypes(leftNum)
val rightNumMulti = stripTypes(rightNum)
val groupedNum = leftNumMulti cogroup rightNumMulti
val simplifiedGroupedNum = groupedNum map {
case (_, Left3(column)) => Left(column): Either[Column, (Set[Column], Set[Column])]
case (_, Right3(column)) => Left(column): Either[Column, (Set[Column], Set[Column])]
case (_, Middle3((left, right))) =>
Right((left, right)): Either[Column, (Set[Column], Set[Column])]
}
val testedNum: Array[BoolColumn] = simplifiedGroupedNum.map({
case Left(column) =>
new BoolColumn {
def isDefinedAt(row: Int) = column.isDefinedAt(row)
def apply(row: Int) = false
}
case Right((left, right)) =>
val tests: Array[BoolColumn] = (for (l <- left; r <- right) yield {
new FuzzyEqColumn(l, r)
}).toArray
new OrLotsColumn(tests)
})(collection.breakOut)
val unifiedNonNum = new AndLotsColumn(testedNonNum)
val unifiedNum = new AndLotsColumn(testedNum)
val unified = new BoolColumn {
def isDefinedAt(row: Int): Boolean = unifiedNonNum.isDefinedAt(row) || unifiedNum.isDefinedAt(row)
def apply(row: Int): Boolean = {
val left = !unifiedNonNum.isDefinedAt(row) || unifiedNonNum(row)
val right = !unifiedNum.isDefinedAt(row) || unifiedNum(row)
left && right
}
}
val mask = sl.definedAt & sr.definedAt
val column = new BoolColumn {
def isDefinedAt(row: Int) = mask(row) && unified.isDefinedAt(row)
def apply(row: Int) = unified(row)
}
Map(ColumnRef(CPath.Identity, CBoolean) -> column)
}
}
}
case EqualLiteral(source, value, invert) => {
val id = System.currentTimeMillis
import cf.std.Eq
val sourceSlice = composeSliceTransform2(source)
def complement(col: BoolColumn) = new BoolColumn {
def isDefinedAt(row: Int) = col.isDefinedAt(row)
def apply(row: Int) = !col(row)
}
sourceSlice map { ss =>
new Slice {
val size = ss.size
val columns = {
val (comparable0, other0) = ss.columns.toList.partition {
case (ref @ ColumnRef(CPath.Identity, tpe), col) if CType.canCompare(CType.of(value),tpe) => true
case _ => false
}
val comparable = comparable0.map(_._2).flatMap { col => Eq.partialRight(value)(col).map(_.asInstanceOf[BoolColumn]) }
val other = other0.map(_._2).map { col => new Map1Column(col) with BoolColumn { def apply(row: Int) = false } }
val columns = comparable ++ other
val aggregate = new BoolColumn {
def isDefinedAt(row: Int) = columns.exists { _.isDefinedAt(row) }
def apply(row: Int) = columns.exists { col => col.isDefinedAt(row) && col(row) }
}
Map(ColumnRef(CPath.Identity, CBoolean) -> (if(invert) complement(aggregate) else aggregate))
}
}
}
}
case ConstLiteral(value, target) =>
composeSliceTransform2(target) map { _.definedConst(value) }
case WrapObject(source, field) =>
composeSliceTransform2(source) map {
_ wrap CPathField(field)
}
case WrapArray(source) =>
composeSliceTransform2(source) map {
_ wrap CPathIndex(0)
}
case OuterObjectConcat(objects @ _*) =>
if (objects.size == 1) {
val typed = Typed(objects.head, JObjectUnfixedT)
composeSliceTransform2(typed)
} else {
objects.map(composeSliceTransform2).reduceLeft { (l0, r0) =>
l0.zip(r0) { (sl, sr) =>
new Slice {
val size = sl.size
val columns: Map[ColumnRef, Column] = {
val (leftObjectBits, leftEmptyBits) = buildFilters(sl.columns, sl.size, filterObjects, filterEmptyObjects)
val (rightObjectBits, rightEmptyBits) = buildFilters(sr.columns, sr.size, filterObjects, filterEmptyObjects)
val (leftFields, rightFields) = buildFields(sl.columns, sr.columns)
val emptyBits = buildOuterBits(leftEmptyBits, rightEmptyBits, leftObjectBits, rightObjectBits)
val emptyObjects = buildEmptyObjects(emptyBits)
val nonemptyObjects = buildNonemptyObjects(leftFields, rightFields)
emptyObjects ++ nonemptyObjects
}
}
}
}
}
case InnerObjectConcat(objects @ _*) =>
/**
* This test is for special casing object concats when we know we
* won't have any unions, or funky behaviour arising from empty
* objects.
*/
def isDisjoint(s1: Slice, s2: Slice): Boolean = {
false // TODO: We really want to optimize the case where
// we are constructing a simple object from some
// other object where usually the definedness is equal
// on both sides, so we can just ++ the columns. But,
// we need to be a bit smarter about checking for equal
// definedness.
// def containsEmptyObject(slice: Slice): Boolean =
// slice.columns.exists(_._1.ctype == CEmptyObject)
// if (containsEmptyObject(s1) || containsEmptyObject(s2))
// return false
// val keys = s1.columns.map(_._1.selector).toSet
// !s2.columns.map(_._1.selector).exists(keys)
}
if (objects.size == 1) {
val typed = Typed(objects.head, JObjectUnfixedT)
composeSliceTransform2(typed)
} else {
objects map composeSliceTransform2 reduceLeft { (l0, r0) =>
l0.zip(r0) { (sl0, sr0) =>
val sl = sl0.typed(JObjectUnfixedT) // Help out the special cases.
val sr = sr0.typed(JObjectUnfixedT)
new Slice {
val size = sl.size
val columns: Map[ColumnRef, Column] = {
if (sl.columns.isEmpty || sr.columns.isEmpty) {
Map.empty[ColumnRef, Column]
} else if (isDisjoint(sl, sr)) {
// If we know sl & sr are disjoint, which is often the
// case for queries where objects are constructed
// manually, then we can do a lot less work.
sl.columns ++ sr.columns
} else {
val (leftObjectBits, leftEmptyBits) = buildFilters(sl.columns, sl.size, filterObjects, filterEmptyObjects)
val (rightObjectBits, rightEmptyBits) = buildFilters(sr.columns, sr.size, filterObjects, filterEmptyObjects)
val (leftFields, rightFields) = buildFields(sl.columns, sr.columns)
val (emptyBits, nonemptyBits) = buildInnerBits(leftEmptyBits, rightEmptyBits, leftObjectBits, rightObjectBits)
val emptyObjects = buildEmptyObjects(emptyBits)
val nonemptyObjects = buildNonemptyObjects(leftFields, rightFields)
val result = emptyObjects ++ nonemptyObjects
result lazyMapValues { col =>
cf.util.filter(0, sl.size max sr.size, nonemptyBits)(col).get
}
}
}
}
}
}
}
case OuterArrayConcat(elements @ _*) =>
if (elements.size == 1) {
val typed = Typed(elements.head, JArrayUnfixedT)
composeSliceTransform2(typed)
} else {
elements.map(composeSliceTransform2).reduceLeft { (l0, r0) =>
l0.zip(r0) { (sl, sr) =>
new Slice {
val size = sl.size
val columns: Map[ColumnRef, Column] = {
val (leftArrayBits, leftEmptyBits) = buildFilters(sl.columns, sl.size, filterArrays, filterEmptyArrays)
val (rightArrayBits, rightEmptyBits) = buildFilters(sr.columns, sr.size, filterArrays, filterEmptyArrays)
val emptyBits = buildOuterBits(leftEmptyBits, rightEmptyBits, leftArrayBits, rightArrayBits)
val emptyArrays = buildEmptyArrays(emptyBits)
val nonemptyArrays = buildNonemptyArrays(sl.columns, sr.columns)
emptyArrays ++ nonemptyArrays
}
}
}
}
}
case InnerArrayConcat(elements @ _*) =>
if (elements.size == 1) {
val typed = Typed(elements.head, JArrayUnfixedT)
composeSliceTransform2(typed)
} else {
elements.map(composeSliceTransform2).reduceLeft { (l0, r0) =>
l0.zip(r0) { (sl, sr) =>
new Slice {
val size = sl.size
val columns: Map[ColumnRef, Column] = {
if (sl.columns.isEmpty || sr.columns.isEmpty) {
Map.empty[ColumnRef, Column]
} else {
val (leftArrayBits, leftEmptyBits) = buildFilters(sl.columns, sl.size, filterArrays, filterEmptyArrays)
val (rightArrayBits, rightEmptyBits) = buildFilters(sr.columns, sr.size, filterArrays, filterEmptyArrays)
val (emptyBits, nonemptyBits) = buildInnerBits(leftEmptyBits, rightEmptyBits, leftArrayBits, rightArrayBits)
val emptyArrays = buildEmptyArrays(emptyBits)
val nonemptyArrays = buildNonemptyArrays(sl.columns, sr.columns)
val result = emptyArrays ++ nonemptyArrays
result lazyMapValues { col =>
cf.util.filter(0, sl.size max sr.size, nonemptyBits)(col).get
}
}
}
}
}
}
}
case ObjectDelete(source, mask) =>
composeSliceTransform2(source) map {
_ deleteFields mask
}
case Typed(source, tpe) =>
composeSliceTransform2(source) map {
_ typed tpe
}
case TypedSubsumes(source, tpe) =>
composeSliceTransform2(source) map {
_ typedSubsumes tpe
}
case IsType(source, tpe) =>
composeSliceTransform2(source) map {
_ isType tpe
}
case Scan(source, scanner) =>
composeSliceTransform2(source) andThen {
SliceTransform1.liftM[scanner.A](
scanner.init,
{ (state: scanner.A, slice: Slice) =>
val (newState, newCols) = scanner.scan(state, slice.columns, 0 until slice.size)
val newSlice = new Slice {
val size = slice.size
val columns = newCols
}
(newState, newSlice)
}
)
}
case MapWith(source, mapper0) =>
composeSliceTransform2(source) andThen {
mapper0.fold({ mapper =>
SliceTransform1.liftM[Unit]((), { (_: Unit, slice: Slice) =>
val cols = mapper.map(slice.columns, 0 until slice.size)
((), Slice(cols, slice.size))
})
}, { mapper =>
SliceTransform1[Unit]((), { (_: Unit, slice: Slice) =>
mapper.map(slice.columns, 0 until slice.size) map { cols =>
((), Slice(cols, slice.size))
}
})
})
}
case DerefMetadataStatic(source, field) =>
composeSliceTransform2(source) map {
_ deref field
}
case DerefObjectStatic(source, field) =>
composeSliceTransform2(source) map {
_ deref field
}
case DerefObjectDynamic(source, ref) =>
val l0 = composeSliceTransform2(source)
val r0 = composeSliceTransform2(ref)
l0.zip(r0) { (slice, derefBy) =>
assert(derefBy.columns.size <= 1)
derefBy.columns.headOption collect {
case (ColumnRef(CPath.Identity, CString), c: StrColumn) =>
new DerefSlice(slice, { case row: Int if c.isDefinedAt(row) => CPathField(c(row)) })
} getOrElse {
slice
}
}
case DerefArrayStatic(source, element) =>
composeSliceTransform2(source) map {
_ deref element
}
case DerefArrayDynamic(source, ref) =>
val l0 = composeSliceTransform2(source)
val r0 = composeSliceTransform2(ref)
l0.zip(r0) { (slice, derefBy) =>
assert(derefBy.columns.size <= 1)
derefBy.columns.headOption collect {
case (ColumnRef(CPath.Identity, CLong), c: LongColumn) =>
new DerefSlice(slice, { case row: Int if c.isDefinedAt(row) => CPathIndex(c(row).toInt) })
case (ColumnRef(CPath.Identity, CDouble), c: DoubleColumn) =>
new DerefSlice(slice, { case row: Int if c.isDefinedAt(row) => CPathIndex(c(row).toInt) })
case (ColumnRef(CPath.Identity, CNum), c: NumColumn) =>
new DerefSlice(slice, { case row: Int if c.isDefinedAt(row) => CPathIndex(c(row).toInt) })
} getOrElse {
slice
}
}
case ArraySwap(source, index) =>
composeSliceTransform2(source) map {
_ arraySwap index
}
case FilterDefined(source, definedFor, definedness) =>
val sourceTransform = composeSliceTransform2(source)
val keyTransform = composeSliceTransform2(definedFor)
sourceTransform.zip(keyTransform) { (s1, s2) => s1.filterDefined(s2, definedness) }
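        // Cond implements a columnar if/then/else: rows where the predicate column is true take
        // their values from the left transform, rows where it is defined and false from the right.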
case Cond(pred, left, right) => {
val predTransform = composeSliceTransform2(pred)
val leftTransform = composeSliceTransform2(left)
val rightTransform = composeSliceTransform2(right)
predTransform.zip2(leftTransform, rightTransform) { (predS, leftS, rightS) =>
new Slice {
val size = predS.size
val columns: Map[ColumnRef, Column] = {
predS.columns get ColumnRef(CPath.Identity, CBoolean) map { predC =>
val leftMask = predC.asInstanceOf[BoolColumn].asBitSet(false, size)
val rightMask = predC.asInstanceOf[BoolColumn].asBitSet(true, size)
rightMask.flip(0, size)
val grouped = (leftS.columns mapValues { _ :: Nil }) cogroup (rightS.columns mapValues { _ :: Nil })
val joined: Map[ColumnRef, Column] = grouped.map({
case (ref, Left3(col)) =>
ref -> cf.util.filter(0, size, leftMask)(col).get
case (ref, Right3(col)) =>
ref -> cf.util.filter(0, size, rightMask)(col).get
case (ref, Middle3((left :: Nil, right :: Nil))) => {
val left2 = cf.util.filter(0, size, leftMask)(left).get
val right2 = cf.util.filter(0, size, rightMask)(right).get
ref -> cf.util.MaskedUnion(leftMask)(left2, right2).get // safe because types are grouped
}
})(collection.breakOut)
joined
} getOrElse Map[ColumnRef, Column]()
}
}
}
}
}
result
}
}
protected sealed trait SliceTransform1[A] {
import SliceTransform1._
def initial: A
def f: (A, Slice) => M[(A, Slice)]
def advance(slice: Slice): M[(SliceTransform1[A], Slice)]
def unlift: Option[(A, Slice) => (A, Slice)] = None
def apply(slice: Slice): M[(A, Slice)] = f(initial, slice)
def mapState[B](f: A => B, g: B => A): SliceTransform1[B] =
MappedState1[A, B](this, f, g)
def zip[B](that: SliceTransform1[B])(combine: (Slice, Slice) => Slice): SliceTransform1[(A, B)] = {
(this, that) match {
case (sta: SliceTransform1S[_], stb: SliceTransform1S[_]) =>
SliceTransform1S[(A, B)]((sta.initial, stb.initial), { case ((a0, b0), s0) =>
val (a, sa) = sta.f0(a0, s0)
val (b, sb) = stb.f0(b0, s0)
assert(sa.size == sb.size)
((a, b), combine(sa, sb))
})
case (sta: SliceTransform1S[_], stb) =>
SliceTransform1M[(A, B)]((sta.initial, stb.initial), { case ((a0, b0), s0) =>
val (a, sa) = sta.f0(a0, s0)
stb.f(b0, s0) map { case (b, sb) =>
assert(sa.size == sb.size)
((a, b), combine(sa, sb))
}
})
case (sta, stb: SliceTransform1S[_]) =>
SliceTransform1M[(A, B)]((sta.initial, stb.initial), { case ((a0, b0), s0) =>
sta.f(a0, s0) map { case (a, sa) =>
val (b, sb) = stb.f0(b0, s0)
assert(sa.size == sb.size)
((a, b), combine(sa, sb))
}
})
case (sta, stb) =>
SliceTransform1[(A, B)]((sta.initial, stb.initial), { case ((a0, b0), s0) =>
for (ares <- sta.f(a0, s0); bres <- stb.f(b0, s0)) yield {
val (a, sa) = ares
val (b, sb) = bres
assert(sa.size == sb.size)
((a, b), combine(sa, sb))
}
})
}
}
def zip2[B, C](t: SliceTransform1[B], t2: SliceTransform1[C])(combine: (Slice, Slice, Slice) => Slice): SliceTransform1[(A, B, C)] = {
      // We can do this in 4 cases efficiently simply by re-ordering the 3 sts.
// Since they're done in parallel, we just need to make sure combine works.
(this, t, t2) match {
case (sta: SliceTransform1S[_], stb: SliceTransform1S[_], stc: SliceTransform1S[_]) =>
SliceTransform1S((sta.initial, stb.initial, stc.initial), { case ((a0, b0, c0), s0) =>
val (a, sa) = sta.f0(a0, s0)
val (b, sb) = stb.f0(b0, s0)
val (c, sc) = stc.f0(c0, s0)
((a, b, c), combine(sa, sb, sc))
})
case (sta, stb, stc) =>
SliceTransform1M((sta.initial, stb.initial, stc.initial), { case ((a0, b0, c0), s0) =>
for {
resa <- sta.f(a0, s0)
resb <- stb.f(b0, s0)
resc <- stc.f(c0, s0)
} yield {
val (a, sa) = resa
val (b, sb) = resb
val (c, sc) = resc
((a, b, c), combine(sa, sb, sc))
}
})
}
}
def map(mapFunc: Slice => Slice): SliceTransform1[A] = SliceTransform1.map(this)(mapFunc)
def andThen[B](that: SliceTransform1[B]): SliceTransform1[(A, B)] = SliceTransform1.chain(this, that)
}
object SliceTransform1 {
def liftM[A](init: A, f: (A, Slice) => (A, Slice)): SliceTransform1[A] =
SliceTransform1S(init, f)
def apply[A](init: A, f: (A, Slice) => M[(A, Slice)]): SliceTransform1[A] =
SliceTransform1M(init, f)
private[table] val Identity: SliceTransform1S[Unit] = SliceTransform1S[Unit]((), { (u, s) => (u, s) })
private[table] def mapS[A](st: SliceTransform1S[A])(f: Slice => Slice): SliceTransform1S[A] =
SliceTransform1S(st.initial, { case (a, s) => st.f0(a, s) :-> f })
private def map[A](st: SliceTransform1[A])(f: Slice => Slice): SliceTransform1[A] = st match {
case (st: SliceTransform1S[_]) => mapS(st)(f)
case SliceTransform1M(i, g) => SliceTransform1M(i, { case (a, s) => g(a, s) map (_ :-> f) })
case SliceTransform1SMS(sta, stb, stc) => SliceTransform1SMS(sta, stb, mapS(stc)(f))
case MappedState1(sta, to, from) => MappedState1(map(sta)(f), to, from)
}
private def chainS[A, B](sta: SliceTransform1S[A], stb: SliceTransform1S[B]): SliceTransform1S[(A, B)] = {
(sta, stb) match {
case (Identity, stb) =>
SliceTransform1S((sta.initial, stb.initial), { case ((_, b0), s0) =>
{ (b: B) => (sta.initial, b) } <-: stb.f0(b0, s0)
})
case (sta, Identity) =>
SliceTransform1S((sta.initial, stb.initial), { case ((a0, _), s0) =>
{ (a: A) => (a, stb.initial) } <-: sta.f0(a0, s0)
})
case (SliceTransform1S(i1, f1), SliceTransform1S(i2, f2)) =>
SliceTransform1S((i1, i2), { case ((a0, b0), s0) =>
val (a, s1) = f1(a0, s0)
val (b, s) = f2(b0, s1)
((a, b), s)
})
}
}
// Note: This is here, rather than in SliceTransform1 trait, because Scala's
// type unification doesn't deal well with `this`.
private def chain[A, B](st0: SliceTransform1[A], st1: SliceTransform1[B]): SliceTransform1[(A, B)] = {
(st0, st1) match {
case (sta: SliceTransform1S[_], stb: SliceTransform1S[_]) =>
chainS(sta, stb)
case (SliceTransform1M(i0, f0), SliceTransform1M(i1, f1)) =>
SliceTransform1M((i0, i1), { case ((a0, b0), s0) =>
for (r0 <- f0(i0, s0); r1 <- f1(i1, r0._2)) yield ((r0._1, r1._1), r1._2)
})
case (sta: SliceTransform1S[_], stb: SliceTransform1M[_]) =>
val st = SliceTransform1SMS(sta, stb, Identity)
st.mapState({ case (a, b, _) => (a, b) }, { case (a, b) => (a, b, ()) })
case (sta: SliceTransform1M[_], stb: SliceTransform1S[_]) =>
val st = SliceTransform1SMS(Identity, sta, stb)
st.mapState({ case (_, a, b) => (a, b) }, { case (a, b) => ((), a, b) })
case (sta: SliceTransform1S[_], SliceTransform1SMS(stb, stc, std)) =>
val st = SliceTransform1SMS(chainS(sta, stb), stc, std)
st.mapState({ case ((a, b), c, d) => (a, (b, c, d)) },
{ case (a, (b, c, d)) => ((a, b), c, d) })
case (SliceTransform1SMS(sta, stb, stc), std: SliceTransform1S[_]) =>
val st = SliceTransform1SMS(sta, stb, chainS(stc, std))
st.mapState({ case (a, b, (c, d)) => ((a, b, c), d) },
{ case ((a, b, c), d) => (a, b, (c, d)) })
case (sta: SliceTransform1M[_], SliceTransform1SMS(stb, stc, std)) =>
val st = SliceTransform1SMS(Identity, sta andThen stb andThen stc, std)
st.mapState({ case (_, ((a, b), c), d) => (a, (b, c, d)) },
{ case (a, (b, c, d)) => ((), ((a, b), c), d) })
case (SliceTransform1SMS(sta, stb, stc), std: SliceTransform1M[_]) =>
val st = SliceTransform1SMS(sta, stb andThen stc andThen std, Identity)
st.mapState({ case (a, ((b, c), d), _) => ((a, b, c), d) },
{ case ((a, b, c), d) => (a, ((b, c), d), ()) })
case (SliceTransform1SMS(sta, stb, stc), SliceTransform1SMS(std, ste, stf)) =>
val st = SliceTransform1SMS(sta, stb andThen stc andThen std andThen ste, stf)
st.mapState({ case (a, (((b, c), d), e), f) => ((a, b, c), (d, e, f)) },
{ case ((a, b, c), (d, e, f)) => (a, (((b, c), d), e), f) })
case (MappedState1(sta, f, g), stb) =>
(sta andThen stb).mapState(f <-: _, g <-: _)
case (sta, MappedState1(stb, f, g)) =>
(sta andThen stb).mapState(_ :-> f, _ :-> g)
}
}
private[table] case class SliceTransform1S[A](initial: A, f0: (A, Slice) => (A, Slice)) extends SliceTransform1[A] {
override def unlift = Some(f0)
val f: (A, Slice) => M[(A, Slice)] = { case (a, s) => M point f0(a, s) }
def advance(s: Slice): M[(SliceTransform1[A], Slice)] =
M point ({ (a: A) => SliceTransform1S[A](a, f0) } <-: f0(initial, s))
}
private[table] case class SliceTransform1M[A](initial: A, f: (A, Slice) => M[(A, Slice)]) extends SliceTransform1[A] {
def advance(s: Slice): M[(SliceTransform1[A], Slice)] = apply(s) map { case (next, slice) =>
(SliceTransform1M[A](next, f), slice)
}
}
private[table] case class SliceTransform1SMS[A,B,C](before: SliceTransform1S[A], transM: SliceTransform1[B], after: SliceTransform1S[C]) extends SliceTransform1[(A, B, C)] {
def initial: (A, B, C) = (before.initial, transM.initial, after.initial)
val f: ((A, B, C), Slice) => M[((A, B, C), Slice)] = { case ((a0, b0, c0), s) =>
val (a, slice0) = before.f0(a0, s)
transM.f(b0, slice0) map { case (b, slice1) =>
val (c, slice) = after.f0(c0, slice1)
((a, b, c), slice)
}
}
def advance(s: Slice): M[(SliceTransform1[(A, B, C)], Slice)] = apply(s) map { case ((a, b, c), slice) =>
val transM0 = SliceTransform1M(b, transM.f)
(SliceTransform1SMS[A, B, C](before.copy(initial = a), transM0, after.copy(initial = c)), slice)
}
}
private[table] case class MappedState1[A, B](st: SliceTransform1[A], to: A => B, from: B => A) extends SliceTransform1[B] {
def initial: B = to(st.initial)
def f: (B, Slice) => M[(B, Slice)] = { (b, s) => st.f(from(b), s) map (to <-: _) }
def advance(s: Slice): M[(SliceTransform1[B], Slice)] =
st.advance(s) map { case (st0, s0) => (MappedState1[A, B](st0, to, from), s0) }
}
}
protected sealed trait SliceTransform2[A] {
import SliceTransform2._
def initial: A
def f: (A, Slice, Slice) => M[(A, Slice)]
def advance(sl: Slice, sr: Slice): M[(SliceTransform2[A], Slice)]
def unlift: Option[(A, Slice, Slice) => (A, Slice)] = None
def apply(sl: Slice, sr: Slice): M[(A, Slice)] = f(initial, sl, sr)
def mapState[B](f: A => B, g: B => A): SliceTransform2[B] =
MappedState2[A, B](this, f, g)
def zip[B](that: SliceTransform2[B])(combine: (Slice, Slice) => Slice): SliceTransform2[(A, B)] = {
(this, that) match {
case (sta: SliceTransform2S[_], stb: SliceTransform2S[_]) =>
SliceTransform2S[(A, B)]((sta.initial, stb.initial), { case ((a0, b0), sl0, sr0) =>
val (a, sa) = sta.f0(a0, sl0, sr0)
val (b, sb) = stb.f0(b0, sl0, sr0)
assert(sa.size == sb.size)
((a, b), combine(sa, sb))
})
case (sta: SliceTransform2S[_], stb) =>
SliceTransform2M[(A, B)]((sta.initial, stb.initial), { case ((a0, b0), sl0, sr0) =>
val (a, sa) = sta.f0(a0, sl0, sr0)
stb.f(b0, sl0, sr0) map { case (b, sb) =>
assert(sa.size == sb.size)
((a, b), combine(sa, sb))
}
})
case (sta, stb: SliceTransform2S[_]) =>
SliceTransform2M[(A, B)]((sta.initial, stb.initial), { case ((a0, b0), sl0, sr0) =>
sta.f(a0, sl0, sr0) map { case (a, sa) =>
val (b, sb) = stb.f0(b0, sl0, sr0)
assert(sa.size == sb.size)
((a, b), combine(sa, sb))
}
})
case (sta, stb) =>
SliceTransform2[(A, B)]((sta.initial, stb.initial), { case ((a0, b0), sl0, sr0) =>
for (ares <- sta.f(a0, sl0, sr0); bres <- stb.f(b0, sl0, sr0)) yield {
val (a, sa) = ares
val (b, sb) = bres
assert(sa.size == sb.size)
((a, b), combine(sa, sb))
}
})
}
}
def zip2[B, C](t: SliceTransform2[B], t2: SliceTransform2[C])(combine: (Slice, Slice, Slice) => Slice): SliceTransform2[(A, B, C)] = {
      // We can do this in 4 cases efficiently simply by re-ordering the 3 sts.
// Since they're done in parallel, we just need to make sure combine works.
(this, t, t2) match {
case (sta: SliceTransform2S[_], stb: SliceTransform2S[_], stc: SliceTransform2S[_]) =>
SliceTransform2S((sta.initial, stb.initial, stc.initial), { case ((a0, b0, c0), sl0, sr0) =>
val (a, sa) = sta.f0(a0, sl0, sr0)
val (b, sb) = stb.f0(b0, sl0, sr0)
val (c, sc) = stc.f0(c0, sl0, sr0)
((a, b, c), combine(sa, sb, sc))
})
case (sta, stb, stc) =>
SliceTransform2M((sta.initial, stb.initial, stc.initial), { case ((a0, b0, c0), sl0, sr0) =>
for {
resa <- sta.f(a0, sl0, sr0)
resb <- stb.f(b0, sl0, sr0)
resc <- stc.f(c0, sl0, sr0)
} yield {
val (a, sa) = resa
val (b, sb) = resb
val (c, sc) = resc
((a, b, c), combine(sa, sb, sc))
}
})
}
}
def map(mapFunc: Slice => Slice): SliceTransform2[A] = SliceTransform2.map(this)(mapFunc)
def andThen[B](that: SliceTransform1[B]): SliceTransform2[(A, B)] = SliceTransform2.chain(this, that)
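    // `parallel` reuses this binary transform as a unary one by feeding the same slice to both
    // the left and the right input.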
def parallel: SliceTransform1[A] = this match {
case (st: SliceTransform2S[_]) => SliceTransform1.liftM[A](initial, { (a, s) => st.f0(a, s, s) })
case _ => SliceTransform1[A](initial, { (a, s) => f(a, s, s) })
}
}
object SliceTransform2 {
import SliceTransform1.{ SliceTransform1S, SliceTransform1M, SliceTransform1SMS, MappedState1 }
def liftM[A](init: A, f: (A, Slice, Slice) => (A, Slice)): SliceTransform2[A] =
SliceTransform2S(init, f)
def apply[A](init: A, f: (A, Slice, Slice) => M[(A, Slice)]): SliceTransform2[A] =
SliceTransform2M(init, f)
private def mapS[A](st: SliceTransform2S[A])(f: Slice => Slice): SliceTransform2S[A] =
SliceTransform2S(st.initial, { case (a, sl, sr) => st.f0(a, sl, sr) :-> f })
private def map[A](st: SliceTransform2[A])(f: Slice => Slice): SliceTransform2[A] = st match {
case (st: SliceTransform2S[_]) => mapS(st)(f)
case SliceTransform2M(i, g) => SliceTransform2M(i, { case (a, sl, sr) => g(a, sl, sr) map (_ :-> f) })
case SliceTransform2SM(sta, stb) => SliceTransform2SM(sta, stb map f)
case SliceTransform2MS(sta, stb) => SliceTransform2MS(sta, SliceTransform1.mapS(stb)(f))
case MappedState2(sta, to, from) => MappedState2(map(sta)(f), to, from)
}
private def chainS[A, B](sta: SliceTransform2S[A], stb: SliceTransform1S[B]): SliceTransform2S[(A, B)] = {
(sta, stb) match {
case (sta, SliceTransform1.Identity) =>
SliceTransform2S((sta.initial, stb.initial), { case ((a0, _), sl0, sr0) =>
{ (a: A) => (a, stb.initial) } <-: sta.f0(a0, sl0, sr0)
})
case (SliceTransform2S(i1, f1), SliceTransform1S(i2, f2)) =>
SliceTransform2S((i1, i2), { case ((a0, b0), sl0, sr0) =>
val (a, s1) = f1(a0, sl0, sr0)
val (b, s) = f2(b0, s1)
((a, b), s)
})
}
}
private def chain[A, B](st0: SliceTransform2[A], st1: SliceTransform1[B]): SliceTransform2[(A, B)] = {
(st0, st1) match {
case (sta, MappedState1(stb, f, g)) =>
chain(sta, stb).mapState( _ :-> f, _ :-> g)
case (sta: SliceTransform2S[_], stb: SliceTransform1S[_]) =>
chainS(sta, stb)
case (sta: SliceTransform2S[_], stb: SliceTransform1[_]) =>
SliceTransform2SM(sta, stb)
case (sta: SliceTransform2M[_], stb: SliceTransform1S[_]) =>
SliceTransform2MS(sta, stb)
case (sta: SliceTransform2M[_], stb: SliceTransform1[_]) =>
SliceTransform2M((sta.initial, stb.initial), { case ((a0, b0), sl0, sr0) =>
sta.f(a0, sl0, sr0) flatMap { case (a, s0) =>
stb.f(b0, s0) map { case (b, s) => ((a, b), s) }
}
})
case (SliceTransform2SM(sta, stb), stc) =>
val st = SliceTransform2SM(sta, stb andThen stc)
st.mapState({ case (a, (b, c)) => ((a, b), c) }, { case ((a, b), c) => (a, (b, c)) })
case (SliceTransform2MS(sta, stb), stc) =>
val st = chain(sta, stb andThen stc)
st.mapState({ case (a, (b, c)) => ((a, b), c) },
{ case ((a, b), c) => (a, (b, c)) })
case (MappedState2(sta, f, g), stb) =>
chain(sta, stb).mapState(f <-: _, g <-: _)
}
}
private case class SliceTransform2S[A](initial: A, f0: (A, Slice, Slice) => (A, Slice)) extends SliceTransform2[A] {
override def unlift = Some(f0)
val f: (A, Slice, Slice) => M[(A, Slice)] = { case (a, sl, sr) => M point f0(a, sl, sr) }
def advance(sl: Slice, sr: Slice): M[(SliceTransform2[A], Slice)] =
M point ({ (a: A) => SliceTransform2S[A](a, f0) } <-: f0(initial, sl, sr))
}
private case class SliceTransform2M[A](initial: A, f: (A, Slice, Slice) => M[(A, Slice)]) extends SliceTransform2[A] {
def advance(sl: Slice, sr: Slice): M[(SliceTransform2[A], Slice)] = apply(sl, sr) map { case (next, slice) =>
(SliceTransform2M[A](next, f), slice)
}
}
private case class SliceTransform2SM[A,B](before: SliceTransform2S[A], after: SliceTransform1[B]) extends SliceTransform2[(A, B)] {
def initial: (A, B) = (before.initial, after.initial)
val f: ((A, B), Slice, Slice) => M[((A, B), Slice)] = { case ((a0, b0), sl0, sr0) =>
val (a, s0) = before.f0(a0, sl0, sr0)
after.f(b0, s0) map { case (b, s) => ((a, b), s) }
}
def advance(sl: Slice, sr: Slice): M[(SliceTransform2[(A, B)], Slice)] = apply(sl, sr) map { case ((a, b), slice) =>
val after0 = SliceTransform1M(b, after.f)
(SliceTransform2SM[A, B](before.copy(initial = a), after0), slice)
}
}
private case class SliceTransform2MS[A,B](before: SliceTransform2[A], after: SliceTransform1S[B]) extends SliceTransform2[(A, B)] {
def initial: (A, B) = (before.initial, after.initial)
val f: ((A, B), Slice, Slice) => M[((A, B), Slice)] = { case ((a0, b0), sl0, sr0) =>
before.f(a0, sl0, sr0) map { case (a, s0) =>
val (b, s) = after.f0(b0, s0)
((a, b), s)
}
}
def advance(sl: Slice, sr: Slice): M[(SliceTransform2[(A, B)], Slice)] = apply(sl, sr) map { case ((a, b), slice) =>
val before0 = SliceTransform2M(a, before.f)
(SliceTransform2MS[A, B](before0, after.copy(initial = b)), slice)
}
}
private case class MappedState2[A, B](st: SliceTransform2[A], to: A => B, from: B => A) extends SliceTransform2[B] {
def initial: B = to(st.initial)
def f: (B, Slice, Slice) => M[(B, Slice)] = { (b, sl, sr) => st.f(from(b), sl, sr) map (to <-: _) }
def advance(sl: Slice, sr: Slice): M[(SliceTransform2[B], Slice)] =
st.advance(sl, sr) map { case (st0, s0) => (MappedState2[A, B](st0, to, from), s0) }
}
}
}
trait ConcatHelpers {
def buildFilters(columns: Map[ColumnRef, Column], size: Int, filter: Map[ColumnRef, Column] => Map[ColumnRef, Column], filterEmpty: Map[ColumnRef, Column] => Map[ColumnRef, Column]) = {
val definedBits = filter(columns).values.map(_.definedAt(0, size)).reduceOption(_ | _) getOrElse new BitSet
val emptyBits = filterEmpty(columns).values.map(_.definedAt(0, size)).reduceOption(_ | _) getOrElse new BitSet
(definedBits, emptyBits)
}
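  // In an outer concat, a row keeps an empty value when both sides are empty, or when one side
  // is empty and the other side has no defined columns of the filtered kind at all.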
def buildOuterBits(leftEmptyBits: BitSet, rightEmptyBits: BitSet, leftDefinedBits: BitSet, rightDefinedBits: BitSet): BitSet = {
(rightEmptyBits & leftEmptyBits) |
(rightEmptyBits &~ leftDefinedBits) |
(leftEmptyBits &~ rightDefinedBits)
}
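  // In an inner concat, a row is empty only when both sides are empty, and output is kept only
  // where both sides are defined.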
def buildInnerBits(leftEmptyBits: BitSet, rightEmptyBits: BitSet, leftDefinedBits: BitSet, rightDefinedBits: BitSet) = {
val emptyBits = rightEmptyBits & leftEmptyBits
val nonemptyBits = leftDefinedBits & rightDefinedBits
(emptyBits, nonemptyBits)
}
}
trait ArrayConcatHelpers extends ConcatHelpers {
def filterArrays(columns: Map[ColumnRef, Column]) = columns.filter {
case (ColumnRef(CPath(CPathIndex(_), _ @ _*), _), _) => true
case (ColumnRef(CPath.Identity, CEmptyArray), _) => true
case _ => false
}
def filterEmptyArrays(columns: Map[ColumnRef, Column]) = columns.filter {
case (ColumnRef(CPath.Identity, CEmptyArray), _) => true
case _ => false
}
def collectIndices(columns: Map[ColumnRef, Column]) = columns.collect {
case (ref @ ColumnRef(CPath(CPathIndex(i), xs @ _*), ctype), col) => (i, xs, ref, col)
}
def buildEmptyArrays(emptyBits: BitSet) = Map(ColumnRef(CPath.Identity, CEmptyArray) -> EmptyArrayColumn(emptyBits))
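  // Concatenates arrays by re-indexing the right-hand elements to start immediately after the
  // highest index present on the left, e.g. [a, b] ++ [c] yields indices 0, 1 and 2.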
def buildNonemptyArrays(left: Map[ColumnRef, Column], right: Map[ColumnRef, Column]) = {
val leftIndices = collectIndices(left)
val rightIndices = collectIndices(right)
val maxId = if (leftIndices.isEmpty) -1 else leftIndices.map(_._1).max
val newCols = (leftIndices map { case (_, _, ref, col) => ref -> col }) ++
(rightIndices map { case (i, xs, ref, col) => ColumnRef(CPath(CPathIndex(i + maxId + 1) :: xs.toList), ref.ctype) -> col })
newCols.toMap
}
}
trait ObjectConcatHelpers extends ConcatHelpers {
def filterObjects(columns: Map[ColumnRef, Column]) = columns.filter {
case (ColumnRef(CPath(CPathField(_), _ @ _*), _), _) => true
case (ColumnRef(CPath.Identity, CEmptyObject), _) => true
case _ => false
}
def filterEmptyObjects(columns: Map[ColumnRef, Column]) = columns.filter {
case (ColumnRef(CPath.Identity, CEmptyObject), _) => true
case _ => false
}
def filterFields(columns: Map[ColumnRef, Column]) = columns.filter {
case (ColumnRef(CPath(CPathField(_), _ @ _*), _), _) => true
case _ => false
}
def buildFields(leftColumns: Map[ColumnRef, Column], rightColumns: Map[ColumnRef, Column]) =
(filterFields(leftColumns), filterFields(rightColumns))
def buildEmptyObjects(emptyBits: BitSet) = {
if (emptyBits.isEmpty) Map.empty[ColumnRef, Column]
else Map(ColumnRef(CPath.Identity, CEmptyObject) -> EmptyObjectColumn(emptyBits))
}
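  // Fields present on only one side pass through unchanged; fields present on both sides are
  // merged with UnionRight, so wherever both are defined the right-hand value wins.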
def buildNonemptyObjects(leftFields: Map[ColumnRef, Column], rightFields: Map[ColumnRef, Column]) = {
val (leftInner, leftOuter) = leftFields partition {
case (ColumnRef(path, _), _) =>
rightFields exists { case (ColumnRef(path2, _), _) => path == path2 }
}
val (rightInner, rightOuter) = rightFields partition {
case (ColumnRef(path, _), _) =>
leftFields exists { case (ColumnRef(path2, _), _) => path == path2 }
}
val innerPaths = Set(leftInner.keys map { _.selector } toSeq: _*)
val mergedPairs: Set[(ColumnRef, Column)] = innerPaths flatMap { path =>
val rightSelection = rightInner filter {
case (ColumnRef(path2, _), _) => path == path2
}
val leftSelection = leftInner filter {
case (ref @ ColumnRef(path2, _), _) =>
path == path2 && !rightSelection.contains(ref)
}
val rightMerged = rightSelection map {
case (ref, col) => {
if (leftInner contains ref)
ref -> cf.util.UnionRight(leftInner(ref), col).get
else
ref -> col
}
}
rightMerged ++ leftSelection
}
leftOuter ++ rightOuter ++ mergedPairs
}
}
| precog/platform | yggdrasil/src/main/scala/com/precog/yggdrasil/table/SliceTransform.scala | Scala | agpl-3.0 | 50,104 |
package com.eclipsesource.schema
import java.net.{URL, URLStreamHandler}
import com.eclipsesource.schema.drafts.{Version4, Version7}
import com.eclipsesource.schema.internal._
import com.eclipsesource.schema.internal.refs.{DocumentCache, Ref, SchemaRefResolver, SchemaResolutionScope}
import com.eclipsesource.schema.internal.url.UrlStreamResolverFactory
import com.eclipsesource.schema.internal.validators.DefaultFormats
import com.osinka.i18n.{Lang, Messages}
import play.api.libs.json._
import scalaz.\/
import scala.io.Source
import scala.util.Try
/**
* Allows customizations of the validation process.
*/
trait SchemaConfigOptions {
def supportsExternalReferences: Boolean
def formats: Map[String, SchemaFormat]
}
object SchemaValidator { self =>
def apply(version: Option[SchemaVersion] = None)
(implicit lang: Lang = Lang.Default): SchemaValidator = {
val validator = new SchemaValidator(version, version.map(_.options.formats).getOrElse(DefaultFormats.formats))
version.fold(validator) {
case _: Version4 => validator.addSchema(Version4.SchemaUrl, Version4.Schema)
case _: Version7 => validator.addSchema(Version7.SchemaUrl, Version7.Schema)
case other =>
throw new RuntimeException(s"Could not read schema file $other.")
}
}
}
/**
* The schema validator.
*
*/
class SchemaValidator(
val schemaVersion: Option[SchemaVersion] = None,
val formats: Map[String, SchemaFormat] = DefaultFormats.formats,
val resolverFactory: UrlStreamResolverFactory = UrlStreamResolverFactory(),
val cache: DocumentCache = DocumentCache()
)(implicit val lang: Lang) {
val DefaultVersion: SchemaVersion = Version7
/**
   * Add a URLStreamHandler that is capable of handling absolute URLs with a specific scheme.
*
* @param handler the UrlHandler to be added
* @return a new validator instance
*/
def addUrlHandler(handler: URLStreamHandler, scheme: String): SchemaValidator = {
new SchemaValidator(
schemaVersion,
formats,
resolverFactory.addUrlHandler(scheme, handler),
cache
)
}
/**
* Add a custom format
*
* @param format the custom format
* @return a new validator instance containing the custom format
*/
def addFormat(format: SchemaFormat): SchemaValidator =
new SchemaValidator(schemaVersion, formats + (format.name -> format), resolverFactory, cache)
/**
* Add a schema.
*
* @param id the id of the schema
* @param schema the schema
*/
def addSchema(id: String, schema: SchemaType): SchemaValidator = {
new SchemaValidator(
schemaVersion,
formats,
resolverFactory,
cache.add(Ref(id))(schema)
.addAll(collectSchemas(schema, Some(Ref(id))))
)
}
private def buildContext(refResolver: SchemaRefResolver, schema: SchemaType, schemaUrl: Option[Ref]): SchemaResolutionContext =
SchemaResolutionContext(
refResolver,
SchemaResolutionScope(
schema,
schema.constraints.id.map(Ref(_)) orElse schemaUrl,
Some(JsPath \ "#")
),
formats = formats
)
private[schema] def readJson(json: JsValue)(implicit reads: Reads[SchemaType], lang: Lang): \/[JsonValidationError, SchemaType] = {
\/.fromEither(Json.fromJson[SchemaType](json).asEither)
.leftMap(errors =>
JsonValidationError(Messages("err.parse.json"), JsError.toJson(errors))
)
}
private def parseJson(source: Source): \/[JsonValidationError, JsValue] = \/.fromEither(Try {
Json.parse(source.getLines().mkString)
}.toJsonEither)
private def obtainVersion(json: JsValue): SchemaVersion = {
val version = schemaVersion orElse (json \ "$schema").toOption.map {
case JsString(Version4.SchemaUrl) => Version4
case _ => DefaultVersion
}
version.getOrElse(DefaultVersion)
}
private def obtainVersion(schema: SchemaType): SchemaVersion = {
val $schema = schema match {
case SchemaRoot(v, _) => v
case _ => None
}
val version = schemaVersion orElse $schema
version.getOrElse(DefaultVersion)
}
def validate(schemaUrl: URL): JsValue => JsResult[JsValue] = {
doValidate(Source.fromURL(schemaUrl), Some(Ref(schemaUrl.toString)))
}
def validate(schemaSource: Source): JsValue => JsResult[JsValue] = {
doValidate(schemaSource, None)
}
private def doValidate(schemaSource: Source, schemaUrl: Option[Ref] = None): JsValue => JsResult[JsValue] = {
val context: JsResult[SchemaResolutionContext] = parseJson(schemaSource).toJsResult.flatMap {
json =>
val version = obtainVersion(json)
import version._
val schema = readJson(json).toJsResult
val id = schema.asOpt.flatMap(s => s.constraints.id.map(Ref(_))) orElse schemaUrl
val refResolver = SchemaRefResolver(
version,
schema.fold(_ => cache, s => cache.addAll(collectSchemas(s, id))),
resolverFactory
)
schema.map(s => buildContext(refResolver, s, id))
}
input: JsValue =>
context
.flatMap(ctx => ctx.scope.documentRoot.validate(input, ctx).toJsResult)
}
/**
   * Validate the given JsValue against the schema read from the given source
* and convert the result via the specified Reads instance in case
* it has been successful.
*
   * @param schemaSource the source from which to read the schema
* @param input the value to be validated
* @return a JsResult holding the validation result
*/
def validate[A](schemaSource: Source, input: => JsValue, reads: Reads[A]): JsResult[A] = {
validate(schemaSource)(input).fold(
valid = readWith(reads),
invalid = errors => JsError(errors)
)
}
/**
* Validate the given JsValue against the schema located at the given URL
* and convert the result via the specified Reads instance in case
* it has been successful.
*
   * @param schemaUrl the URL from which to read the schema
* @param input the value to be validated
* @return a JsResult holding the validation result
*/
def validate[A](schemaUrl: URL, input: => JsValue, reads: Reads[A]): JsResult[A] = {
validate(schemaUrl)(input).fold(
valid = readWith(reads),
invalid = errors => JsError(errors)
)
}
/**
* Convert the given value via the specified Writes instance to a JsValue
   * and validate it against the schema read from the given source.
   *
   * @param schemaSource the source from which to read the schema
* @param input the value to be validated
* @return a JsResult holding the valid result
*/
def validate[A](schemaSource: Source, input: => A, writes: Writes[A]): JsResult[JsValue] = {
val inputJs = writes.writes(input)
validate(schemaSource)(inputJs)
}
/**
* Convert the given value via the specified Writes instance to a JsValue
* and validate it against the schema located at the given URL.
*
   * @param schemaUrl the URL from which to read the schema
* @param input the value to be validated
* @return a JsResult holding the valid result
*/
def validate[A](schemaUrl: URL, input: => A, writes: Writes[A]): JsResult[JsValue] = {
val inputJs = writes.writes(input)
validate(schemaUrl)(inputJs)
}
/**
* Convert the given value via the specified Format instance to a JsValue,
   * validate it against the schema read from the given source, and convert it back.
   *
   * @param schemaSource the source from which to read the schema
* @param input the value to be validated
* @return a JsResult holding the valid result
*/
def validate[A : Format](schemaSource: Source, input: A): JsResult[A] = {
val writes = implicitly[Writes[A]]
val reads = implicitly[Reads[A]]
validate(schemaSource, input, writes).fold(
valid = readWith(reads),
invalid = errors => JsError(errors)
)
}
/**
* Convert the given value via the specified Format instance to a JsValue,
* validate it against the schema at the given URL, and convert it back.
*
   * @param schemaUrl the URL from which to read the schema
* @param input the value to be validated
* @return a JsResult holding the valid result
*/
def validate[A : Format](schemaUrl: URL, input: A): JsResult[A] = {
val writes = implicitly[Writes[A]]
val reads = implicitly[Reads[A]]
validate(schemaUrl, input, writes).fold(
valid = readWith(reads),
invalid = errors => JsError(errors)
)
}
//
// --
//
/**
* Validate the given JsValue against the given schema.
*
* @param schema the schema to validate against
* @param input the value to be validated
* @return a JsResult holding the valid result
*/
def validate(schema: SchemaType)(input: => JsValue): JsResult[JsValue] = {
val ref: Option[Ref] = schema.constraints.id.map(Ref(_))
val version: SchemaVersion = obtainVersion(schema)
val id = schema.constraints.id.map(Ref(_))
val refResolver = SchemaRefResolver(version, cache.addAll(collectSchemas(schema, id)), resolverFactory)
val context = SchemaResolutionContext(
refResolver,
SchemaResolutionScope(schema, ref, Some(JsPath \ "#")),
formats = formats
)
schema.validate(input, context).toJsResult
}
/**
* Validate the given JsValue against the schema and convert the result
* via the specified Reads instance in case it has been successful.
*
* @param schema the schema to validate against
* @param input the value to be validated
* @return a JsResult holding the validation result
*/
def validate[A](schema: SchemaType, input: => JsValue, reads: Reads[A]): JsResult[A] = {
val result = validate(schema)(input)
result.fold(
valid = readWith(reads),
invalid = errors => JsError(errors)
)
}
/**
* Convert the given value via the specified Writes instance to a JsValue
* and validate it against the schema.
*
* @param schema the schema to validate against
* @param input the value to be validated
* @return a JsResult holding the valid result
*/
def validate[A](schema: SchemaType, input: A, writes: Writes[A]): JsResult[JsValue] = {
val inputJs = writes.writes(input)
validate(schema)(inputJs)
}
/**
* Convert the given value via the specified Format instance to a JsValue,
* validate it against the given schema and convert it back.
*
* @param schema the schema to validate against
* @param input the value to be validated
* @return a JsResult holding the valid result
*/
def validate[A: Format](schema: SchemaType, input: A): JsResult[A] = {
val writes = implicitly[Writes[A]]
val reads = implicitly[Reads[A]]
val inputJs = writes.writes(input)
val result = validate(schema)(inputJs)
result.fold(
valid = readWith(reads),
invalid = errors => JsError(errors)
)
}
private def readWith[A](reads: Reads[A]): JsValue => JsResult[A] = json =>
reads.reads(json) match {
case JsSuccess(success, _) => JsSuccess(success)
case JsError(errors) => JsError(errors)
}
}
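/**
 * Illustrative usage sketch (not part of the original sources): validate a JSON value against a
 * draft-7 schema read from an arbitrary source. The object and method names are placeholders;
 * only the calls into SchemaValidator itself are taken from the API defined above.
 */
object SchemaValidatorUsageExample {
  def validateWithDraft7(schemaSource: Source, input: JsValue): JsResult[JsValue] =
    SchemaValidator(Some(Version7)).validate(schemaSource)(input)
}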
| edgarmueller/play-json-schema-validator | src/main/scala/com/eclipsesource/schema/SchemaValidator.scala | Scala | apache-2.0 | 11,279 |
package glaux.linearalgebra
import glaux.linearalgebra.Tensor._
// Implement this trait to provide an implementation of the linear algebra interfaces.
trait Implementation {
implicit val rBuilder: RowBuilder
implicit val mBuilder: MatrixBuilder
implicit val vBuilder: VolBuilder
implicit val t4Builder: Tensor4Builder
implicit val genBuilder: GenTensorBuilder[Tensor]
}
| A-Noctua/glaux | linear-algebra/src/main/scala/glaux/linearalgebra/Implementation.scala | Scala | mit | 374 |
package org.raisercostin.jedi
import org.raisercostin.jedi.Locations._
import org.scalatest._
import org.junit.runner.RunWith
import org.junit.Assert._
import org.scalatest.junit.JUnitRunner
import scala.util.Try
import java.util.regex.Pattern.Loop
import Locations._
import scala.collection.mutable.ArrayBuffer
@RunWith(classOf[JUnitRunner])
class VfsLocationsTest extends FunSuite {
import org.scalatest.Matchers._
test("vfs from zip") {
val zip = Locations.classpath("location.zip")
println(zip+"->"+zip.absolute)
val vfs = Locations.vfs("zip:"+zip.absolute)
println(vfs)
vfs.name shouldBe ""
println("["+vfs.file.getParent+"]")
vfs.external.name shouldBe "location.zip"
vfs.list.size shouldBe 3
vfs.list.foreach(println)
vfs.list.map(_.name) shouldBe ArrayBuffer("c","a.txt","b.txt")
}
test("vfs from zip inside zip") {
//see http://stackoverflow.com/questions/9661214/uri-for-nested-zip-files-in-apaches-common-vfs
val zip = Locations.classpath("location.zip")
val subzip = Locations.vfs("zip:zip:"+zip.absolute+"!/c/subzip.zip!/")
println(subzip)
subzip.list.map(_.name) shouldBe ArrayBuffer("r","p.txt","inside.txt","q.txt")
}
test("vfs from zip inside zip with childs") {
val zip = Locations.classpath("location.zip")
val vfs = Locations.vfs("zip:"+zip.absolute)
val subzip = vfs.child("c").child("subzip.zip").withProtocol("zip")
println(subzip)
subzip.list.map(_.name) shouldBe ArrayBuffer("r","p.txt","inside.txt","q.txt")
}
test("vfs http") {
val vfs = Locations.vfs("http://google.com")
vfs.readContentAsText.get.take(10) shouldBe "<!doctype "
}
test("vfs https") {
val vfs = Locations.vfs("https://google.com")
vfs.readContentAsText.get.take(10) shouldBe "<!doctype "
}
ignore("vfs webdav") {
val vfs = Locations.vfs("webdav://demo:[email protected]/demo/")
vfs.list.map(_.name) shouldBe ArrayBuffer("r","p.txt","inside.txt","q.txt")
}
}
| raisercostin/jedi-io | src/test/scala/org/raisercostin/jedi/VfsLocationsTest.scala | Scala | apache-2.0 | 1,988 |
package org.skrushingiv.repository
/**
* The base trait for all entities.
*
* The idea behind this Entity trait is that implementations will most
* often be case classes, and that these classes will know the type of
* their primary identifier, even if they don't contain one yet.
*
* The entity itself may not include the actual ID, as it may not yet
* have been persisted or prepared for persisting.
*/
trait Entity {
/**
* The type that IDs must conform to in order to be paired with this Entity class.
*/
type Id
}
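/**
 * Illustrative example (not part of the original source): a typical case-class implementation
 * that fixes the identifier type to Long. The name and fields are arbitrary; the id is optional
 * because the instance may not have been persisted yet.
 */
final case class ExampleAccount(id: Option[Long], name: String) extends Entity {
  type Id = Long
}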
| srushingiv/org.skrushingiv | src/main/scala/org/skrushingiv/repository/entities.scala | Scala | mit | 542 |
/*******************************************************************************
* Copyright (c) 2013 Guillaume DUBUISSON DUPLESSIS <[email protected]>.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Public License v3.0
* which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/gpl.html
*
* Contributors:
* Guillaume DUBUISSON DUPLESSIS <[email protected]> - initial API and implementation
******************************************************************************/
package list.P05
class sol03 extends P05 {
def reverse[T](l: List[T]): List[T] =
(l foldLeft List[T]())((newList, elt) => (elt +: newList))
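  // Example (illustrative): reverse(List(1, 1, 2, 3, 5, 8)) == List(8, 5, 3, 2, 1, 1)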
}
| GuillaumeDD/scala99problems | src/main/scala/list/P05/sol03.scala | Scala | gpl-3.0 | 784 |
package com.wavesplatform.events
import com.google.common.primitives.Ints
import com.wavesplatform.api.common.CommonBlocksApi
import com.wavesplatform.api.grpc._
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.Base58
import com.wavesplatform.database.{DBExt, DBResource}
import com.wavesplatform.events.protobuf.BlockchainUpdated.Append.Body
import com.wavesplatform.events.protobuf.{BlockchainUpdated => PBBlockchainUpdated}
import com.wavesplatform.protobuf._
import com.wavesplatform.protobuf.block.PBBlock
import com.wavesplatform.utils.ScorexLogging
import monix.reactive.Observable
import org.iq80.leveldb.DB
import scala.collection.mutable.ArrayBuffer
import scala.util.{Failure, Success, Try}
class Loader(db: DB, blocksApi: CommonBlocksApi, target: Option[(Int, ByteStr)], streamId: String) extends ScorexLogging {
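  // Reads stored updates starting at `fromHeight`, at most 100 per batch (and never
  // past the configured target height), verifying that the update at the target
  // height carries the expected block id.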
private def loadBatch(res: DBResource, fromHeight: Int): Try[Seq[PBBlockchainUpdated]] = Try {
res.iterator.seek(Ints.toByteArray(fromHeight))
val buffer = ArrayBuffer[PBBlockchainUpdated]()
while (res.iterator.hasNext && buffer.size < 100 && target.forall { case (h, _) => fromHeight + buffer.size <= h }) {
buffer.append(Loader.parseUpdate(res.iterator.next().getValue, blocksApi, fromHeight + buffer.size))
}
for ((h, id) <- target if h == fromHeight + buffer.size - 1; u <- buffer.lastOption) {
require(
u.id.toByteArray.sameElements(id.arr),
s"Stored update ${Base58.encode(u.id.toByteArray)} at ${u.height} does not match target $id at $h"
)
}
buffer.toSeq
}
private def streamFrom(fromHeight: Int): Observable[PBBlockchainUpdated] = db.resourceObservable.flatMap { res =>
loadBatch(res, fromHeight) match {
case Success(nextBatch) =>
if (nextBatch.isEmpty) Observable.empty[PBBlockchainUpdated]
else Observable.fromIterable(nextBatch) ++ streamFrom(fromHeight + nextBatch.size)
case Failure(exception) => Observable.raiseError(exception)
}
}
def loadUpdates(fromHeight: Int): Observable[PBBlockchainUpdated] = {
log.trace(s"[$streamId] Loading stored updates from $fromHeight up to ${target.fold("the most recent one") { case (h, id) => s"$id at $h" }}")
streamFrom(fromHeight)
}
}
object Loader {
def parseUpdate(bs: Array[Byte], blocksApi: CommonBlocksApi, height: Int): PBBlockchainUpdated =
PBBlockchainUpdated
.parseFrom(bs)
.update(
_.append.update(
_.body.modify {
case Body.Block(value) =>
Body.Block(value.copy(block = blocksApi.blockAtHeight(height).map {
case (meta, txs) => PBBlock(Some(meta.header.toPBHeader), meta.signature.toByteString, txs.map(_._2.toPB))
}))
case other => other
}
)
)
def loadUpdate(res: DBResource, blocksApi: CommonBlocksApi, height: Int): PBBlockchainUpdated =
parseUpdate(res.get(Repo.keyForHeight(height)), blocksApi, height)
}
| wavesplatform/Waves | grpc-server/src/main/scala/com/wavesplatform/events/Loader.scala | Scala | mit | 2,998 |
package org.mellowtech.gapi.drive
import java.io.{ByteArrayOutputStream, InputStream, OutputStream}
import java.nio.charset.Charset
import java.nio.file.Path
import com.google.api.client.auth.oauth2.Credential
import com.google.api.client.http.{ByteArrayContent, FileContent, InputStreamContent}
import com.google.api.services.drive.Drive
import com.google.api.services.drive.model.{About, File, FileList}
import org.mellowtech.gapi.config.GApiConfig
import org.mellowtech.gapi.service.GService
import scala.concurrent.{ExecutionContext, Future}
/** Scala Wrapper for the Google Drive API
*
  * DriveService simplifies usage of the Google Drive API for Scala developers. It provides methods
  * for common Drive operations while also giving the developer the option to work directly against
  * the underlying Drive API.
*
* @see <a href="https://developers.google.com/apis-explorer/#p/drive/v3/">Drive API Explorer</a>
* @see <a href="https://developers.google.com/resources/api-libraries/documentation/drive/v3/java/latest/">Drive Javadoc</a>
* @param credential
* @param ec
*/
class DriveService(val credential: Credential)(implicit val ec: ExecutionContext, c: GApiConfig) extends GService[Drive] {
import AboutField._
import FileListField.FileListField
import Operators._
import org.mellowtech.gapi.GApiImplicits._
val drive: Drive = new Drive.Builder(httpTransport, jsonFactory,
credential).setApplicationName(c.applicationName).build()
val service = drive
val createFolder: (String, Seq[String]) => Future[File] = create(DriveService.GFOLDER)
val createDocument: (String, Seq[String]) => Future[File] = create(DriveService.GDOCUMENT)
val createSheet: (String, Seq[String]) => Future[File] = create(DriveService.GSHEET)
val createPresentation: (String, Seq[String]) => Future[File] = create(DriveService.GPRESENTATION)
def aboutAll: Future[About] = {
about(AboutField.values.toSeq: _*)
}
def about(fields: AboutField*): Future[About] = {
execA[About] {
val exec = drive.about.get.setFields(fields.mkString(","))
exec.execute()
}
}
def file(id: String, all: Boolean = true): Future[File] = execA[File] {
val fields: String = all match {
case true => {
FileField.allFields
}
case false => ""
}
drive.files().get(id).setFields(fields).execute()
}
def root: Future[File] = execA[File] {
val f = drive.files().get("root").execute()
f
}
def create(mimeType: String)(name: String, parentIds: Seq[String]): Future[File] = execA {
val f = DriveService.toGoogleFile(name, parentIds, mimeType)
val cf = drive.files.create(f).execute()
cf
}
def list(parent: File): Future[FileList] = list(parent.getId)
def list(parentId: String): Future[FileList] = list(Clause(parents in parentId))
def list(q: Clause): Future[FileList] = listOf(fl => fl.setQ(q.render))
/** list files
*
* Control your file listing using some common options
*
* @param q
* @param pageSize
* @param orderBy
* @param pageToken
* @param fileFields
* @return
*/
def list(q: Option[Clause], pageSize: Option[Int], orderBy: Option[Seq[String]], pageToken: Option[String],
fileFields: Option[Seq[FileListField]]) = listOf(l => {
var fields = ""
if (q.isDefined) l.setQ(q.get.render)
if (pageSize.isDefined) {
//println("nextPageToken = " + FileListField.nextPageToken.toString)
//fields = FileListField.nextPageToken.toString //"nextPageToken"
fields = "nextPageToken"
l.setPageSize(pageSize.get)
}
if (orderBy.isDefined) l.setOrderBy(orderBy.get.mkString(","))
if (pageToken.isDefined) l.setPageToken(pageToken.get)
if (fileFields.isDefined) {
if (pageSize.isDefined)
fields += ",files(" + fileFields.get.mkString(",") + ")"
else
fields += "files(" + fileFields.get.mkString(",") + ")"
}
})
/** list files
*
* This method offers the most flexibility and allows you to
* configure the Drive.Files.List object directly. For an in-depth description of how
* you search and list files see <a href="https://developers.google.com/drive/v3/web/search-parameters">search for files</a>
*
* @param f function to config the file listing
* @return A Future to a FileList
*/
def listOf(f: Drive#Files#List => Unit): Future[FileList] = {
val fl = drive.files().list()
f(fl)
execA(fl.execute())
}
def export(to: OutputStream, mimeType: String, id: String): Future[Unit] = execU {
drive.files().export(id, mimeType).executeMediaAndDownloadTo(to)
}
def download(id: String, codec: String = "UTF-8"): Future[String] = execA {
val barr = new ByteArrayOutputStream
val d = drive.files().get(id)
d.executeMediaAndDownloadTo(barr)
new String(barr.toByteArray, Charset.forName(codec))
}
def download(to: OutputStream, id: String, range: Some[(Int, Int)]): Future[Unit] = execU {
val d = drive.files().get(id)
if (range.isDefined)
d.getRequestHeaders.setRange("bytes=" + range.get._1 + "-" + range.get._2)
d.executeMediaAndDownloadTo(to)
}
def upload[T](content: T, name: String, mimeType: String, parentId: Option[String] = None,
convertTo: Option[String] = None): Future[File] = {
import com.google.api.services.drive.model.{File => GFile}
val mc = content match {
case x: Path => new FileContent(mimeType, x.toFile)
case x: java.io.File => new FileContent(mimeType, x)
case x: Array[Byte] => new ByteArrayContent(mimeType, x)
case x: InputStream => new InputStreamContent(mimeType, x)
case x => {
val b = x.toString.getBytes("UTF-8")
new ByteArrayContent(mimeType, b)
}
}
val file = new GFile
file.setName(name)
if (parentId.isDefined)
file.setParents(java.util.Collections.singletonList(parentId.get))
if (convertTo.isDefined)
file.setMimeType(convertTo.get)
val create = drive.files().create(file, mc).setFields("id")
execA(create.execute())
}
}
object DriveService {
val GFOLDER = "application/vnd.google-apps.folder"
val GSHEET = "application/vnd.google-apps.spreadsheet"
val GDOCUMENT = "application/vnd.google-apps.document"
val GPRESENTATION = "application/vnd.google-apps.presentation"
def isFolder(f: File): Boolean = Option(f.getMimeType) match {
case Some(m) => m.equals(GFOLDER)
case None => false
}
import scala.collection.JavaConverters._
def toGoogleFile(name: String, parentIds: Seq[String], mimeType: String): File = {
val f = new File()
f.setName(name)
f.setParents(parentIds.asJava)
f.setMimeType(mimeType)
f
}
def apply(credential: Credential)(implicit ec: ExecutionContext, c: GApiConfig): DriveService = new DriveService(credential)
}
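/**
 * Usage sketch only (not part of the original source). It assumes a previously
 * obtained OAuth2 `credential` and an implicit `GApiConfig`; the folder name and
 * parent id below are purely illustrative.
 */
object DriveServiceSketch {
  def demo(credential: Credential)(implicit ec: ExecutionContext, c: GApiConfig): Future[File] = {
    val drive = DriveService(credential)
    for {
      folder <- drive.createFolder("reports", Seq("root")) // create a folder under the Drive root
      _ <- drive.list(folder)                              // list its (initially empty) children
    } yield folder
  }
}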
| msvens/gapi | drive/src/main/scala/org/mellowtech/gapi/drive/DriveService.scala | Scala | apache-2.0 | 6,881 |
package com.sfxcode.sapphire.core.demo.issues.controller
import com.sfxcode.sapphire.core.controller.ViewController
import com.sfxcode.sapphire.core.demo.issues.EmptyName
import com.sfxcode.sapphire.core.demo.issues.model.{ Issue, IssueDataBase }
import com.sfxcode.sapphire.core.value._
import com.typesafe.scalalogging.LazyLogging
import javafx.event.ActionEvent
import javafx.fxml.FXML
import javafx.scene.control.{ Button, ListView, TableView }
import javafx.scene.layout.AnchorPane
import javax.inject.Inject
import scalafx.Includes._
import scalafx.collections.ObservableBuffer
import scalafx.scene.control.SelectionMode
class IssueTrackingLiteController extends ViewController with LazyLogging {
@Inject
var emptyName: EmptyName = _
@FXML
var table: TableView[FXBean[Issue]] = _
@FXML
var list: ListView[String] = _
@FXML
var saveButton: Button = _
@FXML
var deleteButton: Button = _
@FXML
var detailPane: AnchorPane = _
lazy val issueAdapter = FXBeanAdapter[Issue](this, detailPane)
val displayedProjectNames = new ObservableBuffer[String]()
val displayedIssues = new ObservableBuffer[String]()
override def didGainVisibilityFirstTime(): Unit = {
super.didGainVisibilityFirstTime()
detailPane.setVisible(false)
deleteButton.setVisible(false)
saveButton.setVisible(false)
list.getSelectionModel.setSelectionMode(SelectionMode.Single)
list.getSelectionModel.selectedItemProperty.onChange((_, oldValue, newValue) => updateProject(oldValue, newValue))
table.getSelectionModel.selectedItemProperty.onChange((_, _, newValue) => selectIssue(newValue))
}
override def didGainVisibility() {
super.didGainVisibility()
issueAdapter.addBindings(KeyBindings("synopsis", "description"))
issueAdapter.addBinding(saveButton.visibleProperty(), "_hasChanges")
// issueAdapter.parent = detailPane
displayedProjectNames.++=(IssueDataBase.projectNames.sortBy(name => name))
list.setItems(displayedProjectNames)
detailPane.visibleProperty.bind(issueAdapter.hasBeanProperty)
deleteButton.visibleProperty.bind(issueAdapter.hasBeanProperty)
}
def selectedProjectName: Option[String] = {
val selected = list.getSelectionModel.selectedItem
if (selected.value == null)
return None
Some(selected.value)
}
def actionCreateIssue(event: ActionEvent) {
selectedProjectName.foreach { projectName =>
val newIssue = IssueDataBase.createIssue(projectName, emptyName.name)
updateProject(projectName, projectName)
selectIssue(newIssue)
}
}
def actionDeleteIssue(event: ActionEvent) {
selectedProjectName.foreach { projectName =>
IssueDataBase.deleteIssue(issueAdapter.beanProperty.value.bean.id)
updateProject(projectName, projectName)
}
}
def actionSaveIssue(event: ActionEvent) {
issueAdapter.clearChanges()
}
def selectIssue(issue: FXBean[Issue]) {
issue match {
case issue: FXBean[Issue] =>
issueAdapter.revert()
issueAdapter.beanProperty.value = issue
case _ =>
issueAdapter.unset()
}
}
def updateProject(oldValue: String, newValue: String) {
projectUnselected(oldValue)
projectSelected(newValue)
}
def projectSelected(projectName: String) {
projectName match {
case name: String =>
val newItems = IssueDataBase.projectsMap(projectName)
newItems.foreach(item => table.getItems.add(item))
}
}
def projectUnselected(projectName: String) {
table.getItems.clear()
}
}
| sfxcode/sapphire-core | demos/issues/src/main/scala/com/sfxcode/sapphire/core/demo/issues/controller/IssueTrackingLiteController.scala | Scala | apache-2.0 | 3,547 |
package com.twitter.finagle.memcached.protocol.text.server
import com.twitter.finagle.memcached.protocol.ClientError
import com.twitter.finagle.memcached.protocol.text._
import com.twitter.finagle.memcached.util.ChannelBufferUtils._
import com.twitter.finagle.memcached.util.ParserUtils
import com.twitter.finagle.netty3.ChannelBufferBuf
import com.twitter.io.Buf
import com.twitter.util.StateMachine
import org.jboss.netty.buffer.ChannelBuffer
import org.jboss.netty.channel._
class Decoder(storageCommands: collection.Set[ChannelBuffer]) extends AbstractDecoder with StateMachine {
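  // Two-state machine: first a command line is decoded; for storage commands the
  // decoder switches to AwaitingData until the announced number of bytes arrives.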
case class AwaitingCommand() extends State
case class AwaitingData(tokens: Seq[ChannelBuffer], bytesNeeded: Int) extends State
final protected[memcached] def start() {
state = AwaitingCommand()
}
override def exceptionCaught(ctx: ChannelHandlerContext, e: ExceptionEvent) {
super.exceptionCaught(ctx, e)
}
def decode(ctx: ChannelHandlerContext, channel: Channel, buffer: ChannelBuffer): Decoding = {
state match {
case AwaitingCommand() =>
decodeLine(buffer, needsData) { tokens =>
Tokens(tokens.map { ChannelBufferBuf.Owned(_) })
}
case AwaitingData(tokens, bytesNeeded) =>
decodeData(bytesNeeded, buffer) { data =>
val commandName = ChannelBufferBuf.Owned(tokens.head)
if (commandName.equals(Buf.Utf8("cas"))) {// cas command
TokensWithData(
tokens.slice(0, 5).map { ChannelBufferBuf.Owned(_) },
ChannelBufferBuf.Owned(data),
Some(ChannelBufferBuf.Owned(tokens(5)))
)
} else { // other commands
TokensWithData(
tokens.map { ChannelBufferBuf.Owned(_) },
ChannelBufferBuf.Owned(data)
)
}
}
}
}
final protected[memcached] def awaitData(tokens: Seq[ChannelBuffer], bytesNeeded: Int) {
state = AwaitingData(tokens, bytesNeeded)
}
private[this] def needsData(tokens: Seq[ChannelBuffer]): Int = {
val commandName = tokens.head
if (storageCommands.contains(commandName)) {
validateStorageCommand(tokens)
val bytesNeeded = tokens(4).toInt
bytesNeeded
} else -1
}
private[this] def validateStorageCommand(tokens: Seq[ChannelBuffer]) = {
if (tokens.size < 5) throw new ClientError("Too few arguments")
if (tokens.size > 6) throw new ClientError("Too many arguments")
if (!ParserUtils.isDigits(tokens(4))) throw new ClientError("Bad frame length")
}
}
| sveinnfannar/finagle | finagle-memcached/src/main/scala/com/twitter/finagle/memcached/protocol/text/server/Decoder.scala | Scala | apache-2.0 | 2,537 |
package im.actor.server.api.rpc.service.auth
import java.time.{ ZoneOffset, LocalDateTime }
import scala.concurrent._, duration._
import scala.concurrent.forkjoin.ThreadLocalRandom
import scala.language.postfixOps
import scalaz._
import akka.actor.{ ActorRef, ActorSystem }
import akka.event.Logging
import akka.util.Timeout
import org.joda.time.DateTime
import shapeless._
import slick.dbio.DBIO
import slick.driver.PostgresDriver.api._
import im.actor.api.rpc.DBIOResult._
import im.actor.api.rpc._
import im.actor.api.rpc.auth.EmailActivationType._
import im.actor.api.rpc.auth._
import im.actor.api.rpc.misc._
import im.actor.api.rpc.users.Sex.Sex
import im.actor.server.activation.internal.CodeActivation
import im.actor.server.db.DbExtension
import im.actor.server.oauth.{ OAuth2ProvidersDomains, GoogleProvider }
import im.actor.server.persist.auth.AuthTransaction
import im.actor.server.push.SeqUpdatesExtension
import im.actor.server.session._
import im.actor.server.social.{ SocialExtension, SocialManagerRegion }
import im.actor.server.util.PhoneNumberUtils._
import im.actor.server.user.{ UserViewRegion, UserExtension, UserOffice, UserProcessorRegion }
import im.actor.server.util._
import im.actor.server.{ persist, models }
case class PubSubMediator(mediator: ActorRef)
class AuthServiceImpl(val activationContext: CodeActivation, mediator: ActorRef)(
implicit
val sessionRegion: SessionRegion,
val actorSystem: ActorSystem,
val oauth2Service: GoogleProvider
) extends AuthService with AuthHelpers with Helpers {
import AnyRefLogSource._
import IdUtils._
private trait SignType
private case class Up(name: String, isSilent: Boolean) extends SignType
private case object In extends SignType
override implicit val ec: ExecutionContext = actorSystem.dispatcher
protected implicit val db: Database = DbExtension(actorSystem).db
protected implicit val seqUpdExt: SeqUpdatesExtension = SeqUpdatesExtension(actorSystem)
protected implicit val userProcessorRegion: UserProcessorRegion = UserExtension(actorSystem).processorRegion
protected implicit val userViewRegion: UserViewRegion = UserExtension(actorSystem).viewRegion
protected implicit val socialRegion: SocialManagerRegion = SocialExtension(actorSystem).region
protected val log = Logging(actorSystem, this)
private val maxGroupSize: Int = 300
implicit val mediatorWrap = PubSubMediator(mediator)
implicit protected val timeout = Timeout(10 seconds)
override def jhandleGetAuthSessions(clientData: ClientData): Future[HandlerResult[ResponseGetAuthSessions]] = {
    val authorizedAction = requireAuth(clientData).map { client ⇒
      for {
        sessionModels ← persist.AuthSession.findByUserId(client.userId)
      } yield {
        val sessionStructs = sessionModels map { sessionModel ⇒
val authHolder =
if (client.authId == sessionModel.authId) {
AuthHolder.ThisDevice
} else {
AuthHolder.OtherDevice
}
AuthSession(
sessionModel.id,
authHolder,
sessionModel.appId,
sessionModel.appTitle,
sessionModel.deviceTitle,
(sessionModel.authTime.getMillis / 1000).toInt,
sessionModel.authLocation,
sessionModel.latitude,
sessionModel.longitude
)
}
Ok(ResponseGetAuthSessions(sessionStructs.toVector))
}
}
db.run(toDBIOAction(authorizedAction))
}
def jhandleCompleteOAuth2(transactionHash: String, code: String, clientData: ClientData): Future[HandlerResult[ResponseAuth]] = {
val action: Result[ResponseAuth] =
for {
        transaction ← fromDBIOOption(AuthErrors.InvalidAuthTransaction)(persist.auth.AuthEmailTransaction.find(transactionHash))
        token ← fromDBIOOption(AuthErrors.FailedToGetOAuth2Token)(oauth2Service.completeOAuth(code, transaction.email, transaction.redirectUri))
        profile ← fromFutureOption(AuthErrors.FailedToGetOAuth2Token)(oauth2Service.fetchProfile(token.accessToken))
        _ ← fromBoolean(AuthErrors.OAuthUserIdDoesNotMatch)(transaction.email == profile.email)
        _ ← fromDBIO(persist.OAuth2Token.createOrUpdate(token))
        _ ← fromDBIO(AuthTransaction.updateSetChecked(transactionHash))
        email ← fromDBIOOption(AuthErrors.EmailUnoccupied)(persist.UserEmail.find(transaction.email))
        user ← authorizeT(email.userId, profile.locale.getOrElse(""), clientData)
        userStruct ← fromDBIO(DBIO.from(UserOffice.getApiStruct(user.id, user.id, clientData.authId)))
//refresh session data
authSession = models.AuthSession(
userId = user.id,
id = nextIntId(ThreadLocalRandom.current()),
authId = clientData.authId,
appId = transaction.appId,
appTitle = models.AuthSession.appTitleOf(transaction.appId),
deviceHash = transaction.deviceHash,
deviceTitle = transaction.deviceTitle,
authTime = DateTime.now,
authLocation = "",
latitude = None,
longitude = None
)
        _ ← fromDBIO(refreshAuthSession(transaction.deviceHash, authSession))
        _ ← fromDBIO(persist.auth.AuthTransaction.delete(transactionHash))
        ack ← fromFuture(authorize(user.id, clientData))
} yield ResponseAuth(userStruct, misc.Config(maxGroupSize))
db.run(action.run)
}
def jhandleGetOAuth2Params(transactionHash: String, redirectUrl: String, clientData: ClientData): Future[HandlerResult[ResponseGetOAuth2Params]] = {
val action =
for {
        transaction ← fromDBIOOption(AuthErrors.InvalidAuthTransaction)(persist.auth.AuthEmailTransaction.find(transactionHash))
        url ← fromOption(AuthErrors.RedirectUrlInvalid)(oauth2Service.getAuthUrl(redirectUrl, transaction.email))
        _ ← fromDBIO(persist.auth.AuthEmailTransaction.updateRedirectUri(transaction.transactionHash, redirectUrl))
} yield ResponseGetOAuth2Params(url)
db.run(action.run)
}
def jhandleStartPhoneAuth(phoneNumber: Long, appId: Int, apiKey: String, deviceHash: Array[Byte], deviceTitle: String, clientData: ClientData): Future[HandlerResult[ResponseStartPhoneAuth]] = {
val action = for {
      normalizedPhone ← fromOption(AuthErrors.PhoneNumberInvalid)(normalizeLong(phoneNumber).headOption)
      optAuthTransaction ← fromDBIO(persist.auth.AuthPhoneTransaction.findByPhone(normalizedPhone))
      transactionHash ← optAuthTransaction match {
        case Some(transaction) ⇒ point(transaction.transactionHash)
        case None ⇒
val accessSalt = ACLUtils.nextAccessSalt()
val transactionHash = ACLUtils.authTransactionHash(accessSalt)
val phoneAuthTransaction = models.AuthPhoneTransaction(normalizedPhone, transactionHash, appId, apiKey, deviceHash, deviceTitle, accessSalt)
for {
            _ ← fromDBIO(persist.auth.AuthPhoneTransaction.create(phoneAuthTransaction))
            _ ← fromDBIO(sendSmsCode(normalizedPhone, genSmsCode(normalizedPhone), Some(transactionHash)))
} yield transactionHash
}
      isRegistered ← fromDBIO(persist.UserPhone.exists(normalizedPhone))
} yield ResponseStartPhoneAuth(transactionHash, isRegistered)
db.run(action.run)
}
def jhandleSignUp(transactionHash: String, name: String, sex: Option[Sex], clientData: ClientData): Future[HandlerResult[ResponseAuth]] = {
val action: Result[ResponseAuth] =
for {
//retrieve `authTransaction`
        transaction ← fromDBIOOption(AuthErrors.InvalidAuthTransaction)(persist.auth.AuthTransaction.findChildren(transactionHash))
        //ensure that `authTransaction` is checked
        _ ← fromBoolean(AuthErrors.NotValidated)(transaction.isChecked)
        signInORsignUp ← transaction match {
          case p: models.AuthPhoneTransaction ⇒ newUserPhoneSignUp(p, name, sex)
          case e: models.AuthEmailTransaction ⇒ newUserEmailSignUp(e, name, sex)
        }
        //fall back to sign in if the user already exists
        user ← signInORsignUp match {
          case -\\/((userId, countryCode)) ⇒ authorizeT(userId, countryCode, clientData)
          case \\/-(user) ⇒ handleUserCreate(user, transaction, clientData.authId)
        }
        userStruct ← fromDBIO(DBIO.from(UserOffice.getApiStruct(user.id, user.id, clientData.authId)))
//refresh session data
authSession = models.AuthSession(
userId = user.id,
id = nextIntId(ThreadLocalRandom.current()),
authId = clientData.authId,
appId = transaction.appId,
appTitle = models.AuthSession.appTitleOf(transaction.appId),
deviceHash = transaction.deviceHash,
deviceTitle = transaction.deviceTitle,
authTime = DateTime.now,
authLocation = "",
latitude = None,
longitude = None
)
        _ ← fromDBIO(refreshAuthSession(transaction.deviceHash, authSession))
        ack ← fromFuture(authorize(user.id, clientData))
} yield ResponseAuth(userStruct, misc.Config(maxGroupSize))
db.run(action.run)
}
def jhandleStartEmailAuth(email: String, appId: Int, apiKey: String, deviceHash: Array[Byte], deviceTitle: String, clientData: ClientData): Future[HandlerResult[ResponseStartEmailAuth]] = {
val action = for {
      validEmail ← fromEither(validEmail(email).leftMap(validationFailed("EMAIL_INVALID", _))) //it actually does not change input email
      activationType = if (OAuth2ProvidersDomains.supportsOAuth2(email)) OAUTH2 else CODE
      isRegistered ← fromDBIO(persist.UserEmail.exists(validEmail))
      optTransaction ← fromDBIO(persist.auth.AuthEmailTransaction.findByEmail(validEmail))
      transactionHash ← optTransaction match {
        case Some(trans) ⇒ point(trans.transactionHash)
        case None ⇒
val accessSalt = ACLUtils.nextAccessSalt()
val transactionHash = ACLUtils.authTransactionHash(accessSalt)
val emailAuthTransaction = models.AuthEmailTransaction(validEmail, None, transactionHash, appId, apiKey, deviceHash, deviceTitle, accessSalt)
activationType match {
            case CODE ⇒
              for {
                _ ← fromDBIO(persist.auth.AuthEmailTransaction.create(emailAuthTransaction))
                _ ← fromDBIO(sendEmailCode(email, genCode(), transactionHash))
              } yield transactionHash
            case OAUTH2 ⇒
              for {
                _ ← fromDBIO(persist.auth.AuthEmailTransaction.create(emailAuthTransaction))
} yield transactionHash
}
}
} yield ResponseStartEmailAuth(transactionHash, isRegistered, activationType)
db.run(action.run)
}
//TODO: add email code validation
def jhandleValidateCode(transactionHash: String, code: String, clientData: ClientData): Future[HandlerResult[ResponseAuth]] = {
val action: Result[ResponseAuth] =
for {
        //retrieve `authTransaction`
        transaction ← fromDBIOOption(AuthErrors.InvalidAuthTransaction)(persist.auth.AuthTransaction.findChildren(transactionHash))
        //validate code
        userAndCountry ← validateCode(transaction, code)
        (userId, countryCode) = userAndCountry
        //sign in user and delete auth transaction
        user ← authorizeT(userId, countryCode, clientData)
        userStruct ← fromDBIO(DBIO.from(UserOffice.getApiStruct(user.id, user.id, clientData.authId)))
        _ ← fromDBIO(persist.auth.AuthTransaction.delete(transaction.transactionHash))
//refresh session data
authSession = models.AuthSession(
userId = user.id,
id = nextIntId(ThreadLocalRandom.current()),
authId = clientData.authId,
appId = transaction.appId,
appTitle = models.AuthSession.appTitleOf(transaction.appId),
deviceHash = transaction.deviceHash,
deviceTitle = transaction.deviceTitle,
authTime = DateTime.now,
authLocation = "",
latitude = None,
longitude = None
)
        _ ← fromDBIO(refreshAuthSession(transaction.deviceHash, authSession))
        ack ← fromFuture(authorize(user.id, clientData))
} yield ResponseAuth(userStruct, misc.Config(maxGroupSize))
db.run(action.run)
}
override def jhandleSignOut(clientData: ClientData): Future[HandlerResult[ResponseVoid]] = {
    val action = requireAuth(clientData) map { implicit client ⇒
      persist.AuthSession.findByAuthId(client.authId) flatMap {
        case Some(session) ⇒
          for (_ ← DBIO.from(UserOffice.logout(session))) yield Ok(misc.ResponseVoid)
        case None ⇒ throw new Exception(s"Cannot find AuthSession for authId: ${client.authId}")
}
}
db.run(toDBIOAction(action))
}
override def jhandleTerminateAllSessions(clientData: ClientData): Future[HandlerResult[ResponseVoid]] = {
    val authorizedAction = requireAuth(clientData).map { client ⇒
      for {
        sessions ← persist.AuthSession.findByUserId(client.userId) map (_.filterNot(_.authId == client.authId))
        _ ← DBIO.from(Future.sequence(sessions map UserOffice.logout))
} yield {
Ok(ResponseVoid)
}
}
db.run(toDBIOAction(authorizedAction))
}
override def jhandleTerminateSession(id: Int, clientData: ClientData): Future[HandlerResult[ResponseVoid]] = {
    val authorizedAction = requireAuth(clientData).map { client ⇒
      persist.AuthSession.find(client.userId, id).headOption flatMap {
        case Some(session) ⇒
          for (_ ← DBIO.from(UserOffice.logout(session))) yield Ok(ResponseVoid)
        case None ⇒
DBIO.successful(Error(AuthErrors.AuthSessionNotFound))
}
}
db.run(toDBIOAction(authorizedAction))
}
//TODO: move deprecated methods to separate trait
@deprecated("schema api changes", "2015-06-09")
override def jhandleSendAuthCallObsolete(
phoneNumber: Long,
smsHash: String,
appId: Int,
apiKey: String,
clientData: ClientData
): Future[HandlerResult[ResponseVoid]] =
Future {
throw new Exception("Not implemented")
}
@deprecated("schema api changes", "2015-06-09")
override def jhandleSendAuthCodeObsolete(
rawPhoneNumber: Long,
appId: Int,
apiKey: String,
clientData: ClientData
): Future[HandlerResult[ResponseSendAuthCodeObsolete]] = {
PhoneNumberUtils.normalizeLong(rawPhoneNumber).headOption match {
      case None ⇒
        Future.successful(Error(AuthErrors.PhoneNumberInvalid))
      case Some(normPhoneNumber) ⇒
        val action = persist.AuthSmsCodeObsolete.findByPhoneNumber(normPhoneNumber).headOption.flatMap {
          case Some(models.AuthSmsCodeObsolete(_, _, smsHash, smsCode, _)) ⇒
            DBIO.successful(normPhoneNumber :: smsHash :: smsCode :: HNil)
          case None ⇒
val smsHash = genSmsHash()
val smsCode = genSmsCode(normPhoneNumber)
for (
              _ ← persist.AuthSmsCodeObsolete.create(
id = ThreadLocalRandom.current().nextLong(),
phoneNumber = normPhoneNumber,
smsHash = smsHash,
smsCode = smsCode
)
) yield (normPhoneNumber :: smsHash :: smsCode :: HNil)
        }.flatMap { res ⇒
persist.UserPhone.exists(normPhoneNumber) map (res :+ _)
}.map {
          case number :: smsHash :: smsCode :: isRegistered :: HNil ⇒
sendSmsCode(number, smsCode, None)
Ok(ResponseSendAuthCodeObsolete(smsHash, isRegistered))
}
db.run(action)
}
}
@deprecated("schema api changes", "2015-06-09")
override def jhandleSignInObsolete(
rawPhoneNumber: Long,
smsHash: String,
smsCode: String,
deviceHash: Array[Byte],
deviceTitle: String,
appId: Int,
appKey: String,
clientData: ClientData
): Future[HandlerResult[ResponseAuth]] =
handleSign(
In,
rawPhoneNumber, smsHash, smsCode,
deviceHash, deviceTitle, appId, appKey,
clientData
)
@deprecated("schema api changes", "2015-06-09")
override def jhandleSignUpObsolete(
rawPhoneNumber: Long,
smsHash: String,
smsCode: String,
name: String,
deviceHash: Array[Byte],
deviceTitle: String,
appId: Int,
appKey: String,
isSilent: Boolean,
clientData: ClientData
): Future[HandlerResult[ResponseAuth]] =
handleSign(
Up(name, isSilent),
rawPhoneNumber, smsHash, smsCode,
deviceHash, deviceTitle, appId, appKey,
clientData
)
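  // Shared legacy flow: checks the obsolete SMS hash/code pair and then either
  // registers a new user (sign-up) or falls back to sign-in when the phone is
  // already registered.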
private def handleSign(
signType: SignType,
rawPhoneNumber: Long,
smsHash: String,
smsCode: String,
deviceHash: Array[Byte],
deviceTitle: String,
appId: Int,
appKey: String,
clientData: ClientData
): Future[HandlerResult[ResponseAuth]] = {
normalizeWithCountry(rawPhoneNumber).headOption match {
      case None ⇒ Future.successful(Error(AuthErrors.PhoneNumberInvalid))
      case Some((normPhoneNumber, countryCode)) ⇒
if (smsCode.isEmpty) Future.successful(Error(AuthErrors.PhoneCodeEmpty))
else {
val action = (for {
            optCode ← persist.AuthSmsCodeObsolete.findByPhoneNumber(normPhoneNumber).headOption
            optPhone ← persist.UserPhone.findByPhoneNumber(normPhoneNumber).headOption
} yield (optCode :: optPhone :: HNil)).flatMap {
            case None :: _ :: HNil ⇒ DBIO.successful(Error(AuthErrors.PhoneCodeExpired))
            case Some(smsCodeModel) :: _ :: HNil if smsCodeModel.smsHash != smsHash ⇒
              DBIO.successful(Error(AuthErrors.PhoneCodeExpired))
            case Some(smsCodeModel) :: _ :: HNil if smsCodeModel.smsCode != smsCode ⇒
              DBIO.successful(Error(AuthErrors.PhoneCodeInvalid))
            case Some(_) :: optPhone :: HNil ⇒
              signType match {
                case Up(rawName, isSilent) ⇒
persist.AuthSmsCodeObsolete.deleteByPhoneNumber(normPhoneNumber).andThen(
optPhone match {
// Phone does not exist, register the user
                      case None ⇒ withValidName(rawName) { name ⇒
val rnd = ThreadLocalRandom.current()
val userId = nextIntId(rnd)
//todo: move this to UserOffice
val user = models.User(userId, ACLUtils.nextAccessSalt(rnd), name, countryCode, models.NoSex, models.UserState.Registered, LocalDateTime.now(ZoneOffset.UTC))
for {
                          _ ← DBIO.from(UserOffice.create(user.id, user.accessSalt, user.name, user.countryCode, im.actor.api.rpc.users.Sex(user.sex.toInt), isBot = false))
                          _ ← DBIO.from(UserOffice.auth(userId, clientData.authId))
                          _ ← DBIO.from(UserOffice.addPhone(user.id, normPhoneNumber))
                          _ ← persist.AvatarData.create(models.AvatarData.empty(models.AvatarData.OfUser, user.id.toLong))
} yield {
\\/-(user :: HNil)
}
}
// Phone already exists, fall back to SignIn
                      case Some(phone) ⇒
signIn(clientData.authId, phone.userId, countryCode, clientData)
}
)
                case In ⇒
                  optPhone match {
                    case None ⇒ DBIO.successful(Error(AuthErrors.PhoneNumberUnoccupied))
                    case Some(phone) ⇒
persist.AuthSmsCodeObsolete.deleteByPhoneNumber(normPhoneNumber).andThen(
signIn(clientData.authId, phone.userId, countryCode, clientData)
)
}
}
}.flatMap {
            case \\/-(user :: HNil) ⇒
val rnd = ThreadLocalRandom.current()
val authSession = models.AuthSession(
userId = user.id,
id = nextIntId(rnd),
authId = clientData.authId,
appId = appId,
appTitle = models.AuthSession.appTitleOf(appId),
deviceHash = deviceHash,
deviceTitle = deviceTitle,
authTime = DateTime.now,
authLocation = "",
latitude = None,
longitude = None
)
for {
                prevSessions ← persist.AuthSession.findByDeviceHash(deviceHash)
                _ ← DBIO.from(Future.sequence(prevSessions map UserOffice.logout))
                _ ← persist.AuthSession.create(authSession)
                userStruct ← DBIO.from(UserOffice.getApiStruct(user.id, user.id, clientData.authId))
} yield {
Ok(
ResponseAuth(
userStruct,
misc.Config(maxGroupSize)
)
)
}
            case error @ -\\/(_) ⇒ DBIO.successful(error)
}
for {
            result ← db.run(action)
} yield {
result match {
              case Ok(r: ResponseAuth) ⇒
                sessionRegion.ref ! SessionEnvelope(clientData.authId, clientData.sessionId).withAuthorizeUser(AuthorizeUser(r.user.id))
              case _ ⇒
}
result
}
}
}
}
private def signIn(authId: Long, userId: Int, countryCode: String, clientData: ClientData) = {
persist.User.find(userId).headOption.flatMap {
      case None ⇒ throw new Exception("Failed to retrieve user")
      case Some(user) ⇒
        for {
          _ ← DBIO.from(UserOffice.changeCountryCode(userId, countryCode))
          _ ← DBIO.from(UserOffice.auth(userId, clientData.authId))
} yield \\/-(user :: HNil)
}
}
}
| lstNull/actor-platform | actor-server/actor-rpc-api/src/main/scala/im/actor/server/api/rpc/service/auth/AuthServiceImpl.scala | Scala | mit | 22,275 |
package org.ai4fm.proofprocess.cdo
import java.net.URI
import org.ai4fm.proofprocess.cdo.internal.PProcessCDOPlugin
import org.eclipse.core.runtime.{IProgressMonitor, NullProgressMonitor}
import org.eclipse.emf.cdo.session.CDOSession
/**
* A facade to access ProofProcess CDO link.
*
* @author Andrius Velykis
*/
object PProcessCDO {
/**
* Retrieves an open session for the given repository.
*
* Initialises a new repository if one does not exist, migrates existing file-based data or
* upgrades the repository if it uses old EMF packages.
*/
def session(databaseLoc: URI,
repositoryName: String,
monitor: IProgressMonitor = new NullProgressMonitor): CDOSession =
PProcessCDOPlugin.plugin.session(databaseLoc, repositoryName)
/**
* Forces upgrade of the repository.
*
* Note that this action may also help compact the repository in the database.
*
* The existing CDO sessions on the repository will not work after upgrade:
* need to reinitialise new CDO sessions.
*/
def upgradeRepository(databaseLoc: URI,
repositoryName: String,
monitor: IProgressMonitor = new NullProgressMonitor) =
PProcessCDOPlugin.plugin.upgradeRepository(databaseLoc, repositoryName)
}
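/**
 * Usage sketch only (not part of the original source); the database location and
 * repository name below are illustrative.
 */
object PProcessCDOSketch {
  def openExampleSession(): CDOSession =
    PProcessCDO.session(new URI("file:///tmp/pp-cdo"), "proofprocess")
}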
| andriusvelykis/proofprocess | org.ai4fm.proofprocess.cdo/src/org/ai4fm/proofprocess/cdo/PProcessCDO.scala | Scala | epl-1.0 | 1,304 |
package chandu0101.scalajs.react.components.util
import japgolly.scalajs.react.vdom.prefix_<^._
/**
* Created by chandrasekharkode .
*/
trait CommonStyles extends RCustomTags{
val cursorPointer = ^.cursor := "pointer"
val cursorDefault = ^.cursor := "default"
val displayInlineBlock = ^.display := "inline-block"
val displayBlock = ^.display := "block"
val textAlignCenter = ^.textAlign := "center"
val textAlignLeft = ^.textAlign := "left"
val textAlignRight = ^.textAlign := "right"
val positionAbsolute = ^.position := "absolute"
val positionRelative = ^.position := "relative"
val overFlowHidden = ^.overflow := "hidden"
val outlineNone= ^.outline := "none"
def backgroundClipPreFixer(value : String) = Seq(^.backgroundClip := value, webkitBackgroundClip := value)
def boxShadowPreFixer(value : String) = Seq(^.boxShadow := value,WebkitBoxShadow := value)
def styleSet1(st1 : TagMod , more : (TagMod,Boolean) * ) : TagMod = {
st1.+(more.filter(_._2).map(_._1))
}
def styleSet(styles : (TagMod,Boolean) * ) : TagMod = {
styles.filter(_._2).map(_._1)
}
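  // Usage sketch (not part of the original source); `isClickable` and `isVisible`
  // are hypothetical booleans:
  //   styleSet(cursorPointer -> isClickable, displayBlock -> isVisible)
  //   styleSet1(textAlignCenter, cursorPointer -> isClickable)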
}
| coreyauger/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/util/CommonStyles.scala | Scala | apache-2.0 | 1,123 |
package advancedgol
trait Cell {
def alive: Boolean
def tick(neighbours: Int): Unit
}
class GoLCell(var alive: Boolean) extends Cell {
def this() = this(false)
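  // Conway's rules: with exactly 2 live neighbours the cell keeps its state,
  // exactly 3 makes it alive, any other count kills it.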
def tick(neighbours: Int) =
if (neighbours != 2) {
if (neighbours == 3) alive = true
else alive = false
}
}
class Wall(val alive: Boolean) extends Cell {
def tick(neighbours: Int) = {}
}
class Portal(val cell: Cell) extends Cell {
def alive = cell.alive
def tick(neighbours: Int) = {}
}
| tailcalled/Advanced-Game-of-Life | src/advancedgol/Cell.scala | Scala | agpl-3.0 | 478
package monocle.function
import monocle.MonocleSuite
import scala.collection.immutable.SortedMap
class FilterIndexExample extends MonocleSuite {
test("filterIndexes creates Traversal from a SortedMap, IMap to all values where the index matches the predicate") {
(SortedMap("One" -> 1, "Two" -> 2) applyTraversal filterIndex { k: String =>
k.toLowerCase.contains("o")
} getAll) shouldEqual List(
1,
2
)
(SortedMap("One" -> 1, "Two" -> 2) applyTraversal filterIndex { k: String =>
k.startsWith("T")
} set 3) shouldEqual SortedMap(
"One" -> 1,
"Two" -> 3
)
}
test(
"filterIndexes creates Traversal from a List, IList, Vector or Stream to all values where the index matches the predicate"
) {
(List(1, 3, 5, 7) applyTraversal filterIndex { i: Int => i % 2 == 0 } getAll) shouldEqual List(1, 5)
(List(1, 3, 5, 7) applyTraversal filterIndex { i: Int => i >= 2 } modify (_ + 2)) shouldEqual List(1, 3, 7, 9)
(Vector(1, 3, 5, 7) applyTraversal filterIndex { i: Int => i >= 2 } modify (_ + 2)) shouldEqual Vector(1, 3, 7, 9)
}
}
| aoiroaoino/Monocle | example/src/test/scala/monocle/function/FilterIndexExample.scala | Scala | mit | 1,111 |
/**
* Copyright (C) 2015 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.archivebag
import java.io.{ File, IOException }
import java.net.URI
import java.nio.file.Path
import net.lingala.zip4j.ZipFile
import net.lingala.zip4j.model.ZipParameters
import nl.knaw.dans.easy.validate.DansBagValidationResult
import nl.knaw.dans.lib.error._
import nl.knaw.dans.lib.logging.DebugEnhancedLogging
import nl.knaw.dans.lib.string._
import nl.knaw.dans.lib.encode.StringEncoding
import org.apache.commons.codec.digest.DigestUtils
import org.apache.commons.io.FileUtils
import resource.Using
import scalaj.http.HttpResponse
import scala.util.control.NonFatal
import scala.util.{ Failure, Success, Try }
object EasyArchiveBag extends Bagit5FacadeComponent with DebugEnhancedLogging {
override val bagFacade = new Bagit5Facade()
def run(implicit ps: Parameters): Try[URI] = {
logger.info(s"[${ ps.bagId }] Archiving bag")
for {
maybeVersionOfId <- bagFacade.getIsVersionOf(ps.bagDir.toPath)
_ <- handleIsVersionOf(maybeVersionOfId)
zippedBag <- generateUncreatedTempFile()
_ <- zipDir(ps.bagDir, zippedBag)
response <- putFile(zippedBag)
location <- handleBagStoreResponse(response)
.doIfSuccess(_ => zippedBag.delete())
.doIfFailure { case _ => zippedBag.delete() }
} yield location
}
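  // Bags that revise an earlier version (Is-Version-Of present) are validated first
  // and receive a refbags.txt listing the full bag sequence before being archived.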
private def handleIsVersionOf(maybeVersionOfId: Option[BagId])(implicit ps: Parameters): Try[Unit] = {
if (maybeVersionOfId.isDefined) {
for {
_ <- validateBag()
_ <- createRefBagsTxt(maybeVersionOfId.get)
} yield ()
}
else
Success(())
}
private def handleBagStoreResponse(response: HttpResponse[String])(implicit ps: Parameters): Try[URI] = {
response.code match {
case 201 =>
response.location
.map(loc => Success(new URI(loc)))
.getOrElse(Failure(new RuntimeException("No bag archival location found in response after successful upload")))
.doIfSuccess(uri => logger.info(s"Bag archival location created at: $uri"))
.flatMap(location => addBagToIndex(ps.bagId)
.map(_ => location)
.recover {
case t =>
logger.warn(s"BAG ${ ps.bagId } NOT ADDED TO BAG-INDEX. SUBSEQUENT REVISIONS WILL NOT BE PRUNED", t)
location
})
case 401 =>
Failure(UnauthorizedException(ps.bagId))
case _ =>
logger.error(s"${ ps.storageDepositService } returned:[ ${ response.statusLine } ]. Body = ${ response.body }")
Failure(new RuntimeException(s"Bag archiving failed: ${ response.statusLine }"))
}
}
private def addBagToIndex(bagId: BagId)(implicit ps: Parameters): Try[Unit] = Try {
val response = ps.http(ps.bagIndexService.resolve(s"bags/$bagId").toASCIIString)
.method("PUT")
.asString
response.code match {
case 201 => ()
case _ =>
logger.error(s"${ ps.bagIndexService } returned:[ ${ response.statusLine } ] while adding new bag to bag index. Body = ${ response.body }")
throw new IllegalStateException("Error trying to add bag to index")
}
}
def validateBag()(implicit ps: Parameters): Try[Unit] = {
Try {
val bagDirUri = ps.bagDir.toPath.toUri.toString.escapeString
val bagStoreUri = ps.storageDepositService.toString.escapeString
val validationUrlString = s"${ ps.validateDansBagService }validate?infoPackageType=AIP&bag-store=$bagStoreUri&uri=$bagDirUri"
logger.info(s"Calling Dans Bag Validation Service with ${ validationUrlString }")
ps.http(s"${ validationUrlString }")
.timeout(connTimeoutMs = 10000, readTimeoutMs = ps.readTimeOut)
.method("POST")
.header("Accept", "application/json")
.asString
} flatMap {
case r if r.code == 200 => for {
result <- DansBagValidationResult.fromJson(r.body)
validResult <- checkDansBagValidationResult(result)
} yield validResult
case r => Failure(new RuntimeException(s"Dans Bag Validation failed (${ r.code }): ${ r.body }"))
}
}
private def checkDansBagValidationResult(result: DansBagValidationResult)(implicit ps: Parameters): Try[Unit] = {
if (result.isCompliant)
Success(())
else
Failure(BagValidationException(ps.bagId, result.ruleViolations.get.mkString))
}
private def createRefBagsTxt(versionOfId: BagId)(implicit ps: Parameters): Try[Unit] = {
for {
refBagsTxt <- getBagSequence(versionOfId)
_ <- writeRefBagsTxt(ps.bagDir.toPath)(refBagsTxt)
} yield ()
}
private def getBagSequence(bagId: BagId)(implicit ps: Parameters): Try[String] = Try {
val uri = ps.bagIndexService.resolve(s"bag-sequence?contains=$bagId")
val response = ps.http(uri.toASCIIString)
.header("Accept", "text/plain;charset=utf-8")
.asString
response.code match {
case 200 =>
val body = response.body
if (body.isBlank) {
logger.error(s"Empty response body from [$uri]")
throw InvalidIsVersionOfException(s"Bag with bag-id $bagId, pointed to by Is-Version-Of field in bag-info.txt is not found in bag index.")
}
else body
case _ =>
logger.error(s"$uri returned: [ ${ response.statusLine } ] while getting bag Sequence for bag $bagId. Body = ${ response.body }")
throw new IllegalStateException(s"Error retrieving bag-sequence for bag: $bagId. [$uri] returned ${ response.statusLine }")
}
}
private def writeRefBagsTxt(bagDir: Path)(refBagsTxt: String): Try[Unit] = Try {
FileUtils.write(bagDir.resolve("refbags.txt").toFile, refBagsTxt, "UTF-8")
}
private def generateUncreatedTempFile()(implicit tempDir: File): Try[File] = Try {
val tempFile = File.createTempFile("easy-archive-bag-", ".zip", tempDir)
tempFile.delete()
debug(s"Generated unique temporary file name: $tempFile")
tempFile
} recoverWith {
case NonFatal(e) => Failure(new IOException(s"Could not create temp file in $tempDir: ${ e.getMessage }", e))
}
private def zipDir(dir: File, zip: File)(implicit bagId: BagId): Try[Unit] = Try {
logger.info(s"[$bagId] Zipping directory $dir to file $zip")
if (zip.exists) zip.delete
val zf = new ZipFile(zip)
val parameters = new ZipParameters
zf.addFolder(dir, parameters)
}
private def putFile(file: File)(implicit s: Parameters): Try[HttpResponse[String]] = {
for {
md5Hex <- computeMd5(file)
_ = debug(s"Content-MD5 = $md5Hex")
_ = logger.info(s"Sending bag to ${ s.storageDepositService }, id = ${ s.bagId }, with user = ${ s.username }, password = ****")
response <- Using.fileInputStream(file)
.map(fileStream => {
s.http(s.storageDepositService.toURI.resolve("bags/").resolve(s.bagId.toString).toASCIIString)
.copy(connectFunc = InputStreamBodyConnectFunc(fileStream, Option(file.length)))
.header("Content-Disposition", "attachment; filename=bag.zip")
.header("Content-MD5", md5Hex)
.header("Content-Type", "application/zip")
.method("PUT")
.auth(s.username, s.password)
.asString
})
.tried
} yield response
}
private def computeMd5(file: File): Try[String] = {
Using.fileInputStream(file).map(DigestUtils.md5Hex).tried
}
}
| DANS-KNAW/easy-archive-bag | lib/src/main/scala/nl.knaw.dans.easy.archivebag/EasyArchiveBag.scala | Scala | apache-2.0 | 7,991 |
package bootstrap.liftweb
import net.liftweb.util._
import net.liftweb.http._
import net.liftweb.sitemap._
import net.liftweb.sitemap.Loc._
import Helpers._
import _root_.net.liftweb._
import util.{Helpers, Box, Full, Empty, Failure, Log}
import http._
import sitemap._
import Helpers._
/**
*/
class Boot {
def boot {
LiftRules.addToPackages("net.liftweb.flot_demo.web")
// Build SiteMap
val entries = Menu(Loc("Home", List ("index"), "Home")) ::
Menu(Loc("Flot: Basic", List ("basic"), "Flot: Basic example")) ::
Menu(Loc("Flot: Graph-Types", List ("graph-types"), "Flot: Different graph types")) ::
Menu(Loc("Flot: Setting-Option", List ("setting-option"), "Flot: Setting various options")) ::
Menu(Loc("Flot: Selection", List ("selection"), "Flot: Selection and zooming")) ::
Menu(Loc("Flot: Zooming", List ("zooming"), "Flot: Zooming with overview")) ::
Menu(Loc("Flot: Time", List ("time"), "Flot: Plotting times series")) ::
Menu(Loc("Flot: Visitors", List ("visitors"), "Flot: Visitors per day")) ::
Menu(Loc("Flot: Interacting", List ("interacting"), "Flot: Interacting with the data")) ::
Menu(Loc("flot+comet", List ("flot-comet"), "Flot+Comet")) ::
Nil
LiftRules.setSiteMap(SiteMap(entries:_*))
// register treetable resources (javascript and gifs)
net.liftweb.widgets.flot.Flot.init ()
// used to test the comet actor
net.liftweb.flot_demo.web.model.Sensor.start
}
}
| beni55/liftweb | sites/flotDemo/src/main/scala/bootstrap/liftweb/Boot.scala | Scala | apache-2.0 | 1,604 |
package org.jetbrains.plugins.scala
package codeInspection
package monads
import com.intellij.codeInspection.ProblemsHolder
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScMethodCall
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.api.ParameterizedType
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.project.ProjectContext
/**
* @author Sergey Tolmachev ([email protected])
* @since 29.09.15
*/
final class NestedStatefulMonadsInspection extends AbstractInspection(NestedStatefulMonadsInspection.Description) {
import NestedStatefulMonadsInspection._
override def actionFor(implicit holder: ProblemsHolder): PartialFunction[PsiElement, Unit] = {
case call: ScMethodCall =>
import call.projectContext
for {
Typeable(genericType@ParameterizedType(_, arguments)) <- Some(call)
if isStatefulMonadType(genericType) && arguments.exists(isStatefulMonadType)
} holder.registerProblem(call, Description)
}
}
object NestedStatefulMonadsInspection {
private[monads] final val Description = "Nested stateful monads"
private final val StatefulMonadsTypesNames = Set("scala.concurrent.Future", "scala.util.Try")
private def isStatefulMonadType(scType: ScType)
(implicit context: ProjectContext): Boolean =
StatefulMonadsTypesNames.exists(conformsToTypeFromClass(scType, _))
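  // Illustrative example (not part of the original source) of an expression this
  // inspection reports, `riskyIO()` being a hypothetical call:
  //   Future { Try { riskyIO() } }  // Future[Try[...]] nests one stateful monad in another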
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/monads/NestedStatefulMonadsInspection.scala | Scala | apache-2.0 | 1,512
package org.oxbow.codebridge.ui.repo
import scala.collection.JavaConversions.asJavaCollection
import org.oxbow.codebridge.scm.Ref
import org.oxbow.codebridge.scm.Repository
import org.oxbow.codebridge.scm.Topic
import org.oxbow.codebridge.scm.Workspace
import org.oxbow.codebridge.ui.RepositoryAware
import org.oxbow.codebridge.ui.RepositoryElementTreeCell
import org.oxbow.codebridge.util.JFXImplicits._
import javafx.fxml.FXML
import javafx.scene.control.TreeItem
import javafx.scene.control.TreeView
import javafx.scene.control.TableColumn
import org.oxbow.codebridge.scm.Commit
import java.util.Date
import javafx.scene.control.cell.PropertyValueFactory
import org.oxbow.codebridge.ui.TableCommitDescriptionCell
import javafx.scene.control.TableView
import javafx.fxml.Initializable
import collection.JavaConversions._
import javafx.scene.control.SplitPane
import org.oxbow.codebridge.RepoViewState
import java.io.File
class RepositoryBrowserController extends RepositoryAware {
@FXML var splitter: SplitPane = _
@FXML var repoTree: TreeView[Ref] = _
@FXML var repoTable: TableView[Commit] = _
@FXML var tcCommit: TableColumn[Commit,String] = _
@FXML var tcDescription: TableColumn[Commit,List[Ref]] = _
@FXML var tcAuthor: TableColumn[Commit,String] = _
@FXML var tcDate: TableColumn[Commit,Date] = _
repositoryProperty.addListener { repo: Repository =>
repoTree.setCellFactory { new RepositoryElementTreeCell(repo) }
val (model, currentBranch) = createRepoTreeModel(repo)
repoTree.setRoot(model)
currentBranch.foreach(repoTree.getSelectionModel.select)
}
@FXML def initialize: Unit = {
SplitPane.setResizableWithParent(repoTree, false);
// setup table columns
tcCommit.setCellValueFactory( new PropertyValueFactory("hash"))
tcDescription.setCellValueFactory( new PropertyValueFactory("parts"))
tcDescription.setCellFactory{ c: TableColumn[Commit,List[Ref]] => new TableCommitDescriptionCell }
tcDate.setCellValueFactory( new PropertyValueFactory("time"))
tcAuthor.setCellValueFactory( new PropertyValueFactory("author"))
tcCommit.setCellValueFactory( new PropertyValueFactory("hash"))
// setup table refresh on tree selection
repoTree.getSelectionModel.selectedItemProperty.addListener{ item: TreeItem[Ref] =>
item.getValue match {
case ref: Ref => {
repoTable.getItems.setAll(repository.log(ref.id))
repoTable.getItems.find(_.id == ref.id).foreach { commit =>
repoTable.getSelectionModel.select(commit)
repoTable.scrollTo(commit)
}
}
case _ => repoTable.getItems.clear
}
}
}
def state = RepoViewState( repository.location.toString, false, splitter.getDividerPositions()(0))
def state_=( state: RepoViewState ): Unit = {
repository = Repository.open(state.locationFile)
splitter.setDividerPositions(state.dividerPos)
}
// returns tree model and optional tree item related to current branch
private def createRepoTreeModel(repo: Repository): (TreeItem[Ref], Option[TreeItem[Ref]]) = {
def createTopicFromElements(name: String, children: Iterable[Ref], expand: Boolean): TreeItem[Ref] = {
return createTopicFromTreeItems(name, children.map(new TreeItem(_)), expand);
}
def createTopicFromTreeItems(name: String, children: Iterable[TreeItem[Ref]], expand: Boolean): TreeItem[Ref] = {
val result = new TreeItem[Ref](new Topic(name))
result.setExpanded(expand)
result.getChildren.addAll(children)
result
}
val branches = createTopicFromElements("Branches", repo.getBranches, true)
val selection = repo.getCurrentBranch.flatMap{ currentRef =>
branches.getChildren.find{ currentRef == _.getValue }
}
val topics = List(
createTopicFromElements("File Status", List(new Workspace()), true),
branches,
createTopicFromElements("Tags", repo.getTags, false),
createTopicFromElements("Remotes", repo.getRemotes, true))
return (createTopicFromTreeItems(repo.name, topics, true), selection)
}
}
| eugener/codebridge | codebridge/src/main/scala/org/oxbow/codebridge/ui/repo/RepositoryBrowserController.scala | Scala | gpl-3.0 | 4,548