code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M)
---|---|---|---|---|---|
package es.weso.shex
import scala.util.parsing.input.Positional
import es.weso.rdf.nodes._
import es.weso.rdf._
import es.weso.utils.PrefixMapUtils._
/**
* Labels
*/
sealed trait Label {
def getNode(): RDFNode
def show(implicit pm: PrefixMap): String
}
object Label {
def labelStr(str: String): IRILabel = {
IRILabel(IRI(str))
}
def mkLabel(node: RDFNode): Option[Label] = {
node match {
case b: BNodeId => Some(BNodeLabel(b))
case i: IRI => Some(IRILabel(i))
case _ => None
}
}
}
case class IRILabel(iri: IRI) extends Label {
override def getNode = iri
override def show(implicit pm: PrefixMap): String = {
qualify(iri)
}
override def toString: String = {
iri.toString
}
}
case class BNodeLabel(bnode: BNodeId) extends Label {
override def getNode = bnode
override def show(implicit pm: PrefixMap): String =
"_:" + bnode.id
override def toString: String =
"_:" + bnode.id
}
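// Hypothetical usage sketch (assumes only what is defined above; the IRI string is made up):
// mkLabel wraps IRIs and blank nodes in the matching Label subtype and yields None for any
// other kind of RDF node, while labelStr is a shortcut for building an IRILabel from a string.
object LabelUsageExample {
  val shapeIri: IRI = IRI("http://example.org/Shape")
  val fromNode: Option[Label] = Label.mkLabel(shapeIri) // Some(IRILabel(shapeIri))
  val fromString: IRILabel = Label.labelStr("http://example.org/Shape")
}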
| labra/ShExcala | src/main/scala/es/weso/shex/Label.scala | Scala | mit | 980 |
package com.ubeeko.htalk.criteria
import com.ubeeko.htalk.bytesconv._
import org.specs2.mutable.Specification
class QualifierSpec extends Specification {
"A qualifier" can {
"be created from Array[Byte]" in {
val b = Array[Byte](1, 2, 55, 3, 4)
val q = Qualifier(b)
q.value must beEqualTo(b)
}
"be created from String" in {
val s = "hello"
val q = Qualifier(s)
q.value must beEqualTo(bytesFrom[String](s))
}
}
}
| eric-leblouch/htalk | src/test/scala/com/ubeeko/htalk/criteria/QualifierSpec.scala | Scala | apache-2.0 | 471 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.layers
import com.intel.analytics.bigdl.dllib.nn.{Threshold => BThreshold}
import com.intel.analytics.bigdl.dllib.keras.layers.{Threshold => ZThreshold}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.Shape
import com.intel.analytics.bigdl.dllib.keras.ZooSpecHelper
import com.intel.analytics.bigdl.dllib.keras.serializer.ModuleSerializationTest
class ThresholdSpec extends ZooSpecHelper {
"Threshold 3D Zoo" should "be the same as BigDL" in {
val blayer = BThreshold[Float]()
val zlayer = ZThreshold[Float](inputShape = Shape(3, 4))
zlayer.build(Shape(-1, 3, 4))
zlayer.getOutputShape().toSingle().toArray should be (Array(-1, 3, 4))
val input = Tensor[Float](Array(2, 3, 4)).rand()
compareOutputAndGradInput(blayer, zlayer, input)
}
"Threshold 4D Zoo" should "be the same as BigDL" in {
val blayer = BThreshold[Float](1e-5)
val zlayer = ZThreshold[Float](1e-5, inputShape = Shape(4, 8, 8))
zlayer.build(Shape(-1, 4, 8, 8))
zlayer.getOutputShape().toSingle().toArray should be (Array(-1, 4, 8, 8))
val input = Tensor[Float](Array(3, 4, 8, 8)).rand()
compareOutputAndGradInput(blayer, zlayer, input)
}
}
class ThresholdSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val layer = Threshold[Float](inputShape = Shape(4, 5))
layer.build(Shape(2, 4, 5))
val input = Tensor[Float](2, 4, 5).rand()
runSerializationTest(layer, input)
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/keras/layers/ThresholdSpec.scala | Scala | apache-2.0 | 2,140 |
package com.typesafe.slick.testkit.tests
import com.typesafe.slick.testkit.util.{RelationalTestDB, AsyncTest}
class RelationalScalarFunctionTest extends AsyncTest[RelationalTestDB] {
import tdb.profile.api._
def test = {
def check[T : BaseColumnType](q: Rep[T], exp: T) = q.result.map(_ shouldBe exp)
def checkLit[T : BaseColumnType](v: T) = check(LiteralColumn(v), v)
val s = "abcdefghijklmnopqrstuvwxyz"
seq(
// Literals
checkLit(false),
checkLit(true),
checkLit(42: Byte),
checkLit(-42: Byte),
checkLit(42),
checkLit(-42),
checkLit(17.5),
checkLit(-17.5),
checkLit(17.5f),
checkLit(-17.5f),
checkLit(42L),
checkLit(-42L),
checkLit("foo"),
check("42".asColumnOf[Int], 42),
check(42.asColumnOf[BigDecimal], BigDecimal(42)),
check(LiteralColumn("foo").length, 3),
check(LiteralColumn("foo") ++ "bar", "foobar"),
check(LiteralColumn(1) ifNull 42, 1),
check(LiteralColumn[Option[Int]](None) ifNull 42, 42),
check(LiteralColumn("Foo").toUpperCase, "FOO"),
check(LiteralColumn("Foo").toLowerCase, "foo"),
check(LiteralColumn(" foo ").ltrim, "foo "),
check(LiteralColumn(" foo ").rtrim, " foo"),
// FIXME: broken in DB2, which does not seem to support nested {fn ...} calls
// check(LiteralColumn(" foo ").trim, "foo")
Functions.database.toLowerCase.result,
Functions.user.toLowerCase.result,
check(LiteralColumn(8) % 3, 2),
check(LiteralColumn(-12.5).abs, 12.5),
check(LiteralColumn(1.9).ceil, 2.0),
check(LiteralColumn(1.5).ceil, 2.0),
check(LiteralColumn(1.4).ceil, 2.0),
check(LiteralColumn(-1.9).ceil, -1.0),
check(LiteralColumn(-1.5).ceil, -1.0),
check(LiteralColumn(-1.4).ceil, -1.0),
check(LiteralColumn(1.5).floor, 1.0),
check(LiteralColumn(1.4).floor, 1.0),
check(LiteralColumn(-1.5).floor, -2.0),
check(LiteralColumn(-10.0).sign, -1),
Functions.pi.toDegrees.result.map(_.should(r => r > 179.9999 && r < 180.0001)),
(Functions.pi.toDegrees.toRadians - Functions.pi).abs.result.map(_.should(_ <= 0.00001)),
check(LiteralColumn(s).substring(3, 5), s.substring(3, 5)),
check(LiteralColumn(s).substring(3), s.substring(3)),
check(LiteralColumn(s).take(3), s.take(3)),
check(LiteralColumn(s).drop(3), s.drop(3)),
ifCap(rcap.replace)(check(LiteralColumn(s).replace("cd", "XXX"), s.replace("cd", "XXX"))),
ifCap(rcap.reverse)(check(LiteralColumn(s).reverseString, s.reverse)),
ifCap(rcap.indexOf)(seq(
check(LiteralColumn(s).indexOf("o"), s.indexOf("o")),
check(LiteralColumn(s).indexOf("7"), s.indexOf("7"))
)),
ifCap(rcap.repeat)(check(LiteralColumn(s) * 2, s * 2))
)
}
}
| slick/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/RelationalScalarFunctionTest.scala | Scala | bsd-2-clause | 2,834 |
/*
* Copyright (C) 2012 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.environment.ssh
import org.openmole.core.exception.UserBadDataError
import org.openmole.core.workspace.{ Workspace, AuthenticationProvider }
object SSHAuthentication {
def apply()(implicit authentications: AuthenticationProvider) = authentications(classOf[SSHAuthentication])
def update(i: Int, a: SSHAuthentication) = Workspace.authentications.save(i, a)
def apply(i: Int)(implicit authentications: AuthenticationProvider) = authentications(classOf[SSHAuthentication])(i)
def apply(target: String, authentications: AuthenticationProvider) = {
val list = authentications(classOf[SSHAuthentication])
list.find { e ⇒ target.matches(e.regexp) }.getOrElse(throw new UserBadDataError("No authentication method found for " + target))
}
def apply(login: String, host: String, port: Int, authentications: AuthenticationProvider): SSHAuthentication = apply(address(login, host, port), authentications)
def address(login: String, host: String, port: Int) = s"$login@$host:$port"
def +=(a: SSHAuthentication) =
update(Workspace.authentications.size[SSHAuthentication], a)
}
trait SSHAuthentication {
def target: String
def login: String
def regexp = ".*" + login + "@" + target + ".*"
def apply(implicit authenticationProvider: AuthenticationProvider): fr.iscpif.gridscale.ssh.SSHAuthentication
override def toString = "Target = " + target
}
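// Hypothetical illustration (values are made up): the lookup key passed to
// SSHAuthentication.apply(target, ...) is the "login@host:port" string built by address,
// which is then matched against each stored authentication's regexp.
object SSHAuthenticationAddressExample {
  val target: String = SSHAuthentication.address("alice", "example.org", 22) // "alice@example.org:22"
}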
| ISCPIF/PSEExperiments | openmole-src/openmole/plugins/org.openmole.plugin.environment.ssh/src/main/scala/org/openmole/plugin/environment/ssh/SSHAuthentication.scala | Scala | agpl-3.0 | 2,103 |
println("Hola dende un script")
| jmlb23/scala | libro_odersky/scripts_CH2/hello.scala | Scala | gpl-3.0 | 32 |
/*
* Copyright (C) 2011-2013 exsplay
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package exsplay.api.routing
import spray.http.HttpMethod
/**
* User: wert
* Date: 12.07.13
* Time: 2:02
*/
case class Route(method: HttpMethod, path: List[String])
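// Hypothetical usage sketch (the path is made up; assumes spray's predefined method
// constants in spray.http.HttpMethods): a route value for GET /api/users.
object RouteExample {
  import spray.http.HttpMethods
  val listUsers: Route = Route(HttpMethods.GET, List("api", "users"))
}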
| wertlex/exsplay | exsplay/src/main/scala/exsplay/api/routing/Route.scala | Scala | apache-2.0 | 766 |
package com.seanshubin.uptodate.logic
case class ReportNames(pom: String,
repository: String,
inconsistency: String,
upgradesToApply: String,
upgradesToIgnore: String,
statusQuo: String,
notFound: String,
byDependency: String,
summary: String,
unexpandedPom: String,
propertyConflict: String)
| SeanShubin/up-to-date | logic/src/main/scala/com/seanshubin/uptodate/logic/ReportNames.scala | Scala | unlicense | 524 |
package predict4s
package sgp
import org.scalactic.Or
import spire.algebra._
//import spire.math._
import spire.implicits._
//import spire.syntax.primitives._
import predict4s.coord._
import predict4s.coord.CoordinatesConversions._
/**
* The SGP-4 theory is applied for all orbits with periods of T <= 225 min.
* It performs a propagation in time of doubly averaged elements according to their
* secular rates of change due to the zonal harmonics J2 and J4 of the Earth potential,
* and due to drag perturbations in an atmosphere with a power-law altitude profile of air density.
* The propagated, doubly averaged elements at epoch are subsequently
* converted into singly averaged elements, by overlaying long-periodic
* perturbations due to J3, before a final conversion step to osculating elements by superimposition
* of first-order, short-period perturbation amplitudes due to J2.
* (from Space Debris, by H. Klinkrad, pag 216).
*/
abstract class SGP4[@sp(Double) F : Field : NRoot : Order : Trig](
val sec : BrouwerLaneSecularCorrections[F]
) {
type Minutes = sec.Minutes
def gsto : F = TimeUtils.gstime(sec.elem0Ctx.elem.epoch + 2433281.5)
def propagate(t: Minutes): SGPPropResult[F] = propagate2CartesianContext(t)
def propagate2CartesianContext(t: Minutes) : SGPPropResult[F] =
for {
secularElemt <- secularCorrections(t)
spn <- periodicCorrections(secularElemt)
unitpv = spn2UnscaledCartesian(spn)
pv = scaleUnitCartesians(unitpv,spn.r)
} yield (pv, unitpv, spn)
def corrections2CartesianContext(secularElemt : SGPSecularCtx[F]) : SGPPropResult[F] =
for {
spn <- periodicCorrections(secularElemt)
unitpv = spn2UnscaledCartesian(spn)
pv = scaleUnitCartesians(unitpv,spn.r)
} yield (pv, unitpv, spn)
def propagate2SPNContext(t: Minutes) =
for {
sc <- secularCorrections(t)
pc <- periodicCorrections(sc)
} yield (pc, sc)
/**
* Calculates the new secular elements at time t in minutes from the epoch of the initial elements
*/
def secularCorrections(t: Minutes): SGPSecularResult[F] = sec.secularCorrections(t)
/**
* Applies the periodic corrections to the secular elements at time t in minutes from the epoch of the initial elements
*/
def periodicCorrections(secularElemt : SGPSecularCtx[F]) : SGPSPNResult[F]
/**
* Vallado's code works with internal units of length LU (units of earth’s radius
* R⊕ in km) and time TU (units of the orbit’s period in min)
* TU = 60 * sqrt( (R⊕ km)³ /(μ km³ /s²) ) min
* where μ is the earth’s gravitational constant; μ = 1 UL³/UT² in internal units.
*/
def scaleUnitCartesians(unitElems: CartesianElems[F], r: F): CartesianElems[F] = {
import sec.elem0Ctx.wgs.{aE,vkmpersec}, unitElems._
CartesianElems( (aE*r)*x, (aE*r)*y , (aE*r)*z, vkmpersec*vx, vkmpersec*vy, vkmpersec*vz)
}
}
| pleira/SGP4Extensions | core/src/main/scala/predict4s/sgp/sgp4.scala | Scala | apache-2.0 | 2,915 |
package articles.repositories
import articles.controllers.{Page, PageRequest}
import articles.models.{Article, ArticleId, ArticleMetaModel}
import commons.models.{IdMetaModel, Property}
import commons.repositories._
import commons.repositories.mappings.JavaTimeDbMappings
import slick.dbio.DBIO
import slick.jdbc.MySQLProfile.api.{DBIO => _, MappedTo => _, Rep => _, TableQuery => _, _}
import slick.lifted.{ProvenShape, _}
import scala.concurrent.ExecutionContext
class ArticleRepo(override protected val dateTimeProvider: DateTimeProvider,
implicit private val ec: ExecutionContext)
extends BaseRepo[ArticleId, Article, ArticleTable]
with AuditDateTimeRepo[ArticleId, Article, ArticleTable] {
def byPageRequest(pageRequest: PageRequest): DBIO[Page[Article]] = {
require(pageRequest != null)
val count = query.size.result
val slickOrderings = pageRequest.orderings.map(toSlickOrderingSupplier).reverse
var articlesQuery = query
.drop(pageRequest.offset)
.take(pageRequest.limit)
.sortBy(slickOrderings.head)
slickOrderings.tail.foreach(getSlickOrdering => {
articlesQuery = articlesQuery.sortBy(getSlickOrdering)
})
articlesQuery.result.zip(count)
.map(articlesAndCount => Page(articlesAndCount._1, articlesAndCount._2))
}
override protected val mappingConstructor: Tag => ArticleTable = new ArticleTable(_)
override protected val modelIdMapping: BaseColumnType[ArticleId] = MappedColumnType.base[ArticleId, Long](
vo => vo.value,
id => ArticleId(id)
)
override protected val metaModel: IdMetaModel = ArticleMetaModel
override protected val metaModelToColumnsMapping: Map[Property[_], (ArticleTable) => Rep[_]] = Map(
ArticleMetaModel.id -> (table => table.id),
ArticleMetaModel.modifiedAt -> (table => table.modifiedAt),
)
}
protected class ArticleTable(tag: Tag) extends IdTable[ArticleId, Article](tag, "articles")
with AuditDateTimeTable
with JavaTimeDbMappings {
def slug: Rep[String] = column(ArticleMetaModel.slug.name)
def title: Rep[String] = column(ArticleMetaModel.title.name)
def description: Rep[String] = column(ArticleMetaModel.description.name)
def body: Rep[String] = column(ArticleMetaModel.body.name)
def * : ProvenShape[Article] = (id, slug, title, description, body, createdAt, modifiedAt) <> (Article.tupled,
Article.unapply)
}
| Dasiu/play-framework-test-project | app/articles/repositories/ArticleRepo.scala | Scala | mit | 2,399 |
package net.javachallenge.runner
import org.specs._
import org.specs.matcher._
import org.specs.runner.{ JUnitSuiteRunner, JUnit }
import org.junit.runner.RunWith
import net.javachallenge.scene.MainScene
import net.javachallenge.scene.VeinScene
import net.javachallenge.scene.console.ConsoleScene
import net.javachallenge.scene.MainRunnerScene
import net.javachallenge.scene.InitialRunnerScene
import net.javachallenge.GameEnvironment
import net.javachallenge.entity.Game
import net.javachallenge.RunnerInitializer
import net.javachallenge.entity.Field
import net.javachallenge.scene.ResultScene
import net.javachallenge.scene.ResultScene
import net.javachallenge.scene.EmptyScene
@RunWith(classOf[JUnitSuiteRunner])
class ReplaySpecTest extends Specification with JUnit {
val env = GameEnvironment()
"Replay runner" should {
"read a replay file" in {
val fileName = "replay/2012_11_16_16_32__tokoharuAI_Sabateur_JoeJack_near_player_Wand_Player_Myu.rep"
val (irs, mrs, names, settings, random) = RunnerInitializer.initializeReplay(fileName)
env.game = Game(names, settings, Field(settings, random))
val man = env.getSceneManager.setFps(9999)
val endScene = new EmptyScene(null) with ResultScene with ConsoleScene
val mainScene = new MainScene(endScene) with ConsoleScene with MainRunnerScene
val veinScene = new VeinScene(mainScene) with ConsoleScene with InitialRunnerScene
mainScene.runners = Vector(mrs: _*)
veinScene.runners = Vector(irs: _*)
man.initialize(env, veinScene)
while (man.runOneStep(env, veinScene) == veinScene) {}
while (man.runOneStep(env, mainScene) == mainScene) {}
mainScene.game.field.countVeins(0) must_== 10
mainScene.game.field.countVeins(1) must_== 30
mainScene.game.field.countVeins(2) must_== 0
mainScene.game.field.countVeins(3) must_== 0
mainScene.game.field.countVeins(4) must_== 0
mainScene.game.field.countVeins(5) must_== 0
mainScene.game.field.sumRobots(0) must_== 5643
mainScene.game.field.sumRobots(1) must_== 15466
mainScene.game.field.sumRobots(2) must_== 0
mainScene.game.field.sumRobots(3) must_== 0
mainScene.game.field.sumRobots(4) must_== 0
mainScene.game.field.sumRobots(5) must_== 0
mainScene.game.getTotalMoneyWhenSellingAllMaterials(0) must_== 383037
mainScene.game.getTotalMoneyWhenSellingAllMaterials(1) must_== 207339
mainScene.game.getTotalMoneyWhenSellingAllMaterials(2) must_== 74113
mainScene.game.getTotalMoneyWhenSellingAllMaterials(3) must_== 19087
mainScene.game.getTotalMoneyWhenSellingAllMaterials(4) must_== 59035
mainScene.game.getTotalMoneyWhenSellingAllMaterials(5) must_== 14118
}
}
}
| AI-comp/JavaChallenge2012 | src/test/scala/net/javachallenge/runner/ReplaySpecTest.scala | Scala | apache-2.0 | 2,749 |
package com.typesafe.sbt.packager.archetypes.systemloader
import sbt._
import sbt.Keys.{sourceDirectory, target}
import com.typesafe.sbt.packager.Keys.{
defaultLinuxStartScriptLocation,
killTimeout,
linuxMakeStartScript,
linuxPackageMappings,
linuxScriptReplacements,
linuxStartScriptName,
linuxStartScriptTemplate,
maintainerScripts,
packageName,
requiredStartFacilities,
requiredStopFacilities,
serverLoading,
startRunlevels,
stopRunlevels
}
import com.typesafe.sbt.SbtNativePackager.{Debian, Linux, Rpm, Universal}
import com.typesafe.sbt.packager.archetypes.MaintainerScriptHelper.maintainerScriptsAppend
import com.typesafe.sbt.packager.debian.DebianPlugin
import com.typesafe.sbt.packager.debian.DebianPlugin.autoImport.DebianConstants
import com.typesafe.sbt.packager.rpm.RpmPlugin
import com.typesafe.sbt.packager.rpm.RpmPlugin.autoImport.RpmConstants
import java.nio.file.{Files, Paths}
object SystemdPlugin extends AutoPlugin {
override def requires = SystemloaderPlugin
object autoImport {
val systemdSuccessExitStatus =
settingKey[Seq[String]]("SuccessExitStatus property")
val systemdIsServiceFileConfig =
settingKey[Boolean]("Make app_name.service file as config.")
}
import autoImport._
override def projectSettings: Seq[Setting[_]] =
debianSettings ++ inConfig(Debian)(systemdSettings) ++ rpmSettings ++ inConfig(Rpm)(systemdSettings)
def systemdSettings: Seq[Setting[_]] =
Seq(
// used by other archetypes to define systemloader dependent behaviour
serverLoading := Some(ServerLoader.Systemd),
// Systemd settings
startRunlevels := None,
stopRunlevels := None,
requiredStartFacilities := Some("network.target"),
requiredStopFacilities := Some("network.target"),
systemdSuccessExitStatus := Seq.empty,
linuxStartScriptName := Some(packageName.value + ".service"),
systemdIsServiceFileConfig := true,
// add systemloader to mappings
linuxPackageMappings ++= startScriptMapping(
linuxStartScriptName.value,
linuxMakeStartScript.value,
defaultLinuxStartScriptLocation.value,
systemdIsServiceFileConfig.value
),
// add additional system configurations to script replacements
linuxScriptReplacements += ("SuccessExitStatus" -> systemdSuccessExitStatus.value.mkString(" ")),
linuxScriptReplacements += ("TimeoutStopSec" -> killTimeout.value.toString)
)
def debianSettings: Seq[Setting[_]] = inConfig(Debian)(defaultLinuxStartScriptLocation := "/lib/systemd/system")
def rpmSettings: Seq[Setting[_]] = inConfig(Rpm)(defaultLinuxStartScriptLocation := "/usr/lib/systemd/system")
}
| kardapoltsev/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/archetypes/systemloader/SystemdPlugin.scala | Scala | bsd-2-clause | 2,704 |
package be.mygod.app
import android.annotation.TargetApi
import android.content.Intent
import android.os.Bundle
import android.view.{View, ViewGroup}
import be.mygod.os.Build
import be.mygod.transition.CircularReveal
object CircularRevealActivity {
final val EXTRA_SPAWN_LOCATION_X = "be.mygod.app.CircularRevealActivity.SPAWN_LOCATION_X"
final val EXTRA_SPAWN_LOCATION_Y = "be.mygod.app.CircularRevealActivity.SPAWN_LOCATION_Y"
def putLocation(intent: Intent, location: (Float, Float)): Intent =
intent.putExtra(EXTRA_SPAWN_LOCATION_X, location._1).putExtra(EXTRA_SPAWN_LOCATION_Y, location._2)
}
trait CircularRevealActivity extends LocationObservedActivity {
import CircularRevealActivity._
@TargetApi(21)
private lazy val circularRevealTransition = new CircularReveal(this)
override protected def onCreate(savedInstanceState: Bundle) {
super.onCreate(savedInstanceState)
val window = getWindow
val decor = window.getDecorView.asInstanceOf[ViewGroup]
decor.setBackgroundColor(android.R.color.transparent) // prevent fading of background
if (Build.version >= 21) {
window.setEnterTransition(circularRevealTransition)
window.setReturnTransition(circularRevealTransition)
for (i <- 0 until decor.getChildCount) { // decor.setTransitionGroup(true) won't work
val child = decor.getChildAt(i).asInstanceOf[ViewGroup]
if (child != null) child.setTransitionGroup(true)
}
if (savedInstanceState == null) {
val intent = getIntent
val x = intent.getFloatExtra(EXTRA_SPAWN_LOCATION_X, Float.NaN)
if (!x.isNaN) {
val y = intent.getFloatExtra(EXTRA_SPAWN_LOCATION_Y, Float.NaN)
if (!y.isNaN) circularRevealTransition.spawnLocation = (x, y)
}
}
}
}
def finish(stopper: View) {
if (Build.version >= 21) circularRevealTransition.stopper = stopper
supportFinishAfterTransition()
}
override def navigateUp(stopper: View) {
if (Build.version >= 21) circularRevealTransition.stopper = stopper
super.navigateUp(stopper)
}
}
| Mygod/mygod-lib-android | src/main/scala/be/mygod/app/CircularRevealActivity.scala | Scala | gpl-3.0 | 2,096 |
/*
* Copyright 2016 Uncharted Software Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package software.uncharted.salt.core.analytic.numeric
import software.uncharted.salt.core.analytic.Aggregator
/**
* Useful for calculating the maximum and minimum values
* across values derived from source records
*/
object MinMaxAggregator extends Aggregator[Double, (Double, Double), (Double, Double)] {
def default(): (Double, Double) = {
(MinAggregator.default, MaxAggregator.default)
}
override def add(current: (Double, Double), next: Option[Double]): (Double, Double) = {
if (next.isDefined) {
(MinAggregator.add(current._1, Some(next.get.doubleValue)), MaxAggregator.add(current._2, Some(next.get.doubleValue)))
} else {
(MinAggregator.add(current._1, None), MaxAggregator.add(current._2, None))
}
}
override def merge(left: (Double, Double), right: (Double, Double)): (Double, Double) = {
(MinAggregator.merge(left._1, right._1), MaxAggregator.merge(left._2, right._2))
}
def finish(intermediate: (Double, Double)): (Double, Double) = {
(MinAggregator.finish(intermediate._1), MaxAggregator.finish(intermediate._2))
}
}
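// Hypothetical usage sketch (not from the upstream sources): folding raw samples through
// the aggregator produces the running (min, max) pair for a collection of values.
object MinMaxAggregatorExample {
  def minMaxOf(samples: Seq[Double]): (Double, Double) = {
    val intermediate = samples.foldLeft(MinMaxAggregator.default()) { (acc, sample) =>
      MinMaxAggregator.add(acc, Some(sample))
    }
    MinMaxAggregator.finish(intermediate)
  }
  // e.g. minMaxOf(Seq(3.0, -1.5, 7.2)) is expected to yield (-1.5, 7.2)
}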
| unchartedsoftware/salt | src/main/scala/software/uncharted/salt/core/analytic/numeric/MinMaxAggregator.scala | Scala | apache-2.0 | 1,696 |
import scala.tools.partest.DirectTest
object Test extends DirectTest {
override def extraSettings: String =
s"-usejavacp -Vprint-pos -Vprint:parser -Yrangepos -Ystop-after:parser -d ${testOutput.path} -cp ${testOutput.path}"
// test/files/pos/t6124.scala
override def code = """
trait T {
def i: Int = 1_024
def j: Long = 1_024L * 1_024
//def k = 1'024
def f = 3_14e-2
def d = 3_14E-2_1
def z = 0
}
""".trim
override def show(): Unit = Console.withErr(System.out) {
compile()
}
}
// support classes here, as needed
| martijnhoekstra/scala | test/files/run/literals-parsing.scala | Scala | apache-2.0 | 586 |
override def map[A,B](functor: F[A])(g: A => B): F[B] =
ap(functor)(pure(g))
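// Hedged, self-contained illustration of the same derivation specialised to Option; the
// exercise's Applic trait (with abstract ap and pure) is assumed, and the names below are
// local to this sketch only.
object ApplicMapOptionSketch {
  def pure[A](a: A): Option[A] = Some(a)
  def ap[A, B](fa: Option[A])(ff: Option[A => B]): Option[B] =
    for { f <- ff; a <- fa } yield f(a)
  def map[A, B](fa: Option[A])(g: A => B): Option[B] = ap(fa)(pure(g))
  // map(Some(2))(_ + 1) == Some(3)
}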
| grzegorzbalcerek/scala-exercises | Applic/stepApplicMap.scala | Scala | bsd-2-clause | 79 |
package dao
import com.google.inject.ImplementedBy
import dao.impl.ExtAuthDAOImpl
import models.User
import scala.concurrent.Future
/**
* Created by khanguyen on 3/30/16.
*/
@ImplementedBy(classOf[ExtAuthDAOImpl])
trait ExtAuthDAO {
def userExists(email: String): Future[Boolean]
def getUser(email: String): Future[Option[User]]
}
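// Hedged sketch (illustration only, not the production binding, which @ImplementedBy points
// at ExtAuthDAOImpl): a trivial in-memory implementation of the contract.
class InMemoryExtAuthDAO(users: Map[String, User]) extends ExtAuthDAO {
  override def userExists(email: String): Future[Boolean] = Future.successful(users.contains(email))
  override def getUser(email: String): Future[Option[User]] = Future.successful(users.get(email))
}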
| MedTalk/services | app/dao/ExtAuthDAO.scala | Scala | mit | 344 |
/*
* Copyright (c) 2013, Scodec
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package scodec
package codecs
import scodec.bits.BitVector
/** Indicates a checksum over `bits` did not match the expected value. */
case class ChecksumMismatch(
bits: BitVector,
expected: BitVector,
actual: BitVector,
context: List[String] = Nil
) extends Err:
def message: String = s"checksum mismatch for bits: $bits, expected: $expected, actual: $actual"
def pushContext(ctx: String): Err = copy(context = ctx :: context)
| scodec/scodec | shared/src/main/scala/scodec/codecs/ChecksumMismatch.scala | Scala | bsd-3-clause | 2,024 |
/*
* Copyright 2021 ABSA Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package za.co.absa.spline.persistence.model
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
class DataSourceSpec extends AnyFlatSpec with Matchers {
behavior of "DataSource.getName"
it should "return last part of string between slashes" in {
DataSource.getName("foo") shouldEqual "foo"
DataSource.getName("/foo") shouldEqual "foo"
DataSource.getName("foo/") shouldEqual "foo"
DataSource.getName("/foo//") shouldEqual "foo"
DataSource.getName("a://b/c/d/foo") shouldEqual "foo"
}
it should "return empty string when input string only contains slashes" in {
DataSource.getName("///") shouldEqual ""
}
it should "return empty string when input string is empty" in {
DataSource.getName("") shouldEqual ""
}
}
| AbsaOSS/spline | persistence/src/test/scala/za/co/absa/spline/persistence/model/DataSourceSpec.scala | Scala | apache-2.0 | 1,397 |
package models
import play.api.libs.json.{Reads, JsPath, Writes}
import play.api.libs.functional.syntax._
case class PasswordEntryKeyword(id: Option[Int],
passwordEntryID: Option[Int],
keyword: String)
extends BaseModel {
override def equals(other: Any) = {
other match {
case PasswordEntryKeyword(_, _, k) => keyword == k
case _ => false
}
}
override val hashCode = keyword.hashCode
}
object PasswordEntryKeywordHelper {
object json {
object implicits {
implicit val passwordEntryKeywordWrites: Writes[PasswordEntryKeyword] = (
(JsPath \\ "id").write[Option[Int]] and
(JsPath \\ "keyword").write[String]
)(unlift(unapply))
implicit val passwordEntryKeywordReads: Reads[PasswordEntryKeyword] = (
(JsPath \\ "id").read[Option[Int]] and
(JsPath \\ "keyword").read[String]
)(apply _)
private def apply(id: Option[Int], keyword: String) = {
PasswordEntryKeyword(id, None, keyword)
}
private def unapply(k: PasswordEntryKeyword) = {
Some(k.id, k.keyword)
}
}
}
}
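// Hypothetical usage sketch (sample values are made up; assumes the standard
// play.api.libs.json.Json facade): serialising a keyword with the Writes defined above.
object PasswordEntryKeywordJsonExample {
  import play.api.libs.json.Json
  import PasswordEntryKeywordHelper.json.implicits._
  val asJson = Json.toJson(PasswordEntryKeyword(Some(1), None, "banking"))
  // expected to render as {"id":1,"keyword":"banking"}
}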
| bmc/pwguard | app/models/PasswordEntryKeyword.scala | Scala | bsd-3-clause | 1,211 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.stats
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class MethodProfilingTest extends Specification {
"MethodProfiling" should {
"keep track of explicit timings" in {
class Profiling extends MethodProfiling {
val timings = new TimingsImpl
def slowMethod(): String = {
Thread.sleep(10)
"test"
}
def exec: String = {
profile(time => timings.occurrence("1", time))(slowMethod())
}
}
val profiling = new Profiling
val result = profiling.exec
result mustEqual "test"
profiling.timings.occurrences("1") mustEqual 1
profiling.timings.time("1") must beGreaterThanOrEqualTo(10L)
}
}
"Timing" should {
"keep track of total time" in {
val timing = new Timing
timing.time mustEqual 0
timing.occurrences mustEqual 0
timing.occurrence(100)
timing.time mustEqual 100
timing.occurrences mustEqual 1
timing.occurrence(50)
timing.time mustEqual 150
timing.occurrences mustEqual 2
}
"compute averages" in {
val timing = new Timing
timing.average().toString mustEqual Double.NaN.toString
timing.occurrence(100)
timing.average mustEqual 100
timing.occurrence(50)
timing.average mustEqual 75
}
}
"Timings" should {
"keep track of total time" in {
val timings = new TimingsImpl
timings.time("1") mustEqual 0
timings.occurrences("1") mustEqual 0
timings.occurrence("1", 100)
timings.occurrence("2", 200)
timings.time("1") mustEqual 100
timings.occurrences("1") mustEqual 1
timings.time("2") mustEqual 200
timings.occurrences("2") mustEqual 1
}
"compute average times" in {
val timings = new TimingsImpl
timings.averageTimes mustEqual "No occurrences"
timings.occurrence("1", 100)
timings.occurrence("2", 200)
timings.averageTimes() mustEqual "Total time: 300 ms. Percent of time - 1: 33.3% 1 times at 100.0000 ms avg, 2: 66.7% 1 times at 200.0000 ms avg"
}
"compute average occurrences" in {
val timings = new TimingsImpl
timings.averageTimes mustEqual "No occurrences"
timings.occurrence("1", 100)
timings.occurrence("2", 200)
timings.averageOccurrences() mustEqual "Total occurrences: 2. Percent of occurrences - 1: 50.0%, 2: 50.0%"
}
}
}
| elahrvivaz/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/MethodProfilingTest.scala | Scala | apache-2.0 | 3,009 |
package com.twitter.finagle.loadbalancer.aperture
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.Address.Inet
import com.twitter.finagle._
import com.twitter.finagle.loadbalancer.EndpointFactory
import com.twitter.finagle.loadbalancer.LoadBalancerFactory.PanicMode
import com.twitter.finagle.loadbalancer.NodeT
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.util.Rng
import com.twitter.util.Activity
import com.twitter.util.Await
import com.twitter.util.Duration
import com.twitter.util.NullTimer
import com.twitter.util.Var
import java.net.InetSocketAddress
import org.scalactic.source.Position
import org.scalatest.Tag
import org.scalatest.funsuite.AnyFunSuite
class ApertureTest extends BaseApertureTest(doesManageWeights = false)
abstract class BaseApertureTools(doesManageWeights: Boolean)
extends AnyFunSuite
with ApertureSuite {
/**
* A simple aperture balancer which doesn't have a controller or load metric
* mixed in since we only want to test the aperture behavior exclusive of
* these.
*
* This means that the aperture will not expand or contract automatically. Thus, each
* test in this suite must manually adjust it or rely on the "rebuild" functionality
* provided by [[Balancer]] which kicks in when we select a down node. Since aperture
* uses P2C to select nodes, we inherit the same probabilistic properties that help
* us avoid down nodes with the important caveat that we only select over a subset.
*/
private[aperture] class Bal extends TestBal {
val manageWeights: Boolean = doesManageWeights
protected def nodeLoad: Double = 0.0
protected def statsReceiver: StatsReceiver = NullStatsReceiver
class Node(val factory: EndpointFactory[Unit, Unit])
extends ServiceFactoryProxy[Unit, Unit](factory)
with NodeT[Unit, Unit]
with ApertureNode[Unit, Unit] {
override def tokenRng: Rng = rng
// We don't need a load metric since this test only focuses on
// the internal behavior of aperture.
def id: Int = 0
def load: Double = nodeLoad
def pending: Int = 0
override val token: Int = 0
}
protected def newNode(factory: EndpointFactory[Unit, Unit]): Node =
new Node(factory)
var rebuilds: Int = 0
override def rebuild(): Unit = {
rebuilds += 1
super.rebuild()
}
}
}
abstract class BaseApertureTest(doesManageWeights: Boolean)
extends BaseApertureTools(doesManageWeights) {
// Ensure the flag value is 12 since many of the tests depend on it.
override protected def test(
testName: String,
testTags: Tag*
)(
testFun: => Any
)(
implicit pos: Position
): Unit =
super.test(testName, testTags: _*) {
minDeterminsticAperture.let(12) {
testFun
}
}
test("requires minAperture > 0") {
intercept[IllegalArgumentException] {
new ApertureLeastLoaded[Unit, Unit](
endpoints = Activity.pending,
smoothWin = Duration.Bottom,
lowLoad = 0,
highLoad = 0,
minAperture = 0,
panicMode = new PanicMode(0),
rng = Rng.threadLocal,
statsReceiver = NullStatsReceiver,
label = "",
timer = new NullTimer,
emptyException = new NoBrokersAvailableException,
useDeterministicOrdering = None,
eagerConnections = false,
manageWeights = doesManageWeights
)
}
}
test("dapertureActive does not create LoadBand metrics") {
ProcessCoordinate.setCoordinate(1, 4)
val stats = new InMemoryStatsReceiver
val aperture = new ApertureLeastLoaded[Unit, Unit](
// we need to hydrate the endpoints to avoid the `EmptyVector` Distributor.
endpoints = Activity.value(IndexedSeq(new Factory(0))),
smoothWin = Duration.Bottom,
lowLoad = 0,
highLoad = 0,
minAperture = 10,
panicMode = new PanicMode(0),
rng = Rng.threadLocal,
statsReceiver = stats,
label = "",
timer = new NullTimer,
emptyException = new NoBrokersAvailableException,
useDeterministicOrdering = Some(true),
eagerConnections = false,
manageWeights = doesManageWeights
)
assert(!stats.gauges.contains(Seq("loadband", "offered_load_ema")))
assert(!stats.counters.contains(Seq("loadband", "widen")))
assert(!stats.counters.contains(Seq("loadband", "narrow")))
ProcessCoordinate.unsetCoordinate()
}
test("closing ApertureLeastLoaded removes the loadband ema gauge") {
val stats = new InMemoryStatsReceiver
val aperture = new ApertureLeastLoaded[Unit, Unit](
endpoints = Activity.pending,
smoothWin = Duration.Bottom,
lowLoad = 0,
highLoad = 0,
minAperture = 10,
panicMode = new PanicMode(0),
rng = Rng.threadLocal,
statsReceiver = stats,
label = "",
timer = new NullTimer,
emptyException = new NoBrokersAvailableException,
useDeterministicOrdering = Some(false),
eagerConnections = false,
manageWeights = doesManageWeights
)
assert(stats.gauges.contains(Seq("loadband", "offered_load_ema")))
Await.result(aperture.close(), 10.seconds)
assert(!stats.gauges.contains(Seq("loadband", "offered_load_ema")))
}
test("closing AperturePeakEwma removes the loadband ema gauge") {
val stats = new InMemoryStatsReceiver
val aperture = new AperturePeakEwma[Unit, Unit](
endpoints = Activity.pending,
smoothWin = Duration.Bottom,
decayTime = 10.seconds,
nanoTime = () => System.nanoTime(),
lowLoad = 0,
highLoad = 0,
minAperture = 10,
panicMode = new PanicMode(0),
rng = Rng.threadLocal,
statsReceiver = stats,
label = "",
timer = new NullTimer,
emptyException = new NoBrokersAvailableException,
useDeterministicOrdering = Some(false),
eagerConnections = false,
manageWeights = doesManageWeights
)
assert(stats.gauges.contains(Seq("loadband", "offered_load_ema")))
Await.result(aperture.close(), 10.seconds)
assert(!stats.gauges.contains(Seq("loadband", "offered_load_ema")))
}
test("eagerly connects to only new endpoints in the aperture") {
val factories = IndexedSeq(new Factory(0), new Factory(1))
val endpoints = Var(Activity.Ok(factories))
ProcessCoordinate.setCoordinate(instanceId = 0, totalInstances = 1)
val aperture = new ApertureLeastLoaded[Unit, Unit](
endpoints = Activity(endpoints),
smoothWin = Duration.Bottom,
lowLoad = 0,
highLoad = 0,
minAperture = 10,
panicMode = new PanicMode(0),
rng = Rng.threadLocal,
statsReceiver = NullStatsReceiver,
label = "",
timer = new NullTimer,
emptyException = new NoBrokersAvailableException,
useDeterministicOrdering = Some(true),
eagerConnections = true,
manageWeights = doesManageWeights
)
assert(factories.forall(_.total == 1))
val newEndpoint = new Factory(2)
endpoints.update(Activity.Ok(factories :+ newEndpoint))
assert(newEndpoint.total == 1)
assert(factories.forall(_.total == 1))
}
test("daperture does not rebuild on max effort exhausted") {
// This tests both traditional and weighted daperture
// When manageWeights is true, weighted dAperture is enabled.
for (manageWeights <- Seq(true, false)) {
val factory = new Factory(0)
ProcessCoordinate.setCoordinate(1, 4)
val stats = new InMemoryStatsReceiver
val aperture = new ApertureLeastLoaded[Unit, Unit](
endpoints = Activity.value(IndexedSeq(factory)),
smoothWin = Duration.Bottom,
lowLoad = 0,
highLoad = 0,
minAperture = 10,
panicMode = new PanicMode(0),
rng = Rng.threadLocal,
statsReceiver = stats,
label = "",
timer = new NullTimer,
emptyException = new NoBrokersAvailableException,
useDeterministicOrdering = Some(true),
eagerConnections = false,
manageWeights = manageWeights
)
assert(stats.counters(Seq("rebuilds")) == 1)
factory.status = Status.Busy
assert(stats.gauges(Seq("size"))() == 1)
assert(stats.gauges(Seq("busy"))() == 1)
aperture.apply()
assert(stats.counters(Seq("max_effort_exhausted")) == 1)
assert(stats.counters(Seq("rebuilds")) == 1)
ProcessCoordinate.unsetCoordinate()
}
}
test("daperture only rebuilds on coordinate changes") {
// This tests both traditional and weighted daperture
// When manageWeights is true, weighted dAperture is enabled.
for (manageWeights <- Seq(true, false)) {
val factory = new Factory(0)
ProcessCoordinate.setCoordinate(1, 4)
val stats = new InMemoryStatsReceiver
val aperture = new ApertureLeastLoaded[Unit, Unit](
endpoints = Activity.value(IndexedSeq(factory)),
smoothWin = Duration.Bottom,
lowLoad = 0,
highLoad = 0,
minAperture = 10,
panicMode = new PanicMode(0),
rng = Rng.threadLocal,
statsReceiver = stats,
label = "",
timer = new NullTimer,
emptyException = new NoBrokersAvailableException,
useDeterministicOrdering = Some(true),
eagerConnections = false,
manageWeights = manageWeights
)
assert(stats.counters(Seq("rebuilds")) == 1)
ProcessCoordinate.setCoordinate(1, 4)
assert(stats.counters(Seq("rebuilds")) == 1)
// rebuild only on new change
ProcessCoordinate.setCoordinate(1, 5)
assert(stats.counters(Seq("rebuilds")) == 2)
ProcessCoordinate.unsetCoordinate()
}
}
test("minAperture <= vector.size") {
val min = 100
val bal = new Bal {
override private[aperture] val minAperture = min
}
val counts = new Counts
val vectorSize = min - 1
bal.update(counts.range(vectorSize))
// verify that we pick 2 within bounds
bal.applyn(100)
assert(bal.aperturex == vectorSize)
}
test("aperture <= vector.size") {
val min = 100
val bal = new Bal {
override private[aperture] val minAperture = min
}
val counts = new Counts
val vectorSize = min + 1
bal.update(counts.range(vectorSize))
assert(bal.aperturex == min)
// increment by 100, should be bound by vector size
bal.adjustx(100)
assert(bal.aperturex == vectorSize)
}
test("Empty vectors") {
val bal = new Bal
intercept[Empty] { Await.result(bal.apply()) }
// transient update
val counts = new Counts
bal.update(counts.range(5))
bal.applyn(100)
assert(counts.nonzero.size > 0)
// go back to zero
bal.update(Vector.empty)
intercept[Empty] { Await.result(bal.apply()) }
}
test("Balance only within the aperture") {
val counts = new Counts
val bal = new Bal
bal.update(counts.range(10))
assert(bal.maxUnitsx == 10)
bal.applyn(100)
assert(counts.nonzero.size == 1)
bal.adjustx(1)
bal.applyn(100)
assert(counts.nonzero.size == 2)
counts.clear()
bal.adjustx(-1)
bal.applyn(100)
assert(counts.nonzero.size == 1)
}
test("min aperture size is not > the number of active nodes") {
val counts = new Counts
val bal = new Bal {
override private[aperture] val minAperture = 4
}
bal.update(counts.range(10))
// Check that the custom minAperture is enforced.
bal.adjustx(-100)
bal.applyn(1000)
assert(counts.nonzero.size == 4)
// Now close 8
counts.clear()
counts.take(8).foreach(_.status = Status.Closed)
bal.update(counts.range(10))
bal.applyn(1000)
assert(counts.nonzero.size == 2)
}
test("Don't operate outside of aperture range") {
val counts = new Counts
val bal = new Bal
bal.update(counts.range(10))
bal.adjustx(10000)
bal.applyn(1000)
assert(counts.nonzero.size == 10)
counts.clear()
bal.adjustx(-100000)
bal.applyn(1000)
assert(counts.nonzero.size == 1)
}
test("Avoid unavailable hosts") {
val counts = new Counts
val bal = new Bal
bal.update(counts.range(10))
bal.adjustx(3)
bal.applyn(100)
assert(counts.nonzero.size == 4)
// Since tokens are assigned, we don't know a priori what's in the
// aperture*, so figure it out by observation.
//
// *Ok, technically we can, since we're using deterministic
// randomness.
for (unavailableStatus <- List(Status.Closed, Status.Busy)) {
val nonZeroKeys = counts.nonzero
val closed0 = counts(nonZeroKeys.head)
val closed1 = counts(nonZeroKeys.tail.head)
closed0.status = unavailableStatus
closed1.status = unavailableStatus
val closed0Req = closed0.total
val closed1Req = closed1.total
bal.applyn(100)
// We want to make sure that we haven't sent requests to the
// `Closed` nodes since our aperture is wide enough to avoid
// them.
assert(closed0Req == closed0.total)
assert(closed1Req == closed1.total)
}
}
test("Nonavailable vectors") {
val counts = new Counts
val bal = new Bal
bal.update(counts.range(10))
for (f <- counts)
f.status = Status.Closed
assert(bal.status == Status.Closed)
bal.applyn(1000)
assert(bal.aperturex == 1)
// since our status sort is stable, we know that
// even though we rebuild, we will still only be
// sending load to the head.
assert(counts.nonzero.size == 1)
val goodkey = 0
counts(goodkey).status = Status.Open
counts.clear()
bal.applyn(1000)
assert(counts.nonzero == Set(goodkey))
assert(bal.status == Status.Open)
}
test("status, unavailable endpoints in the aperture") {
val counts = new Counts
val bal = new Bal {
override protected val useDeterministicOrdering = Some(true)
}
ProcessCoordinate.setCoordinate(instanceId = 0, totalInstances = 12)
bal.update(counts.range(24))
bal.rebuildx()
assert(bal.isDeterministicAperture)
assert(bal.minUnitsx == 12)
// mark all endpoints within the aperture as busy
for (i <- 0 until 12) {
counts(i).status = Status.Busy
}
assert(bal.status == Status.Busy)
// one endpoint in the aperture that's open
counts(0).status = Status.Open
assert(bal.status == Status.Open)
}
test("status, respects vector order in random aperture") {
val counts = new Counts
val bal = new Bal {
override protected val useDeterministicOrdering = Some(false)
}
bal.update(counts.range(2))
assert(bal.aperturex == 1)
assert(bal.isRandomAperture)
// last endpoint outside the aperture is open.
counts(0).status = Status.Busy
// should be available due to the single endpoint
assert(bal.status == Status.Open)
// should be moved forward on rebuild
val svc = Await.result(bal(ClientConnection.nil))
assert(bal.rebuilds == 1)
assert(bal.status == Status.Open)
assert(svc.status == Status.Open)
}
test("useDeterministicOrdering, clients evenly divide servers") {
val counts = new Counts
val bal = new Bal {
override protected val useDeterministicOrdering = Some(true)
}
ProcessCoordinate.setCoordinate(instanceId = 0, totalInstances = 12)
bal.update(counts.range(24))
bal.rebuildx()
assert(bal.isDeterministicAperture)
assert(bal.minUnitsx == 12)
bal.applyn(2000)
assert(counts.nonzero == (0 to 11).toSet)
}
test("useDeterministicOrdering, clients unevenly divide servers") {
val counts = new Counts
val bal = new Bal {
override protected val useDeterministicOrdering = Some(true)
}
ProcessCoordinate.setCoordinate(instanceId = 1, totalInstances = 4)
bal.update(counts.range(18))
bal.rebuildx()
assert(bal.isDeterministicAperture)
assert(bal.minUnitsx == 12)
bal.applyn(2000)
// Need at least 48 connections to satisfy a min of 12 across 4 peers, so we have to circle the ring 3 times (N=3)
// to get at least 48 virtual servers. Instance 1 offset: 0.25, width: 3*0.25 = 0.75, resulting in
// covering three quarters of the servers, or 14 server units.
// Our instance 1 offset is 0.25, which maps to server instance 18*0.25=4.5 as the start of its
// aperture and ends at 18.0, meaning that server instances 4 through 17 are in its physical
// aperture and 4 should get ~1/2 the load of the rest in this clients aperture.
assert(counts.nonzero == (4 to 17).toSet)
assert(math.abs(counts(4).total.toDouble / counts(5).total - 0.5) <= 0.1)
assert(math.abs(counts(17).total.toDouble / counts(12).total - 1.0) <= 0.1)
}
test("no-arg rebuilds are idempotent") {
val bal = new Bal {
override protected val useDeterministicOrdering = Some(true)
}
ProcessCoordinate.setCoordinate(5, 10)
val servers = Vector.tabulate(10)(Factory)
bal.update(servers)
val order = bal.distx.vector
for (_ <- 0 to 100) {
bal.rebuildx()
assert(order.indices.forall { i => order(i) == bal.distx.vector(i) })
}
}
test("order maintained when status flaps") {
val bal = new Bal
ProcessCoordinate.unsetCoordinate()
val servers = Vector.tabulate(5)(Factory)
bal.update(servers)
// 3 of 5 servers are in the aperture
bal.adjustx(2)
assert(bal.aperturex == 3)
ProcessCoordinate.setCoordinate(instanceId = 3, totalInstances = 5)
// We just happen to know that based on our ordering, instance 2 is in the aperture.
// Note, we have an aperture of 3 and 1 down, so the probability of picking the down
// node with p2c is ((1/3)^2)^maxEffort . Instead of attempting to hit this case, we
// force a rebuild artificially.
servers(2).status = Status.Busy
bal.rebuildx()
for (i <- servers.indices) {
assert(servers(i) == bal.distx.vector(i).factory)
}
// flip back status
servers(2).status = Status.Open
bal.rebuildx()
for (i <- servers.indices) {
assert(servers(i) == bal.distx.vector(i).factory)
}
}
test("daperture toggle") {
val bal = new Bal {
override val minAperture = 150
}
ProcessCoordinate.setCoordinate(0, 150)
bal.update(Vector.tabulate(150)(Factory))
bal.rebuildx()
assert(bal.isDeterministicAperture)
// ignore 150, since we are using d-aperture and instead
// default to 12.
assert(bal.minUnitsx == 12)
// Now unset the coordinate which should send us back to random aperture
ProcessCoordinate.unsetCoordinate()
assert(bal.isRandomAperture)
bal.update(Vector.tabulate(150)(Factory))
bal.rebuildx()
assert(bal.minUnitsx == 150)
}
test("d-aperture with equally loaded nodes doesn't unduly bias due to rounding errors") {
val counts = new Counts
val bal = new Bal {
override val minAperture = 12
override protected def nodeLoad: Double = 1.0
}
ProcessCoordinate.setCoordinate(0, 1)
bal.update(counts.range(3))
bal.rebuildx()
assert(bal.isDeterministicAperture)
assert(bal.minUnitsx == 3)
bal.applyn(3000)
ProcessCoordinate.unsetCoordinate()
val requests = counts.toIterator.map(_._total).toVector
val avg = requests.sum.toDouble / requests.size
val relativeDiffs = requests.map { i => math.abs(avg - i) / avg }
relativeDiffs.foreach { i => assert(i < 0.05) }
}
test("'p2c' d-aperture doesn't unduly bias") {
val counts = new Counts
val bal = new Bal {
override val minAperture = 1
override protected def nodeLoad: Double = 1.0
override val useDeterministicOrdering: Option[Boolean] = Some(true)
}
// If no ProcessCoordinate is set but useDeterministicOrdering is true, we should fall back
// to the p2c-style deterministic aperture (one instance, no peers)
ProcessCoordinate.unsetCoordinate()
bal.update(counts.range(3))
bal.rebuildx()
assert(bal.isDeterministicAperture)
assert(bal.minUnitsx == 3)
bal.applyn(3000)
val requests = counts.toIterator.map(_._total).toVector
val avg = requests.sum.toDouble / requests.size
val relativeDiffs = requests.map { i => math.abs(avg - i) / avg }
relativeDiffs.foreach { i => assert(i < 0.05) }
}
test("vectorHash") {
class WithAddressFactory(i: Int, addr: InetSocketAddress) extends Factory(i) {
override val address: Address = Inet(addr, Addr.Metadata.empty)
}
val sr = new InMemoryStatsReceiver
def getVectorHash: Float = sr.gauges(Seq("vector_hash")).apply()
val bal = new Bal {
override protected def statsReceiver = sr
}
def updateWithIps(ips: Vector[String]): Unit =
bal.update(ips.map { addr =>
new WithAddressFactory(addr.##, new InetSocketAddress(addr, 80))
})
updateWithIps(Vector("1.1.1.1", "1.1.1.2"))
val hash1 = getVectorHash
updateWithIps(Vector("1.1.1.1", "1.1.1.3"))
val hash2 = getVectorHash
assert(hash1 != hash2)
// Doesn't have hysteresis
updateWithIps(Vector("1.1.1.1", "1.1.1.2"))
val hash3 = getVectorHash
assert(hash1 == hash3)
// Permutations have different hash codes. First update
// with a different list so the rebuild occurs
updateWithIps(Vector("1.1.1.1", "1.1.1.3"))
updateWithIps(Vector("1.1.1.2", "1.1.1.1"))
val hash4 = getVectorHash
assert(hash1 != hash4)
}
}
| twitter/finagle | finagle-core/src/test/scala/com/twitter/finagle/loadbalancer/aperture/ApertureTest.scala | Scala | apache-2.0 | 21,515 |
package com.sksamuel.elastic4s.http.search.queries.span
import com.sksamuel.elastic4s.http.search.queries.QueryBuilderFn
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
import com.sksamuel.elastic4s.searches.queries.span.SpanOrQuery
object SpanOrQueryBodyFn {
def apply(q: SpanOrQuery): XContentBuilder = {
val builder = XContentFactory.jsonBuilder()
builder.startObject("span_or")
builder.startArray("clauses")
q.clauses.foreach { clause =>
builder.rawValue(QueryBuilderFn(clause))
}
builder.endArray()
q.boost.foreach(builder.field("boost", _))
q.queryName.foreach(builder.field("_name", _))
builder.endObject()
builder.endObject()
}
}
| Tecsisa/elastic4s | elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/search/queries/span/SpanOrQueryBodyFn.scala | Scala | apache-2.0 | 714 |
package monocle
//import scalaz.Id.Id
//import scalaz.std.anyVal._
import cats.instances.list._
//import scalaz.std.option._
//import scalaz.syntax.std.boolean._
//import scalaz.syntax.std.option._
//import scalaz.syntax.tag._
import cats.{Applicative, Functor, Monoid, Traverse, Id}
import catssupport.Implicits._
import cats.data.Const
import cats.instances.int._
import cats.arrow.Choice
/**
* A [[PTraversal]] can be seen as a [[POptional]] generalised to 0 to n targets
* where n can be infinite.
*
* [[PTraversal]] stands for Polymorphic Traversal as its set and modify methods change
* a type `A` to `B` and `S` to `T`.
* [[Traversal]] is a type alias for [[PTraversal]] restricted to monomorphic updates:
* {{{
* type Traversal[S, A] = PTraversal[S, S, A, A]
* }}}
*
* @see [[monocle.law.TraversalLaws]]
*
* @tparam S the source of a [[PTraversal]]
* @tparam T the modified source of a [[PTraversal]]
* @tparam A the target of a [[PTraversal]]
* @tparam B the modified target of a [[PTraversal]]
*/
abstract class PTraversal[S, T, A, B] extends Serializable { self =>
/**
* modify polymorphically the target of a [[PTraversal]] with an Applicative function
* all traversal methods are written in terms of modifyF
*/
def modifyF[F[_]: Applicative](f: A => F[B])(s: S): F[T]
/** map each target to a Monoid and combine the results */
@inline final def foldMap[M: Monoid](f: A => M)(s: S): M =
modifyF[Const[M, ?]](a => Const(f(a)))(s).getConst
/** combine all targets using a target's Monoid */
@inline final def fold(s: S)(implicit ev: Monoid[A]): A =
foldMap(identity)(s)
/** get all the targets of a [[PTraversal]] */
@inline final def getAll(s: S): List[A] =
foldMap(List(_))(s)
/** find the first target of a [[PTraversal]] matching the predicate */
@inline final def find(p: A => Boolean)(s: S): Option[A] =
foldMap(a => (if(p(a)) Some(a) else None).first)(s).unwrap
/** get the first target of a [[PTraversal]] */
@inline final def headOption(s: S): Option[A] =
find(_ => true)(s)
/** check if at least one target satisfies the predicate */
@inline final def exist(p: A => Boolean)(s: S): Boolean =
foldMap(p(_).disjunction)(s).unwrap
/** check if all targets satisfy the predicate */
@inline final def all(p: A => Boolean)(s: S): Boolean =
foldMap(p(_).conjunction)(s).unwrap
/** modify polymorphically the target of a [[PTraversal]] with a function */
@inline final def modify(f: A => B): S => T =
modifyF[Id](f)
/** set polymorphically the target of a [[PTraversal]] with a value */
@inline final def set(b: B): S => T =
modify(_ => b)
/** join two [[PTraversal]] with the same target */
@inline final def choice[S1, T1](other: PTraversal[S1, T1, A, B]): PTraversal[S \/ S1, T \/ T1, A, B] =
new PTraversal[S \/ S1, T \/ T1, A, B]{
def modifyF[F[_]: Applicative](f: A => F[B])(s: S \/ S1): F[T \/ T1] =
s.fold(
s => Functor[F].map(self.modifyF(f)(s))(\/.left),
s1 => Functor[F].map(other.modifyF(f)(s1))(\/.right)
)
}
@deprecated("use choice", since = "1.2.0")
@inline final def sum[S1, T1](other: PTraversal[S1, T1, A, B]): PTraversal[S \/ S1, T \/ T1, A, B] =
choice(other)
/** calculate the number of targets */
@inline final def length(s: S): Int =
foldMap(_ => 1)(s)
/****************************************************************/
/** Compose methods between a [[PTraversal]] and another Optics */
/****************************************************************/
/** compose a [[PTraversal]] with a [[Fold]] */
@inline final def composeFold[C](other: Fold[A, C]): Fold[S, C] =
asFold composeFold other
/** compose a [[PTraversal]] with a [[Getter]] */
@inline final def composeGetter[C](other: Getter[A, C]): Fold[S, C] =
asFold composeGetter other
/** compose a [[PTraversal]] with a [[PSetter]] */
@inline final def composeSetter[C, D](other: PSetter[A, B, C, D]): PSetter[S, T, C, D] =
asSetter composeSetter other
/** compose a [[PTraversal]] with a [[PTraversal]] */
@inline final def composeTraversal[C, D](other: PTraversal[A, B, C, D]): PTraversal[S, T, C, D] =
new PTraversal[S, T, C, D] {
def modifyF[F[_]: Applicative](f: C => F[D])(s: S): F[T] =
self.modifyF(other.modifyF(f)(_))(s)
}
/** compose a [[PTraversal]] with a [[POptional]] */
@inline final def composeOptional[C, D](other: POptional[A, B, C, D]): PTraversal[S, T, C, D] =
composeTraversal(other.asTraversal)
/** compose a [[PTraversal]] with a [[PPrism]] */
@inline final def composePrism[C, D](other: PPrism[A, B, C, D]): PTraversal[S, T, C, D] =
composeTraversal(other.asTraversal)
/** compose a [[PTraversal]] with a [[PLens]] */
@inline final def composeLens[C, D](other: PLens[A, B, C, D]): PTraversal[S, T, C, D] =
composeTraversal(other.asTraversal)
/** compose a [[PTraversal]] with a [[PIso]] */
@inline final def composeIso[C, D](other: PIso[A, B, C, D]): PTraversal[S, T, C, D] =
composeTraversal(other.asTraversal)
/********************************************/
/** Experimental aliases of compose methods */
/********************************************/
/** alias to composeTraversal */
@inline final def ^|->>[C, D](other: PTraversal[A, B, C, D]): PTraversal[S, T, C, D] =
composeTraversal(other)
/** alias to composeOptional */
@inline final def ^|-?[C, D](other: POptional[A, B, C, D]): PTraversal[S, T, C, D] =
composeOptional(other)
/** alias to composePrism */
@inline final def ^<-?[C, D](other: PPrism[A, B, C, D]): PTraversal[S, T, C, D] =
composePrism(other)
/** alias to composeLens */
@inline final def ^|->[C, D](other: PLens[A, B, C, D]): PTraversal[S, T, C, D] =
composeLens(other)
/** alias to composeIso */
@inline final def ^<->[C, D](other: PIso[A, B, C, D]): PTraversal[S, T, C, D] =
composeIso(other)
/**********************************************************************/
/** Transformation methods to view a [[PTraversal]] as another Optics */
/**********************************************************************/
/** view a [[PTraversal]] as a [[Fold]] */
@inline final def asFold: Fold[S, A] =
new Fold[S, A]{
def foldMap[M: Monoid](f: A => M)(s: S): M =
self.foldMap(f)(s)
}
/** view a [[PTraversal]] as a [[PSetter]] */
@inline final def asSetter: PSetter[S, T, A, B] =
PSetter(modify)
}
object PTraversal extends TraversalInstances {
def id[S, T]: PTraversal[S, T, S, T] =
PIso.id[S, T].asTraversal
def codiagonal[S, T]: PTraversal[S \\/ S, T \\/ T, S, T] =
new PTraversal[S \\/ S, T \\/ T, S, T]{
def modifyF[F[_]: Applicative](f: S => F[T])(s: S \\/ S): F[T \\/ T] =
s.bimap(f,f).fold(Applicative[F].map(_)(\\/.left), Applicative[F].map(_)(\\/.right))
}
/** create a [[PTraversal]] from a Traverse */
def fromTraverse[T[_]: Traverse, A, B]: PTraversal[T[A], T[B], A, B] =
new PTraversal[T[A], T[B], A, B] {
def modifyF[F[_]: Applicative](f: A => F[B])(s: T[A]): F[T[B]] =
Traverse[T].traverse(s)(f)
}
def apply2[S, T, A, B](get1: S => A, get2: S => A)(_set: (B, B, S) => T): PTraversal[S, T, A, B] =
new PTraversal[S, T, A, B] {
def modifyF[F[_]: Applicative](f: A => F[B])(s: S): F[T] =
Applicative[F].map2(f(get1(s)), f(get2(s)))(_set(_, _, s))
}
def apply3[S, T, A, B](get1: S => A, get2: S => A, get3: S => A)(_set: (B, B, B, S) => T): PTraversal[S, T, A, B] =
new PTraversal[S, T, A, B] {
def modifyF[F[_]: Applicative](f: A => F[B])(s: S): F[T] =
Applicative[F].map3(f(get1(s)), f(get2(s)), f(get3(s)))(_set(_, _, _, s))
}
def apply4[S, T, A, B](get1: S => A, get2: S => A, get3: S => A, get4: S => A)(_set: (B, B, B, B, S) => T): PTraversal[S, T, A, B] =
new PTraversal[S, T, A, B] {
def modifyF[F[_]: Applicative](f: A => F[B])(s: S): F[T] =
Applicative[F].map4(f(get1(s)), f(get2(s)), f(get3(s)), f(get4(s)))(_set(_, _, _, _, s))
}
def apply5[S, T, A, B](get1: S => A, get2: S => A, get3: S => A, get4: S => A, get5: S => A)(_set: (B, B, B, B, B, S) => T): PTraversal[S, T, A, B] =
new PTraversal[S, T, A, B] {
def modifyF[F[_]: Applicative](f: A => F[B])(s: S): F[T] =
Applicative[F].map5(f(get1(s)), f(get2(s)), f(get3(s)), f(get4(s)), f(get5(s)))(_set(_, _, _, _, _, s))
}
def apply6[S, T, A, B](get1: S => A, get2: S => A, get3: S => A, get4: S => A, get5: S => A, get6: S => A)(_set: (B, B, B, B, B, B, S) => T): PTraversal[S, T, A, B] =
new PTraversal[S, T, A, B] {
def modifyF[F[_]: Applicative](f: A => F[B])(s: S): F[T] =
Applicative[F].map6(f(get1(s)), f(get2(s)), f(get3(s)), f(get4(s)), f(get5(s)), f(get6(s)))(_set(_, _, _, _, _, _, s))
}
}
object Traversal {
def id[A]: Traversal[A, A] =
Iso.id[A].asTraversal
def codiagonal[S, T]: Traversal[S \\/ S, S] =
PTraversal.codiagonal
/** [[Traversal]] that points to nothing */
def void[S, A]: Traversal[S, A] =
Optional.void.asTraversal
def apply2[S, A](get1: S => A, get2: S => A)(set: (A, A, S) => S): Traversal[S, A] =
PTraversal.apply2(get1, get2)(set)
def apply3[S, A](get1: S => A, get2: S => A, get3: S => A)(set: (A, A, A, S) => S): Traversal[S, A] =
PTraversal.apply3(get1, get2, get3)(set)
def apply4[S, A](get1: S => A, get2: S => A, get3: S => A, get4: S => A)(set: (A, A, A, A, S) => S): Traversal[S, A] =
PTraversal.apply4(get1, get2, get3, get4)(set)
def apply5[S, A](get1: S => A, get2: S => A, get3: S => A, get4: S => A, get5: S => A)(set: (A, A, A, A, A, S) => S): Traversal[S, A] =
PTraversal.apply5(get1, get2, get3, get4, get5)(set)
def apply6[S, A](get1: S => A, get2: S => A, get3: S => A, get4: S => A, get5: S => A, get6: S => A)(set: (A, A, A, A, A, A, S) => S): Traversal[S, A] =
PTraversal.apply6(get1, get2, get3, get4, get5, get6)(set)
/**
* Composes N lenses horizontally. Note that although it is possible to pass two or more lenses
   * that point to the same `A`, in practice it is considered an unsafe usage (see https://github.com/julien-truffaut/Monocle/issues/379#issuecomment-236374838).
*/
def applyN[S, A](xs: Lens[S, A]*): Traversal[S, A] = {
new PTraversal[S, S, A, A] {
def modifyF[F[_] : Applicative](f: A => F[A])(s: S): F[S] = {
xs.foldLeft(Applicative[F].pure(s))((fs, lens) =>
Applicative[F].apply2(f(lens.get(s)), fs)((a, s) => lens.set(a)(s))
)
}
}
}
}
sealed abstract class TraversalInstances {
implicit val traversalChoice: Choice[Traversal] = new Choice[Traversal] {
def compose[A, B, C](f: Traversal[B, C], g: Traversal[A, B]): Traversal[A, C] =
g composeTraversal f
def id[A]: Traversal[A, A] =
Traversal.id
@OnlyInCats
def choice[A, B, C](f1: Traversal[A, C], f2: Traversal[B, C]): Traversal[A \\/ B, C] =
f1 choice f2
@OnlyInScalaz
def choice[A, B, C](f1: => Traversal[A, C], f2: => Traversal[B, C]): Traversal[A \\/ B, C] =
f1 choice f2
}
}
| fkz/Monocle | core/shared/src/main/scala/monocle/Traversal.scala | Scala | mit | 11,149 |
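// ---------------------------------------------------------------------------
// Editor's note: a minimal, hedged usage sketch for the PTraversal/Traversal
// API defined in the Monocle file above. It is not part of any surrounding
// file; `Point` and `TraversalUsageExample` are invented names, and the sketch
// assumes the `monocle` package (and its scalaz dependency) is on the classpath.
// ---------------------------------------------------------------------------
import monocle.Traversal

object TraversalUsageExample extends App {
  case class Point(x: Int, y: Int)

  // A Traversal over both coordinates of a Point, built with Traversal.apply2
  val both: Traversal[Point, Int] =
    Traversal.apply2[Point, Int](_.x, _.y)((x, y, p) => p.copy(x = x, y = y))

  println(both.getAll(Point(1, 2)))         // List(1, 2)
  println(both.modify(_ + 10)(Point(1, 2))) // Point(11,12)
  println(both.set(0)(Point(1, 2)))         // Point(0,0)
}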
package org.atnos.eff.syntax
import cats.data.{Ior, IorNel, NonEmptyList, ValidatedNel}
import org.atnos.eff._
import cats.{Applicative, Semigroup}
object validate extends validate
trait validate {
implicit class ValidateEffectOps[R, A](e: Eff[R, A]) {
def runNel[E](implicit m: Member[Validate[E, *], R]): Eff[m.Out, NonEmptyList[E] Either A] =
ValidateInterpretation.runNel(e)(m.aux)
def runMap[E, L : Semigroup](map: E => L)(implicit m: Member[Validate[E, *], R]): Eff[m.Out, L Either A] =
ValidateInterpretation.runMap(e)(map)(Semigroup[L], m.aux)
def runValidatedNel[E](implicit m: Member[Validate[E, *], R]): Eff[m.Out, ValidatedNel[E, A]] =
ValidateInterpretation.runValidatedNel(e)(m.aux)
def runIorMap[E, L : Semigroup](map: E => L)(implicit m: Member[Validate[E, *], R]): Eff[m.Out, L Ior A] =
ValidateInterpretation.runIorMap(e)(map)(Semigroup[L], m.aux)
def runIorNel[E](implicit m: Member[Validate[E, *], R]): Eff[m.Out, E IorNel A] =
ValidateInterpretation.runIorNel(e)(m.aux)
@deprecated("Use catchFirstWrong or more general catchWrongs instead", "5.4.2")
def catchWrong[E](handle: E => Eff[R, A])(implicit m: Member[Validate[E, *], R]): Eff[R, A] =
ValidateInterpretation.catchWrong(e)(handle)
def catchWrongs[E, S[_]: Applicative](handle: S[E] => Eff[R, A])(implicit m: Member[Validate[E, *], R], semi: Semigroup[S[E]]): Eff[R, A] =
ValidateInterpretation.catchWrongs(e)(handle)
def catchFirstWrong[E](handle: E => Eff[R, A])(implicit m: Member[Validate[E, *], R]): Eff[R, A] =
ValidateInterpretation.catchFirstWrong(e)(handle)
def catchLastWrong[E](handle: E => Eff[R, A])(implicit m: Member[Validate[E, *], R]): Eff[R, A] =
ValidateInterpretation.catchLastWrong(e)(handle)
def catchAllWrongs[E](handle: NonEmptyList[E] => Eff[R, A])(implicit m: Member[Validate[E, *], R]): Eff[R, A] =
ValidateInterpretation.catchAllWrongs(e)(handle)
}
}
| etorreborre/eff-cats | shared/src/main/scala/org/atnos/eff/syntax/validate.scala | Scala | mit | 1,980 |
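// ---------------------------------------------------------------------------
// Editor's note: a hedged sketch of the ValidateEffectOps syntax defined above,
// not part of the original file. It assumes eff's standard creation/syntax
// imports (`org.atnos.eff.all._`, `org.atnos.eff.syntax.all._`), that `wrong`
// records a failure without short-circuiting, and that the kind-projector
// plugin is enabled (the `*` placeholder below), as in eff itself.
// ---------------------------------------------------------------------------
import org.atnos.eff._
import org.atnos.eff.all._
import org.atnos.eff.syntax.all._

object ValidateSyntaxSketch extends App {
  type S = Fx.fx1[Validate[String, *]]

  // Both failures are recorded; the Validate effect keeps going instead of stopping
  val checks: Eff[S, Int] = for {
    _ <- wrong[S, String]("first problem")
    _ <- wrong[S, String]("second problem")
  } yield 42

  // runNel comes from the ValidateEffectOps syntax above; run finishes the Eff
  println(checks.runNel.run) // Left(NonEmptyList(first problem, second problem))
}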
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command.partition
import java.util
import scala.collection.JavaConverters._
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.execution.command.{AlterTableAddPartitionCommand, AlterTableDropPartitionCommand, AtomicRunnableCommand}
import org.apache.spark.sql.optimizer.CarbonFilters
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.indexstore.PartitionSpec
import org.apache.carbondata.core.metadata.SegmentFileStore
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.statusmanager.SegmentStatus
import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.events.{OperationContext, OperationListenerBus, PostAlterTableHivePartitionCommandEvent, PreAlterTableHivePartitionCommandEvent}
import org.apache.carbondata.processing.loading.model.{CarbonDataLoadSchema, CarbonLoadModel}
import org.apache.carbondata.processing.util.CarbonLoaderUtil
/**
 * Adds the partition to Hive and creates a new segment if the location has data.
*
*/
case class CarbonAlterTableAddHivePartitionCommand(
tableName: TableIdentifier,
partitionSpecsAndLocs: Seq[(TablePartitionSpec, Option[String])],
ifNotExists: Boolean)
extends AtomicRunnableCommand {
var partitionSpecsAndLocsTobeAdded : util.List[PartitionSpec] = _
var table: CarbonTable = _
override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
table = CarbonEnv.getCarbonTable(tableName)(sparkSession)
if (table.isHivePartitionTable) {
if (table.isChildDataMap) {
throw new UnsupportedOperationException("Cannot add partition directly on aggregate tables")
}
val partitionWithLoc = partitionSpecsAndLocs.filter(_._2.isDefined)
if (partitionWithLoc.nonEmpty) {
val partitionSpecs = partitionWithLoc.map{ case (part, location) =>
new PartitionSpec(
new util.ArrayList(part.map(p => p._1 + "=" + p._2).toList.asJava),
location.get)
}
// Get all the partitions which are not already present in hive.
val currParts = CarbonFilters.getCurrentPartitions(sparkSession, tableName).get
partitionSpecsAndLocsTobeAdded =
new util.ArrayList(partitionSpecs.filterNot { part =>
currParts.exists(p => part.equals(p))
}.asJava)
}
val operationContext = new OperationContext
val preAlterTableHivePartitionCommandEvent = PreAlterTableHivePartitionCommandEvent(
sparkSession,
table)
OperationListenerBus.getInstance()
.fireEvent(preAlterTableHivePartitionCommandEvent, operationContext)
AlterTableAddPartitionCommand(tableName, partitionSpecsAndLocs, ifNotExists).run(sparkSession)
val postAlterTableHivePartitionCommandEvent = PostAlterTableHivePartitionCommandEvent(
sparkSession,
table)
OperationListenerBus.getInstance()
.fireEvent(postAlterTableHivePartitionCommandEvent, operationContext)
} else {
throw new UnsupportedOperationException(
"Cannot add partition directly on non partitioned table")
}
Seq.empty[Row]
}
override def undoMetadata(sparkSession: SparkSession, exception: Exception): Seq[Row] = {
AlterTableDropPartitionCommand(
tableName,
partitionSpecsAndLocs.map(_._1),
ifExists = true,
purge = false,
retainData = true).run(sparkSession)
val msg = s"Got exception $exception when processing data of add partition." +
"Dropping partitions to the metadata"
LogServiceFactory.getLogService(this.getClass.getCanonicalName).error(msg)
Seq.empty[Row]
}
override def processData(sparkSession: SparkSession): Seq[Row] = {
    // Partitions with physical data should be registered as a new segment.
if (partitionSpecsAndLocsTobeAdded != null && partitionSpecsAndLocsTobeAdded.size() > 0) {
val segmentFile = SegmentFileStore.getSegmentFileForPhysicalDataPartitions(table.getTablePath,
partitionSpecsAndLocsTobeAdded)
if (segmentFile != null) {
val indexToSchemas = SegmentFileStore.getSchemaFiles(segmentFile, table.getTablePath)
        val tableColumns = table.getTableInfo.getFactTable.getListOfColumns.asScala
        val isSameSchema = indexToSchemas.asScala.exists{ case(key, columnSchemas) =>
          columnSchemas.asScala.exists { col =>
            tableColumns.exists(p => p.getColumnUniqueId.equals(col.getColumnUniqueId))
          } && columnSchemas.size() == tableColumns.length
        }
if (!isSameSchema) {
throw new UnsupportedOperationException(
"Schema of index files located in location is not matching with current table schema")
}
val loadModel = new CarbonLoadModel
loadModel.setCarbonTransactionalTable(true)
loadModel.setCarbonDataLoadSchema(new CarbonDataLoadSchema(table))
// Create new entry in tablestatus file
CarbonLoaderUtil.readAndUpdateLoadProgressInTableMeta(loadModel, false)
val newMetaEntry = loadModel.getCurrentLoadMetadataDetail
val segmentFileName =
SegmentFileStore.genSegmentFileName(
loadModel.getSegmentId, String.valueOf(loadModel.getFactTimeStamp)) +
CarbonTablePath.SEGMENT_EXT
newMetaEntry.setSegmentFile(segmentFileName)
val segmentsLoc = CarbonTablePath.getSegmentFilesLocation(table.getTablePath)
CarbonUtil.checkAndCreateFolderWithPermission(segmentsLoc)
val segmentPath = segmentsLoc + CarbonCommonConstants.FILE_SEPARATOR + segmentFileName
SegmentFileStore.writeSegmentFile(segmentFile, segmentPath)
CarbonLoaderUtil.populateNewLoadMetaEntry(
newMetaEntry,
SegmentStatus.SUCCESS,
loadModel.getFactTimeStamp,
true)
// Add size to the entry
CarbonLoaderUtil.addDataIndexSizeIntoMetaEntry(newMetaEntry, loadModel.getSegmentId, table)
        // Mark the load as successful in the table status
CarbonLoaderUtil.recordNewLoadMetadata(newMetaEntry, loadModel, false, false)
}
}
Seq.empty[Row]
}
}
| jatin9896/incubator-carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/partition/CarbonAlterTableAddHivePartitionCommand.scala | Scala | apache-2.0 | 7,286 |
package com.clemble.query
import reactivemongo.api.MongoDriver
import reactivemongo.api.collections.bson.BSONCollection
import reactivemongo.bson.{BSONDocumentWriter, BSONDocumentReader, BSONDocument}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Await
/**
* Mongo BSON SearchableRepository spec
*/
class MongoBSONSearchableRepositorySpec extends SearchableRepositorySpec {
implicit val format: BSONDocumentReader[Employee] with BSONDocumentWriter[Employee] = new BSONDocumentReader[Employee] with BSONDocumentWriter[Employee] {
override def write(t: Employee): BSONDocument = BSONDocument(
"name" -> t.name,
"salary" -> t.salary
)
override def read(bson: BSONDocument): Employee =
Employee(
bson.getAs[String]("name").get,
bson.getAs[Int]("salary").get
)
}
override val repo: MongoBSONSearchableRepository[Employee] with ProjectionSupport = new MongoBSONSearchableRepository[Employee] with MongoBSONProjectionSupport[Employee] {
override val collection: BSONCollection = {
SpecificationConstants.db.collection[BSONCollection]("employee_bson")
}
override val queryTranslator: QueryTranslator[BSONDocument, BSONDocument] = new MongoBSONQueryTranslator()
override implicit val f: BSONDocumentReader[Employee] = format
}
override def save(employee: Employee): Boolean = {
val fSave = repo.collection.update(BSONDocument("_id" -> employee.name), format.write(employee), upsert = true)
Await.result(fSave, 1 minute).errmsg.isEmpty
}
override def remove(employee: Employee): Boolean = {
val fRemove = repo.collection.remove(BSONDocument("name" -> employee.name))
Await.result(fRemove, 1 minute).errmsg.isEmpty
}
}
| clemble/scala-query-dsl | src/test/scala/com/clemble/query/MongoBSONSearchableRepositorySpec.scala | Scala | apache-2.0 | 1,801 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.checkpoint.file
import java.io.File
import scala.collection.JavaConversions._
import java.util.Random
import org.junit.Assert._
import org.junit.{After, Before, Test}
import org.apache.samza.SamzaException
import org.apache.samza.Partition
import org.apache.samza.checkpoint.Checkpoint
import org.apache.samza.system.SystemStreamPartition
import org.apache.samza.container.TaskName
import org.junit.rules.TemporaryFolder
class TestFileSystemCheckpointManager {
val checkpointRoot = System.getProperty("java.io.tmpdir") // TODO: Move this out of tmp, into our build dir
val taskName = new TaskName("Warwickshire")
val baseFileLocation = new File(checkpointRoot)
val tempFolder = new TemporaryFolder
@Before
def createTempFolder = tempFolder.create()
@After
def deleteTempFolder = tempFolder.delete()
@Test
def testReadForCheckpointFileThatDoesNotExistShouldReturnNull {
val cpm = new FileSystemCheckpointManager("some-job-name", tempFolder.getRoot)
assertNull(cpm.readLastCheckpoint(taskName))
}
@Test
def testReadForCheckpointFileThatDoesExistShouldReturnProperCheckpoint {
val cp = new Checkpoint(Map(
new SystemStreamPartition("a", "b", new Partition(0)) -> "c",
new SystemStreamPartition("a", "c", new Partition(1)) -> "d",
new SystemStreamPartition("b", "d", new Partition(2)) -> "e"))
var readCp:Checkpoint = null
val cpm = new FileSystemCheckpointManager("some-job-name", tempFolder.getRoot)
cpm.start
cpm.writeCheckpoint(taskName, cp)
readCp = cpm.readLastCheckpoint(taskName)
cpm.stop
assertNotNull(readCp)
cp.equals(readCp)
assertEquals(cp.getOffsets.keySet(), readCp.getOffsets.keySet())
assertEquals(cp.getOffsets, readCp.getOffsets)
assertEquals(cp, readCp)
}
@Test
def testMissingRootDirectoryShouldFailOnManagerCreation {
val cpm = new FileSystemCheckpointManager("some-job-name", new File(checkpointRoot + new Random().nextInt))
try {
cpm.start
fail("Expected an exception since root directory for fs checkpoint manager doesn't exist.")
} catch {
case e: SamzaException => None // this is expected
}
cpm.stop
}
} | vjagadish/samza-clone | samza-core/src/test/scala/org/apache/samza/checkpoint/file/TestFileSystemCheckpointManager.scala | Scala | apache-2.0 | 3,020 |
/**
* Copyright (C) 2014-2015 Really Inc. <http://really.io>
*/
/** Inspired by Twitter's Snowflake */
package io.really.quickSand
import org.joda.time.DateTime
import org.slf4j.LoggerFactory
class QuickSand(workerId: Long, datacenterId: Long, reallyEpoch: Long) {
def logger = LoggerFactory.getLogger("Quicksand")
logger.debug("QuickSand Started, workerId={} and datacenterId={}", workerId, datacenterId)
private[this] val workerIdBits = 5L
private[this] val datacenterIdBits = 5L
private[this] val maxWorkerId = -1L ^ (-1L << workerIdBits)
private[this] val maxDatacenterId = -1L ^ (-1L << datacenterIdBits)
private[this] val sequenceBits = 12L
private[this] val workerIdShift = sequenceBits
private[this] val datacenterIdShift = sequenceBits + workerIdBits
private[this] val timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits
private[this] val sequenceMask = -1L ^ (-1L << sequenceBits)
  private[this] var sequence = 0L
  private[this] var lastTimestamp = -1L
if (workerId > maxWorkerId || workerId < 0) {
throw new IllegalArgumentException("worker Id can't be greater than %d or less than 0".format(maxWorkerId))
}
if (datacenterId > maxDatacenterId || datacenterId < 0) {
throw new IllegalArgumentException("datacenter Id can't be greater than %d or less than 0".format(maxDatacenterId))
}
/**
   * Might throw an [[InvalidSystemClock]] exception if the clock is skewed, beware.
   * @return a new unique, roughly time-ordered 64-bit id
*/
def nextId(): Long = {
synchronized {
var timestamp = DateTime.now.getMillis
if (timestamp < lastTimestamp) {
logger.error("clock is moving backwards. Rejecting requests until %d.", lastTimestamp)
throw new InvalidSystemClock("Clock moved backwards. Refusing to generate id for %d milliseconds".format(
lastTimestamp - timestamp
))
}
if (lastTimestamp == timestamp) {
sequence = (sequence + 1) & sequenceMask
if (sequence == 0) {
timestamp = tilNextMillis(lastTimestamp)
}
} else {
sequence = 0
}
lastTimestamp = timestamp
((timestamp - reallyEpoch) << timestampLeftShift) |
(datacenterId << datacenterIdShift) |
(workerId << workerIdShift) |
sequence
}
}
protected def tilNextMillis(lastTimestamp: Long): Long = {
var timestamp = DateTime.now.getMillis
while (timestamp <= lastTimestamp) {
timestamp = DateTime.now.getMillis
}
timestamp
}
}
class InvalidSystemClock(message: String) extends Exception(message)
| reallylabs/really | modules/really-utils/src/main/scala/io/really/quickSand/QuickSand.scala | Scala | apache-2.0 | 2,574 |
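// ---------------------------------------------------------------------------
// Editor's note: a small, hedged usage sketch for the QuickSand generator
// defined above; it is not part of the original file. The worker/datacenter
// ids and the epoch value are arbitrary example values.
// ---------------------------------------------------------------------------
import io.really.quickSand.QuickSand

object QuickSandSketch extends App {
  val quickSand = new QuickSand(workerId = 1L, datacenterId = 1L, reallyEpoch = 1410765600000L)

  // Ids generated by a single instance are unique and roughly time-ordered
  val ids = (1 to 5).map(_ => quickSand.nextId())
  println(ids)
  assert(ids.distinct.size == ids.size && ids == ids.sorted)
}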
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import scala.math.Numeric._
import scala.math.Ordering
import org.apache.spark.sql.catalyst.util.{MathUtils, SQLOrderingUtil}
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.types.Decimal.DecimalIsConflicted
private[sql] object ByteExactNumeric extends ByteIsIntegral with Ordering.ByteOrdering {
private def checkOverflow(res: Int, x: Byte, y: Byte, op: String): Unit = {
if (res > Byte.MaxValue || res < Byte.MinValue) {
throw QueryExecutionErrors.binaryArithmeticCauseOverflowError(x, op, y)
}
}
override def plus(x: Byte, y: Byte): Byte = {
val tmp = x + y
checkOverflow(tmp, x, y, "+")
tmp.toByte
}
override def minus(x: Byte, y: Byte): Byte = {
val tmp = x - y
checkOverflow(tmp, x, y, "-")
tmp.toByte
}
override def times(x: Byte, y: Byte): Byte = {
val tmp = x * y
checkOverflow(tmp, x, y, "*")
tmp.toByte
}
override def negate(x: Byte): Byte = {
if (x == Byte.MinValue) { // if and only if x is Byte.MinValue, overflow can happen
throw QueryExecutionErrors.unaryMinusCauseOverflowError(x)
}
(-x).toByte
}
}
private[sql] object ShortExactNumeric extends ShortIsIntegral with Ordering.ShortOrdering {
private def checkOverflow(res: Int, x: Short, y: Short, op: String): Unit = {
if (res > Short.MaxValue || res < Short.MinValue) {
throw QueryExecutionErrors.binaryArithmeticCauseOverflowError(x, op, y)
}
}
override def plus(x: Short, y: Short): Short = {
val tmp = x + y
checkOverflow(tmp, x, y, "+")
tmp.toShort
}
override def minus(x: Short, y: Short): Short = {
val tmp = x - y
checkOverflow(tmp, x, y, "-")
tmp.toShort
}
override def times(x: Short, y: Short): Short = {
val tmp = x * y
checkOverflow(tmp, x, y, "*")
tmp.toShort
}
override def negate(x: Short): Short = {
    if (x == Short.MinValue) { // if and only if x is Short.MinValue, overflow can happen
throw QueryExecutionErrors.unaryMinusCauseOverflowError(x)
}
(-x).toShort
}
}
private[sql] object IntegerExactNumeric extends IntIsIntegral with Ordering.IntOrdering {
override def plus(x: Int, y: Int): Int = MathUtils.addExact(x, y)
override def minus(x: Int, y: Int): Int = MathUtils.subtractExact(x, y)
override def times(x: Int, y: Int): Int = MathUtils.multiplyExact(x, y)
override def negate(x: Int): Int = MathUtils.negateExact(x)
}
private[sql] object LongExactNumeric extends LongIsIntegral with Ordering.LongOrdering {
override def plus(x: Long, y: Long): Long = MathUtils.addExact(x, y)
override def minus(x: Long, y: Long): Long = MathUtils.subtractExact(x, y)
override def times(x: Long, y: Long): Long = MathUtils.multiplyExact(x, y)
override def negate(x: Long): Long = MathUtils.negateExact(x)
override def toInt(x: Long): Int =
if (x == x.toInt) {
x.toInt
} else {
throw QueryExecutionErrors.castingCauseOverflowError(x, "int")
}
}
private[sql] object FloatExactNumeric extends FloatIsFractional {
private val intUpperBound = Int.MaxValue
private val intLowerBound = Int.MinValue
private val longUpperBound = Long.MaxValue
private val longLowerBound = Long.MinValue
override def toInt(x: Float): Int = {
// When casting floating values to integral types, Spark uses the method `Numeric.toInt`
    // or `Numeric.toLong` directly. For positive floating values, it is equivalent to `Math.floor`;
// for negative floating values, it is equivalent to `Math.ceil`.
// So, we can use the condition `Math.floor(x) <= upperBound && Math.ceil(x) >= lowerBound`
// to check if the floating value x is in the range of an integral type after rounding.
// This condition applies to converting Float/Double value to any integral types.
if (Math.floor(x) <= intUpperBound && Math.ceil(x) >= intLowerBound) {
x.toInt
} else {
throw QueryExecutionErrors.castingCauseOverflowError(x, "int")
}
}
override def toLong(x: Float): Long = {
if (Math.floor(x) <= longUpperBound && Math.ceil(x) >= longLowerBound) {
x.toLong
} else {
throw QueryExecutionErrors.castingCauseOverflowError(x, "int")
}
}
override def compare(x: Float, y: Float): Int = SQLOrderingUtil.compareFloats(x, y)
}
private[sql] object DoubleExactNumeric extends DoubleIsFractional {
private val intUpperBound = Int.MaxValue
private val intLowerBound = Int.MinValue
private val longUpperBound = Long.MaxValue
private val longLowerBound = Long.MinValue
override def toInt(x: Double): Int = {
if (Math.floor(x) <= intUpperBound && Math.ceil(x) >= intLowerBound) {
x.toInt
} else {
throw QueryExecutionErrors.castingCauseOverflowError(x, "int")
}
}
override def toLong(x: Double): Long = {
if (Math.floor(x) <= longUpperBound && Math.ceil(x) >= longLowerBound) {
x.toLong
} else {
throw QueryExecutionErrors.castingCauseOverflowError(x, "long")
}
}
override def compare(x: Double, y: Double): Int = SQLOrderingUtil.compareDoubles(x, y)
}
private[sql] object DecimalExactNumeric extends DecimalIsConflicted {
override def toInt(x: Decimal): Int = x.roundToInt()
override def toLong(x: Decimal): Long = x.roundToLong()
}
| shaneknapp/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/types/numerics.scala | Scala | apache-2.0 | 6,127 |
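// ---------------------------------------------------------------------------
// Editor's note: a hedged sketch (not part of Spark) illustrating the overflow
// behaviour provided by the exact numerics above. Because the objects are
// private[sql], the sketch is placed under org.apache.spark.sql.types; the
// concrete exception type raised by QueryExecutionErrors is left unspecified.
// ---------------------------------------------------------------------------
package org.apache.spark.sql.types

import scala.util.Try

object ExactNumericSketch extends App {
  // In range: behaves like ordinary byte arithmetic
  println(ByteExactNumeric.plus(100.toByte, 27.toByte)) // 127

  // Out of range: plain Byte math would silently wrap to -128,
  // ByteExactNumeric raises an overflow error instead
  println(Try(ByteExactNumeric.plus(100.toByte, 28.toByte))) // Failure(...)

  // Same idea for narrowing casts from floating point
  println(Try(FloatExactNumeric.toInt(3.0e10f)))             // Failure(...)
}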
package com.psyanite.scorm.node
import scala.xml.NodeSeq
case class Metadata (
var schema: Option[String],
var schemaVersion: Option[String],
var scheme: Option[String]
)
object Metadata extends BaseNode {
def apply(name: NodeSeq, version: NodeSeq, scheme: NodeSeq): Metadata = {
new Metadata(getText(name), getText(version), getText(scheme))
}
}
| psyanite/scorm-parser | src/main/scala/com/psyanite/scorm/node/Metadata.scala | Scala | apache-2.0 | 393 |
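// ---------------------------------------------------------------------------
// Editor's note: a tiny, hedged construction sketch for the Metadata case class
// above; it is not part of the original file and the field values are invented.
// ---------------------------------------------------------------------------
import com.psyanite.scorm.node.Metadata

object MetadataSketch extends App {
  // schema, schemaVersion and scheme are all optional
  val md = Metadata(Some("ADL SCORM"), Some("1.2"), None)
  println(md.schema.getOrElse("no schema declared"))
}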
/**
* Copyright 2015 Vaishaal Shankar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.akmorrow13.endive.pipelines
import breeze.linalg._
import net.akmorrow13.endive.EndiveConf
import net.akmorrow13.endive.processing.Sequence
import net.akmorrow13.endive.utils._
import nodes.learning._
import nodes.nlp._
import nodes.stats.TermFrequency
import nodes.util.CommonSparseFeatures
import nodes.util.{Identity, Cacher, ClassLabelIndicatorsFromIntLabels, TopKClassifier, MaxClassifier, VectorCombiner}
import utils.{Image, MatrixUtils, Stats, ImageMetadata, LabeledImage, RowMajorArrayVectorizedImage, ChannelMajorArrayVectorizedImage}
import com.github.fommil.netlib.BLAS
import evaluation.BinaryClassifierEvaluator
import org.apache.log4j.{Level, Logger}
import org.apache.parquet.filter2.dsl.Dsl.{BinaryColumn, _}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.bdgenomics.adam.models.ReferenceRegion
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.formats.avro._
import org.kohsuke.args4j.{Option => Args4jOption}
import org.yaml.snakeyaml.constructor.Constructor
import org.yaml.snakeyaml.Yaml
import pipelines.Logging
object BlasTest extends Serializable with Logging {
  /**
   * Just prints the version of BLAS in use on the driver and the executors.
   *
   * @param args command-line arguments (unused)
   */
def main(args: Array[String]) = {
val conf = new SparkConf().setAppName("BlasTest")
conf.setIfMissing("spark.master", "local[4]")
val sc = new SparkContext(conf)
Logger.getLogger("org").setLevel(Level.WARN)
Logger.getLogger("akka").setLevel(Level.WARN)
val blasVersion = BLAS.getInstance().getClass().getName()
println(s"Currently used version of blas (in driver) is ${blasVersion}")
val blasVersionSlaves = sc.parallelize((0 until 100)).map { x => BLAS.getInstance().getClass().getName() }.collect().toSet.mkString(",")
println(s"Currently used version of blas (in slaves) is ${blasVersionSlaves}")
}
}
| akmorrow13/endive | src/main/scala/net/akmorrow13/endive/pipelines/BlasTest.scala | Scala | apache-2.0 | 2,581 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2010, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.xml
/** A hack to group XML nodes in one node for output.
*
* @author Burak Emir
* @version 1.0
*/
@serializable
final case class Group(val nodes: Seq[Node]) extends Node {
override def theSeq = nodes
override def canEqual(other: Any) = other match {
case x: Group => true
case _ => false
}
override def strict_==(other: Equality) = other match {
case Group(xs) => nodes sameElements xs
case _ => false
}
override def basisForHashCode = nodes
/** Since Group is very much a hack it throws an exception if you
* try to do anything with it.
*/
private def fail(msg: String) = throw new UnsupportedOperationException("class Group does not support method '%s'" format msg)
def label = fail("label")
override def attributes = fail("attributes")
override def namespace = fail("namespace")
override def child = fail("child")
def buildString(sb: StringBuilder) = fail("toString(StringBuilder)")
}
| cran/rkafkajars | java/scala/xml/Group.scala | Scala | apache-2.0 | 1,582 |
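// ---------------------------------------------------------------------------
// Editor's note: a hedged sketch of how the Group hack above behaves; it is
// not part of the original file and assumes a scala.xml version matching it.
// ---------------------------------------------------------------------------
import scala.xml.Group

object GroupSketch extends App {
  val g = Group(Seq(<a/>, <b/>))

  // The only safe thing to do with a Group is to look at the wrapped sequence;
  // label, attributes, namespace, child, etc. throw UnsupportedOperationException.
  println(g.theSeq)                            // List(<a/>, <b/>)
  println(g.strict_==(Group(Seq(<a/>, <b/>)))) // true: compares the wrapped nodes
}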
/*
Copyright (c) 2009-2012, The Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California nor the names of
its contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.cdlib.was.weari;
import org.apache.solr.common.SolrInputDocument;
import org.cdlib.was.weari.MediaTypeGroup.groupWrapper;
import org.cdlib.was.weari.SolrFields._;
import org.cdlib.was.weari.SolrUtils.addField;
object ParsedArchiveRecordSolrizer {
private def getContent (rec : ParsedArchiveRecord) : Option[String] =
if (shouldIndexContent(rec.suppliedContentType) ||
shouldIndexContent(rec.detectedContentType.getOrElse(ContentType.DEFAULT)))
rec.content
else None;
private def shouldIndexContent (contentType : ContentType) : Boolean = {
/* Right now we index everything except audio, video, image, js, & css */
contentType.top match {
case "audio" | "video" | "image" => false;
case "text" => contentType.sub match {
case "javascript" | "css" => false;
case _ => true;
}
case "application" => contentType.sub match {
case "zip" => false;
case _ => true;
      }
      /* any other top-level type falls under "everything else" and is indexed, per the comment above */
      case _ => true;
    }
}
def convert (rec : ParsedArchiveRecord) : SolrInputDocument = {
val doc = new SolrInputDocument;
/* set the fields */
val detected = rec.detectedContentType;
val supplied = rec.suppliedContentType;
val boost = 1.0f;
addField(doc, ARCNAME_FIELD, rec.getFilename);
addField(doc, ID_FIELD, "%s.%s".format(rec.canonicalUrl, rec.getDigest.getOrElse("-")));
addField(doc, HOST_FIELD, rec.canonicalHost);
addField(doc, CANONICALURL_FIELD, rec.canonicalUrl);
addField(doc, URL_FIELD, rec.getUrl);
addField(doc, URLFP_FIELD, rec.urlFingerprint);
addField(doc, DIGEST_FIELD, rec.getDigest);
addField(doc, DATE_FIELD, rec.getDate.toDate);
addField(doc, TITLE_FIELD, rec.title);
addField(doc, CONTENT_LENGTH_FIELD, rec.getLength);
addField(doc, CONTENT_FIELD, getContent(rec));
addField(doc, MEDIA_TYPE_GROUP_DET_FIELD, detected.flatMap(_.mediaTypeGroup));
addField(doc, MEDIA_TYPE_SUP_FIELD, supplied.mediaType);
addField(doc, CHARSET_SUP_FIELD, supplied.charset);
addField(doc, MEDIA_TYPE_DET_FIELD, detected.map(_.mediaType));
addField(doc, CHARSET_DET_FIELD, detected.flatMap(_.charset));
doc.setDocumentBoost(boost);
return doc;
}
}
| cdlib/weari | src/main/scala/org/cdlib/was/weari/ParsedArchiveRecordSolrizer.scala | Scala | bsd-3-clause | 3,748 |
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
import akka.actor.Actor
import akka.actor.Props
import akka.actor.actorRef2Scala
import org.apache.spark.SparkConf
import org.apache.spark.SparkEnv
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.ActorHelper
import org.apache.spark.rdd.PairRDDFunctions
import org.apache.spark.streaming.dstream.PairDStreamFunctions
import org.apache.spark.streaming._
class HelloerMv extends Actor with ActorHelper {
override def preStart() = {
println("")
println("=== HelloerMv is starting up ===")
println(s"=== path=${context.self.path} ===")
println("")
}
def receive = {
case (k: String, v: BigDecimal) =>
store((k, v))
}
}
class HelloerAdj extends Actor with ActorHelper {
override def preStart() = {
println("")
println("=== HelloerAdj is starting up ===")
println(s"=== path=${context.self.path} ===")
println("")
}
def receive = {
case (k: String, v: BigDecimal) =>
store((k, v))
}
}
object StreamingApp2 {
def main(args: Array[String]) {
// Configuration for a Spark application.
// Used to set various Spark parameters as key-value pairs.
val driverPort = 7777
val driverHost = "localhost"
val conf = new SparkConf(false) // skip loading external settings
.setMaster("local[*]") // run locally with enough threads
.setAppName("Spark Streaming with Scala and Akka") // name in Spark web UI
.set("spark.logConf", "true")
.set("spark.driver.port", s"$driverPort")
.set("spark.driver.host", s"$driverHost")
.set("spark.akka.logLifecycleEvents", "true")
val ssc = new StreamingContext(conf, Seconds(1))
ssc.checkpoint(".")
val actorNameMv = "helloerMv"
val actorStreamMv: ReceiverInputDStream[(String, BigDecimal)] = ssc.actorStream[(String, BigDecimal)](Props[HelloerMv], actorNameMv)
// actorStreamMv.print()
val actorNameAdj = "helloerAdj"
val actorStreamAdj: ReceiverInputDStream[(String, BigDecimal)] = ssc.actorStream[(String, BigDecimal)](Props[HelloerAdj], actorNameAdj)
// actorStreamAdj.print()
val actorStreamAll = new PairDStreamFunctions(actorStreamMv).cogroup(actorStreamAdj)
val actorStreamLast = new PairDStreamFunctions(actorStreamAll).mapValues(x => (x._1.lastOption, x._2.lastOption))
def updateFunc(newValuePairs: Seq[(Option[BigDecimal], Option[BigDecimal])], state: Option[(BigDecimal, BigDecimal)]): Option[(BigDecimal, BigDecimal)] = {
val currentState = state match {
case Some(v) => v
case None => (BigDecimal(0), BigDecimal(0))
}
val newState = newValuePairs match {
case xs :+ x =>
x match {
case (Some(v), None) => Some(v, currentState._2)
case (None, Some(v)) => Some(currentState._1, v)
case (None, None) => state
case (Some(v1), Some(v2)) => Some(v1, v2)
}
case _ => state
}
newState
}
val actorStreamSum = new PairDStreamFunctions(actorStreamLast).updateStateByKey[(BigDecimal, BigDecimal)](updateFunc _)
//actorStreamLast.print()
actorStreamSum.map(x => (x._1, x._2._1 + x._2._2)).print()
ssc.start()
Thread.sleep(3 * 1000) // wish I knew a better way to handle the asynchrony
import scala.concurrent.duration._
val actorSystem = SparkEnv.get.actorSystem
val urlMv = s"akka.tcp://spark@$driverHost:$driverPort/user/Supervisor0/$actorNameMv"
val urlAdj = s"akka.tcp://spark@$driverHost:$driverPort/user/Supervisor1/$actorNameAdj"
val timeout = 5 seconds
val helloerMv = Await.result(actorSystem.actorSelection(urlMv).resolveOne(timeout), timeout)
val helloerAdj = Await.result(actorSystem.actorSelection(urlAdj).resolveOne(timeout), timeout)
helloerMv ! ("C123", BigDecimal(100))
helloerAdj ! ("C123", BigDecimal(10))
helloerMv ! ("C123", BigDecimal(120))
helloerAdj ! ("C123", BigDecimal(20))
helloerAdj ! ("C123", BigDecimal(-10))
val stopSparkContext = true
val stopGracefully = true
ssc.stop(stopSparkContext, stopGracefully)
}
} | charlesxucheng/spark-streaming-scala | src/main/scala/StreamingApp2.scala | Scala | apache-2.0 | 4,338 |
package se.citerus.dddsample.application.impl
import org.apache.commons.logging.Log
import org.apache.commons.logging.LogFactory;
import se.citerus.dddsample.domain.model.cargo._;
import org.springframework.transaction.annotation.Transactional;
import se.citerus.dddsample.application.BookingService;
import se.citerus.dddsample.domain.model.location.Location;
import se.citerus.dddsample.domain.model.location.LocationRepository;
import se.citerus.dddsample.domain.model.location.UnLocode;
import se.citerus.dddsample.domain.service.RoutingService;
import java.util.Date;
class BookingServiceImpl(cargoRepository:CargoRepository,
locationRepository:LocationRepository,
routingService:RoutingService) extends BookingService {
private val logger = LogFactory.getLog(getClass());
@Transactional
def bookNewCargo(originUnLocode:UnLocode,
destinationUnLocode:UnLocode,
arrivalDeadline:Date ) : TrackingId = {
val trackingId = cargoRepository.nextTrackingId();
val origin = locationRepository.find(originUnLocode).getOrElse { throw new IllegalArgumentException("origin not found") }
val destination = locationRepository.find(destinationUnLocode).getOrElse { throw new IllegalArgumentException("destination not found") };
val routeSpecification = new RouteSpecification(origin, destination, arrivalDeadline);
val cargo = new Cargo(trackingId, routeSpecification);
cargoRepository.store(cargo);
logger.info("Booked new cargo with tracking id " + cargo.trackingId.idString);
return cargo.trackingId;
}
@Transactional
def requestPossibleRoutesForCargo(trackingId:TrackingId) : List[Itinerary] = {
var cargo = cargoRepository.find(trackingId).getOrElse { return List() };
routingService.fetchRoutesForSpecification(cargo.routeSpecification);
}
@Transactional
def assignCargoToRoute(itinerary:Itinerary, trackingId:TrackingId) {
val cargo = cargoRepository.find(trackingId).getOrElse { throw new IllegalArgumentException("Can't assign itinerary to non-existing cargo " + trackingId) }
cargo.assignToRoute(itinerary)
cargoRepository.store(cargo)
logger.info("Assigned cargo " + trackingId + " to new route");
}
@Transactional
def changeDestination(trackingId:TrackingId, unLocode:UnLocode) {
val cargo = cargoRepository.find(trackingId).getOrElse { throw new IllegalArgumentException("Can't find cargo with tracking id = " + trackingId) }
val newDestination = locationRepository.find(unLocode).getOrElse { throw new IllegalArgumentException("location not found") };
val routeSpecification = new RouteSpecification(cargo.origin, newDestination, cargo.routeSpecification.arrivalDeadline);
cargo.specifyNewRoute(routeSpecification);
cargoRepository.store(cargo);
logger.info("Changed destination for cargo " + trackingId + " to " + routeSpecification.destination);
}
} | oluies/ddd-sample-scala | src/main/scala/se/citerus/dddsample/application/impl/BookingServiceImpl.scala | Scala | mit | 3,034 |
/***
* Copyright 2017 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker
import com.rackspace.com.papi.components.checker.RunAssertionsHandler._
import com.rackspace.com.papi.components.checker.servlet._
import com.rackspace.com.papi.components.checker.step.results.Result
import com.rackspace.cloud.api.wadl.Converters._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import scala.collection.JavaConversions._
import scala.xml.Elem
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.JsonNode
@RunWith(classOf[JUnitRunner])
class ValidatorWADLAssertSuite extends BaseValidatorSuite {
///
/// Configs
///
val baseConfig = {
val c = TestConfig()
c.xpathVersion = 31
c.removeDups = false
c.checkWellFormed = false
c.checkPlainParams = false
c.enableCaptureHeaderExtension = false
c.enableAssertExtension = false
c
}
val baseWithAssert = {
val c = TestConfig()
c.xpathVersion = 31
c.removeDups = false
c.checkWellFormed = false
c.checkPlainParams = false
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c
}
val baseWithAssertRemoveDups = {
val c = TestConfig()
c.xpathVersion = 31
c.removeDups = true
c.checkWellFormed = false
c.checkPlainParams = false
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c
}
val baseWithAssertRemoveDupsMethodLabels = {
val c = TestConfig()
c.xpathVersion = 31
c.removeDups = true
c.checkWellFormed = false
c.checkPlainParams = false
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.preserveMethodLabels = true
c
}
val baseWithAssertParamDefaults = {
val c = TestConfig()
c.removeDups = false
c.checkWellFormed = false
c.checkPlainParams = false
c.enableCaptureHeaderExtension = false
c.setParamDefaults = true
c.checkHeaders = true
c.enableAssertExtension = true
c
}
val baseWithAssertParamDefaultsRemoveDups = {
val c = TestConfig()
c.removeDups = true
c.checkWellFormed = false
c.checkPlainParams = false
c.enableCaptureHeaderExtension = false
c.setParamDefaults = true
c.checkHeaders = true
c.enableAssertExtension = true
c
}
val baseWithAssertParamDefaultsRemoveDupsMethodLabels = {
val c = TestConfig()
c.removeDups = true
c.checkWellFormed = false
c.checkPlainParams = false
c.enableCaptureHeaderExtension = false
c.setParamDefaults = true
c.checkHeaders = true
c.enableAssertExtension = true
c.preserveMethodLabels = true
c
}
val baseAssertWithPlainParams = {
val c = TestConfig()
c.removeDups = false
c.checkWellFormed = true
c.checkPlainParams = true
c.checkElements = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c
}
val baseAssertWithJoinXPaths = {
val c = TestConfig()
c.removeDups = false
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.checkElements = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c
}
val baseAssertWithJoinXPathsMethodLabels = {
val c = TestConfig()
c.removeDups = false
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.checkElements = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.preserveMethodLabels = true
c
}
val baseAssertWithJoinXPathsAndRemoveDups = {
val c = TestConfig()
c.removeDups = true
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.checkElements = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c
}
val baseAssertWithJoinXPathsAndRemoveDupsMethodLabels = {
val c = TestConfig()
c.removeDups = true
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.checkElements = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.preserveMethodLabels = true
c
}
// RaxRoles Configs
val baseWithPlainParamsRaxRoles = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.removeDups = false
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c
}
val baseWithRemoveDupsRaxRoles = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.removeDups = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c
}
val baseWithRemoveDupsRaxRolesMethodLabels = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.removeDups = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c.preserveMethodLabels = true
c
}
val baseWithJoinXPathsRaxRoles = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.removeDups = false
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c
}
val baseWithJoinXPathsAndRemoveDupsRaxRoles = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.removeDups = true
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c
}
val baseWithJoinXPathsAndRemoveDupsRaxRolesMethodLabels = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.removeDups = true
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c.preserveMethodLabels = true
c
}
// RaxRoles Configs Masked
val baseWithPlainParamsRaxRolesMask = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.maskRaxRoles403 = true
c.removeDups = false
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c
}
val baseWithRemoveDupsRaxRolesMask = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.maskRaxRoles403 = true
c.removeDups = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c
}
val baseWithRemoveDupsRaxRolesMaskMethodLabels = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.maskRaxRoles403 = true
c.removeDups = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c.preserveMethodLabels = true
c
}
val baseWithJoinXPathsRaxRolesMask = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.maskRaxRoles403 = true
c.removeDups = false
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c
}
val baseWithJoinXPathsAndRemoveDupsRaxRolesMask = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.maskRaxRoles403 = true
c.removeDups = true
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c
}
val baseWithJoinXPathsAndRemoveDupsRaxRolesMaskMethodLabels = {
val c = TestConfig()
c.enableRaxRolesExtension = true
c.maskRaxRoles403 = true
c.removeDups = true
c.joinXPathChecks = true
c.checkWellFormed = true
c.checkPlainParams = true
c.enableCaptureHeaderExtension = false
c.setParamDefaults = false
c.enableAssertExtension = true
c.checkElements = true
c.preserveMethodLabels = true
c
}
val WADL_withAsserts = <application xmlns="http://wadl.dev.java.net/2009/02"
xmlns:rax="http://docs.rackspace.com/api"
xmlns:req="http://www.rackspace.com/repose/wadl/checker/request"
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:tst="test.org"
xmlns:tst2="http://www.rackspace.com/repose/wadl/checker/step/test">
<resources base="https://test.api.openstack.com">
<resource path="/a" rax:roles="user">
<method name="PUT">
<request>
<representation mediaType="application/xml" element="tst:some_xml">
<param name="test" style="plain" path="tst:some_xml/tst:an_element/tst:another_element" required="true"/>
<rax:assert test="tst:some_xml/@att='1'" message="expect att to = 1"/>
<rax:assert test="$body/tst:some_xml/tst:an_element" message="expect an_element"/>
</representation>
<representation mediaType="application/json"/>
<representation mediaType="text/x-yaml">
<rax:assert test="false()" message="YAML makes us fail!" code="500"/>
</representation>
<!-- assertion should be placed in all representations -->
<representation>
<rax:assert test="not(empty($body))" message="There should be a body"/>
<rax:assert test="not(empty($_))" message="There should be a $_"/>
</representation>
</request>
</method>
<method name="POST">
<request>
<representation mediaType="application/xml" element="tst2:a">
<param name="test2" style="plain" path="tst2:a/@id" required="true" rax:message="Expecting an id attribute"/>
<!-- This assertion applies only to POST /a when the representation is XML -->
<rax:assert test="$req:method='POST' and $req:uri='/a' and /tst2:a" message="This assertion should never fire!!" code="500"/>
<rax:assert test="$req:method='POST' and $req:uri='/a' and $_/tst2:a" message="This assertion should never fire!!" code="500"/>
</representation>
<representation mediaType="application/json">
<param name="test3" style="plain" path="$_?firstName" required="true" rax:message="Need a first name" rax:code="403"/>
</representation>
</request>
</method>
<resource path="b">
<method name="GET"/>
<method name="DELETE" rax:roles="admin Administrator">
<request>
<param name="X-AUTH" style="header" type="xs:string" default="foo!" required="true" repeating="true"/>
<!-- Should be treated as a request assertion -->
<representation>
<rax:assert test="$req:method='DELETE'"/>
<rax:assert test="req:header('X-AUTH') = 'foo!'"/>
</representation>
</request>
</method>
<method name="POST">
<request>
<representation mediaType="application/xml">
</representation>
<representation mediaType="application/json">
<!-- This assertion applies only to POST /a/b if the representation is JSON -->
<rax:assert test="$req:uri='/a/b' and not(empty($body))" message="The request path should be /a/b and there should be a JSON body" code="400"/>
<rax:assert test="$_?stuff?string = 'A String'" message="Expect JSON to have 'A String'"/>
</representation>
<!-- This assertion applies only to POST /a/b request regardless of the representation-->
<rax:assert test="$req:uri='/a/b'" message="The request path should be /a/b" code="400"/>
</request>
</method>
</resource>
<resource path="z">
<method name="PATCH">
<request>
<representation mediaType="application/xml" element="tst2:a">
<param name="test2" style="plain" path="tst2:a/@id" required="true" rax:message="Expecting an id attribute"/>
<rax:assert test="$req:method='PATCH' and $req:uri='/a' and /tst2:a" message="This assertion should never fire!!" code="500"/>
</representation>
</request>
</method>
<method name="PATCH" rax:roles="#all">
<request>
<representation mediaType="application/json" rax:roles="#all">
<param name="test3" style="plain" path="$_?firstName" required="true" rax:message="Need a first name" rax:code="403"/>
</representation>
</request>
</method>
</resource>
<!-- This assertion applies to all requests in the resource /a -->
<rax:assert test="$req:uri='/a'" message="The request path should be /a" code="400"/>
<rax:assert test="$req:uriLevel = 1" message="Bad URL Level this shouldn't happen" code="500"/>
<rax:assert test="some $h in $req:headerNames satisfies starts-with($h, 'a')" message="There should be a header that starts with a"/>
<!-- This assertion applies to all requests in the resource /a AND all subresources of a /a/b for example-->
<rax:assert test="some $h in $req:headerNames satisfies starts-with($h,'b')" message="There should be a header that starts with b" code="400" applyToChildren="true"/>
</resource>
<!-- This assertion applies to all requests in the WADL -->
<rax:assert test="'foo!' = req:headers('X-AUTH', true())" message="The X-AUTH header should always be specified and it should be foo!" code="400" />
</resources>
</application>
val WADL_withAsserts2 = <application xmlns="http://wadl.dev.java.net/2009/02"
xmlns:rax="http://docs.rackspace.com/api"
xmlns:req="http://www.rackspace.com/repose/wadl/checker/request"
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:tst="test.org"
xmlns:tst2="http://www.rackspace.com/repose/wadl/checker/step/test"
>
<resources base="https://test.api.openstack.com">
<resource path="/a" rax:roles="user">
<method name="PUT">
<request>
<representation mediaType="application/xml" element="tst:some_xml">
<param name="test" style="plain" path="tst:some_xml/tst:an_element/tst:another_element" required="true"/>
<rax:assert test="tst:some_xml/@att='1'" message="expect att to = 1"/>
<rax:assert test="$req:body/tst:some_xml/tst:an_element" message="expect an_element"/>
</representation>
<representation mediaType="application/json"/>
<representation mediaType="text/x-yaml">
<rax:assert test="false()" message="YAML makes us fail!" code="500"/>
</representation>
<!-- assertion should be placed in all representations -->
<representation>
<rax:assert test="not(empty($req:body))" message="There should be a body"/>
<rax:assert test="not(empty($req:_))" message="There should be a $req:_"/>
</representation>
</request>
</method>
<method name="POST">
<request>
<representation mediaType="application/xml" element="tst2:a">
<param name="test2" style="plain" path="tst2:a/@id" required="true" rax:message="Expecting an id attribute"/>
<!-- This assertion applies only to POST /a when the representation is XML -->
<rax:assert test="$req:method='POST' and $req:uri='/a' and /tst2:a" message="This assertion should never fire!!" code="500"/>
<rax:assert test="$req:method='POST' and $req:uri='/a' and $req:_/tst2:a" message="This assertion should never fire!!" code="500"/>
</representation>
<representation mediaType="application/json">
<param name="test3" style="plain" path="$ _?firstName" required="true" rax:message="Need a first name" rax:code="403"/>
</representation>
</request>
</method>
<resource path="b">
<method name="GET"/>
<method name="DELETE" rax:roles="admin Administrator">
<request>
<param name="X-AUTH" style="header" type="xs:string" default="foo!" required="true" repeating="true"/>
<!-- Should be treated as a request assertion -->
<representation>
<rax:assert test="$req:method='DELETE'"/>
<rax:assert test="req:header('X-AUTH') = 'foo!'"/>
</representation>
</request>
</method>
<method name="POST">
<request>
<representation mediaType="application/xml">
</representation>
<representation mediaType="application/json">
<!-- This assertion applies only to POST /a/b if the representation is JSON -->
<rax:assert test="$req:uri='/a/b' and not(empty($req:body))" message="The request path should be /a/b and there should be a JSON body" code="400"/>
<rax:assert test="$req:_?stuff?string = 'A String'" message="Expect JSON to have 'A String'"/>
</representation>
<!-- This assertion applies only to POST /a/b request regardless of the representation-->
<rax:assert test="$req:uri='/a/b'" message="The request path should be /a/b" code="400"/>
</request>
</method>
</resource>
<resource path="z">
<method name="PATCH">
<request>
<representation mediaType="application/xml" element="tst2:a">
<param name="test2" style="plain" path="tst2:a/@id" required="true" rax:message="Expecting an id attribute"/>
<rax:assert test="$req:method='PATCH' and $req:uri='/a' and /tst2:a" message="This assertion should never fire!!" code="500"/>
</representation>
</request>
</method>
<method name="PATCH" rax:roles="#all">
<request>
<representation mediaType="application/json" rax:roles="#all">
<param name="test3" style="plain" path="$_?firstName" required="true" rax:message="Need a first name" rax:code="403"/>
</representation>
</request>
</method>
</resource>
<!-- This assertion applies to all requests in the resource /a -->
<rax:assert test="$req:uri='/a'" message="The request path should be /a" code="400"/>
<rax:assert test="$req:uriLevel = 1" message="Bad URL Level this shouldn't happen" code="500"/>
<rax:assert test="some $h in $req:headerNames satisfies starts-with($h, 'a')" message="There should be a header that starts with a"/>
<!-- This assertion applies to all requests in the resource /a AND all subresources of a /a/b for example-->
<rax:assert test="some $h in $req:headerNames satisfies starts-with($h,'b')" message="There should be a header that starts with b" code="400" applyToChildren="true"/>
</resource>
<!-- This assertion applies to all requests in the WADL -->
<rax:assert test="'foo!' = req:headers('X-AUTH', true())" message="The X-AUTH header should always be specified and it should be foo!" code="400" />
</resources>
</application>
//
// Config combinations
//
val assertDisabledConfigs = Map[String, Config]("base config with asserts disabled"->baseConfig)
val assertEnabledConfigs = Map[String, Config]("Config with asserts enabled"->baseWithAssert,
"Config with asserts and remove dups"->baseWithAssertRemoveDups,
"Config with asserts and remove dups preserve method labels "->baseWithAssertRemoveDupsMethodLabels
)
val assertEnabledParamDefaultConfigs = Map[String, Config]("Config with asserts enabled, param defaults"->baseWithAssertParamDefaults,
"Config with asserts enabled, param defaults and remove dups"->baseWithAssertParamDefaultsRemoveDups,
"Config with asserts enabled, param defaults and remove dups preserve method labels"->baseWithAssertParamDefaultsRemoveDupsMethodLabels
)
val assertEnabledPlainParamConfigs = Map[String, Config]("Config with asserts enabled, plain params"->baseAssertWithPlainParams,
"Config with asserts enabled, plain params join XPath"->baseAssertWithJoinXPaths,
"Config with asserts enabled, plain params join XPath, remove dups"->baseAssertWithJoinXPathsAndRemoveDups,
"Config with asserts enabled, plain params join XPath preserve method labels"->baseAssertWithJoinXPathsMethodLabels,
"Config with asserts enabled, plain params join XPath, remove dups preserve method labels"->baseAssertWithJoinXPathsAndRemoveDupsMethodLabels
)
val assertEnabledPlainRaxRoles = Map[String, Config]("Config with asserts enabled, plain params, rax roles"->baseWithPlainParamsRaxRoles,
"Config with asserts enabled, plain params, rax roles, remove dups"->baseWithRemoveDupsRaxRoles,
"Config with asserts enabled, plain params, rax roles, remove dups method labels"->baseWithRemoveDupsRaxRolesMethodLabels,
"Config with asserts enabled, plain params, rax roles, join xpath"->baseWithJoinXPathsRaxRoles,
"Config with asserts enabled, plain params, rax roles, remove dups, join xpath"->baseWithJoinXPathsAndRemoveDupsRaxRoles,
"Config with asserts enabled, plain params, rax roles, remove dups, join xpath, preserve method labels"->baseWithJoinXPathsAndRemoveDupsRaxRolesMethodLabels)
val assertEnabledPlainRaxRolesMask = Map[String, Config]("Config with asserts enabled, plain params, rax roles masked"->baseWithPlainParamsRaxRolesMask,
"Config with asserts enabled, plain params, rax roles masked, remove dups"->baseWithRemoveDupsRaxRolesMask,
"Config with asserts enabled, plain params, rax roles masked, remove dups, preserve method labels"->baseWithRemoveDupsRaxRolesMaskMethodLabels,
"Config with asserts enabled, plain params, rax roles masked, join xpath"->baseWithJoinXPathsRaxRolesMask,
"Config with asserts enabled, plain params, rax roles masked, remove dups, join xpath, preserve method labels"->baseWithJoinXPathsAndRemoveDupsRaxRolesMaskMethodLabels)
//
// WADL combinations
//
val assertWADLs = Map[String, Elem]("WADL with $body assertions"->WADL_withAsserts,
"WADL with $req:body assertions"->WADL_withAsserts2)
//
// Assertions!
//
def happyPathAssertions(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A PUT on /a should validate with goodXML on $wadlDesc with $configDesc") {
validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A PUT on /a should validate with goodJSON on $wadlDesc with $configDesc") {
validator.validate(request("PUT", "/a", "application/json",goodJSON, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A POST on /a should validate with goodXML on $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a", "application/xml",goodXML_XSD2, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A POST on /a should validate with goodJSON on $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a", "application/json",goodJSON_Schema1, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A GET on /a/b should validate on $wadlDesc with $configDesc") {
validator.validate(request("GET", "/a/b", null, "", false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A DELETE on /a/b should validate on $wadlDesc with $configDesc") {
validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user", "Administrator"))), response, chain)
}
test (s"A POST on /a/b should validate with good XML on $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a/b", "application/xml", goodXML, false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A POST on /a/b should validate with good JSON on $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a/b", "application/json", goodJSON, false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
}
def happyWhenAssertionsAreDisabled(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A PUT on /a should validate with goodXML with an @att=2 on $wadlDesc with $configDesc") {
validator.validate(request("PUT", "/a", "application/xml",
<some_xml att='2' xmlns='test.org'>
<an_element>
<another_element />
</an_element>
</some_xml>
, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A PUT on /a should validate with goodXML but only X-Roles headers on $wadlDesc with $configDesc") {
validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("X-Roles"->List("user"))), response, chain)
}
test (s"A PUT on /a should validate with goodXML that does not contain an_element $wadlDesc with $configDesc") {
validator.validate(request("PUT", "/a", "application/xml",
<some_xml att='1' xmlns='test.org'>
<another_element>
<yet_another_element />
</another_element>
</some_xml>
, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A PUT on /a should validate with YAML $wadlDesc with $configDesc") {
validator.validate(request("PUT", "/a", "text/x-yaml","""---
- name: Hello World!
""", false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A PUT on /a should validate with empty JSON body $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a/b", "application/json", "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A PUT on /a should validate with good JSON body and only X-Roles headers $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a/b", "application/json", goodJSON, false,
Map[String,List[String]]("X-Roles"->List("user"))), response, chain)
}
test (s"A POST on /a should validate with goodXML (wrong schema) on $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A POST on /a should validate with goodJSON (wrong schema) on $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a", "application/json",""" { "foo" : "bar" } """, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A DELETE on /a/b should validate on with only X-ROLES headers $wadlDesc with $configDesc") {
validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("X-Roles"->List("user","admin"))), response, chain)
}
test (s"A POST on /a/b should validate with empty JSON body only X-ROLES headers $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a/b", "application/json", null.asInstanceOf[String], false,
Map[String,List[String]]("X-Roles"->List("user"))), response, chain)
}
}
//
// These are just sanity checks against the WADL, they should never validate
//
def happySadPaths (validator : Validator, wadlDesc : String, configDesc : String) {
test (s"Plain text PUT should fail on /a $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT","/a","plain/text","hello!", false,
Map[String,List[String]]("X-Roles"->List("user"))), response, chain),
415, List("content type","application/xml","application/json","text/x\\\\-yaml"))
}
test (s"A PATCH on /a should fail with a 405 on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PATCH", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
405, List("Method", "POST", "PUT"))
}
test (s"A POST on /a/b/c should fail with a 404 on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a/b/c", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
404, List("not found"))
}
test (s"Plain text PATCH should fail on /a/z $wadlDesc with $configDesc when X-ROLES is user") {
assertResultFailed(validator.validate(request("PATCH","/a/z","plain/text","hello!", false,
Map[String,List[String]]("X-Roles"->List("user"))), response, chain),
415, List("content type","did not match"))
}
test (s"Plain text PATCH should fail on /a/z $wadlDesc with $configDesc when X-ROLES is foo") {
assertResultFailed(validator.validate(request("PATCH","/a/z","plain/text","hello!", false,
Map[String,List[String]]("X-Roles"->List("foo"))), response, chain),
415, List("content type","did not match"))
}
}
def sadWhenAssertionsAreEnabled(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A PUT on /a should not validate with goodXML with an @att=2 on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",
<some_xml att='2' xmlns='test.org'>
<an_element>
<another_element />
</an_element>
</some_xml>
, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
400, List("expect att to = 1"))
}
test (s"A PUT on /a should not validate with goodXML but no headers on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("X-Roles"->List("user"))), response, chain),
400, List("There should be a header that starts with a"))
}
test (s"A PUT on /a should not validate with goodXML but no b header on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"X-Roles"->List("user"))), response, chain),
400, List("There should be a header that starts with b"))
}
test (s"A PUT on /a should not validate with goodXML but no X-AUTH header on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("abba"),
"X-Roles"->List("user"))), response, chain),
400, List("X-Auth","foo!"))
}
test (s"A PUT on /a should not validate with YAML $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "text/x-yaml","""---
- name: Hello World!
""", false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
500, List("YAML makes us fail!"))
}
test (s"A PUT on /a should not validate with empty JSON body $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a", "application/json", "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
400, List("No content"))
}
test (s"A PUT on /a should not validate with good JSON body and no headers $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a/b", "application/json", goodJSON, false,
Map[String,List[String]]("X-Roles"->List("user"))), response, chain),
400, List("There should be a header that starts with b"))
}
test (s"A PUT on /a should not validate with good JSON body and b header but no auth header $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a/b", "application/json", goodJSON, false,
Map[String,List[String]]("b"->List("abba"),
"X-Roles"->List("user"))), response, chain),
400, List("X-AUTH","foo!"))
}
test (s"A PUT on /a should not validate with good XML body and no headers on application/xml $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a/b", "application/xml", goodXML, false,
Map[String,List[String]]("X-Roles"->List("user"))), response, chain),
400, List("There should be a header that starts with b"))
}
test (s"A PUT on /a should not validate with good XML body and b header but no auth header $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a/b", "application/xml", goodXML, false,
Map[String,List[String]]("b"->List("abba"),
"X-Roles"->List("user"))), response, chain),
400, List("X-AUTH","foo!"))
}
test (s"A DELETE on /a/b should not validate on with 'foo!' is not the first value $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("x-auth"->List("bar","foo!"),
"X-Roles"->List("admin"))),
response, chain),
400, List("req:header('X-AUTH') = 'foo!'"))
}
test (s"A POST on /a/b should not validate with empty JSON body no headers $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a/b", "application/json", null.asInstanceOf[String], false,
Map[String,List[String]]("X-Roles"->List("user"))), response, chain),
400, List("No content"))
}
test (s"A POST on /a/b should not validate with JSON with bad schema on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a/b", "application/json", goodJSON_Schema1, false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
400, List("Expect JSON to have 'A String'"))
}
}
def happyWithParamDefaultsEnabled(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A DELETE on /a/b should validate on when no X-Auth is specified $wadlDesc with $configDesc") {
validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Roles"->List("user"))), response, chain)
}
}
def sadWithParamDefaultsDisabled(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A DELETE on /a/b should not validate on with incorrect Auth header $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("X-Auth"->List("Bust"),
"X-Roles"->List("admin"))), response, chain),
400, List("req:header('X-AUTH') = 'foo!'"))
}
}
def testsWithPlainParamsDisabled(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A PUT on /a should not validate with goodXML that does not contain an_element $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",
<some_xml att='1' xmlns='test.org'>
<another_element>
<yet_another_element />
</another_element>
</some_xml>
, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
400, List("expect an_element"))
}
test (s"A POST on /a should not validate with goodXML (wrong schema) on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
500, List("This assertion should never fire!!"))
}
test (s"A POST on /a with tst2:a and no @id attribute should succeed on $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a", "application/xml",
<a xmlns="http://www.rackspace.com/repose/wadl/checker/step/test"
stepType="ACCEPT"
even="22"/>
, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
test (s"A POST on /a with a bad schema JSON should succeed on $wadlDesc with $configDesc") {
validator.validate(request("POST", "/a", "application/json",""" { "foo" : "bar" } """,false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain)
}
}
def testsWithPlainParamsEnabled(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A PUT on /a should not validate with goodXML that does not contain an_element (plain param) $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",
<some_xml att='1' xmlns='test.org'>
<another_element>
<yet_another_element />
</another_element>
</some_xml>
, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
400, List("Expecting","tst:some_xml/tst:an_element/tst:another_element"))
}
test (s"A POST on /a should not validate with goodXML (wrong schema) on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
400, List("root", "tst2:a"))
}
test (s"A POST on /a with tst2:a and no @id attribute should fail with plain params enabled $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a", "application/xml",
<a xmlns="http://www.rackspace.com/repose/wadl/checker/step/test"
stepType="ACCEPT"
even="22"/>
, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
400, List("Expecting an id attribute"))
}
test (s"A POST on /a with a bad schema JSON should fail on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("POST", "/a", "application/json",""" { "foo" : "bar" } """,false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("user"))), response, chain),
403, List("Need a first name"))
}
}
def testsWithRaxRolesDisabled(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A PUT on /a should validate with goodXML with no roles on $wadlDesc with $configDesc") {
validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"))), response, chain)
}
test (s"A PUT on /a should validate with goodXML and an unknown role on $wadlDesc with $configDesc") {
validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("bizRole","admin"))), response, chain)
}
test (s"A DELETE on /a/b should validate with multiple unknown roles on $wadlDesc with $configDesc") {
validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"),
"X-Roles"->List("bizBuz", "Wooga"))), response, chain)
}
test (s"A DELETE on /a/b should validate with no roles $wadlDesc with $configDesc") {
validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"))), response, chain)
}
}
def testsWithRaxRolesEnabled(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A PUT on /a should validate with goodXML with no roles on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"))), response, chain),
403, List("You are forbidden"))
}
test (s"A PUT on /a should validate with goodXML and an unknown role on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("bizRole","admin"))), response, chain),
403, List("You are forbidden"))
}
test (s"A DELETE on /a/b should validate with multiple unknown roles on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"),
"X-Roles"->List("bizBuz", "Wooga"))), response, chain),
403, List("You are forbidden"))
}
test (s"A DELETE on /a/b should validate with no roles $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"))), response, chain),
403, List("You are forbidden"))
}
}
def testsWithRaxRolesMaskEnabled(validator : Validator, wadlDesc : String, configDesc : String) {
test (s"A PUT on /a should validate with goodXML with no roles on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"))), response, chain),
405, List("PUT"))
}
test (s"A PUT on /a should validate with goodXML and an unknown role on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("PUT", "/a", "application/xml",goodXML, false,
Map[String,List[String]]("a"->List("abba"),
"b"->List("ababa"),
"X-Auth"->List("foo!"),
"X-Roles"->List("bizRole","admin"))), response, chain),
405, List("PUT"))
}
test (s"A DELETE on /a/b should validate with multiple unknown roles on $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"),
"X-Roles"->List("bizBuz", "Wooga"))), response, chain),
404, List("Resource not found"))
}
test (s"A DELETE on /a/b should validate with no roles $wadlDesc with $configDesc") {
assertResultFailed(validator.validate(request("DELETE", "/a/b", null, "", false,
Map[String,List[String]]("a"->List("Bbba"),
"b"->List("Dbaba"),
"X-Auth"->List("foo!"))), response, chain),
404, List("Resource not found"))
}
}
//
// With assertions disabled
//
for ((wadlDesc, wadl) <- assertWADLs) {
for ((configDesc, config) <- assertDisabledConfigs) {
val validator = Validator(wadl, config)
happyPathAssertions(validator, wadlDesc, configDesc)
happyWhenAssertionsAreDisabled(validator, wadlDesc, configDesc)
happySadPaths (validator, wadlDesc, configDesc)
}
}
//
// With asserts enabled
//
for ((wadlDesc, wadl) <- assertWADLs) {
for ((configDesc, config) <- assertEnabledConfigs) {
val validator = Validator(wadl, config)
happyPathAssertions(validator, wadlDesc, configDesc)
happySadPaths (validator, wadlDesc, configDesc)
sadWhenAssertionsAreEnabled(validator, wadlDesc, configDesc)
sadWithParamDefaultsDisabled(validator, wadlDesc, configDesc)
testsWithPlainParamsDisabled(validator, wadlDesc, configDesc)
testsWithRaxRolesDisabled(validator, wadlDesc, configDesc)
}
}
//
// With param defaults and assert enabled configs
//
for ((wadlDesc, wadl) <- assertWADLs) {
for ((configDesc, config) <- assertEnabledParamDefaultConfigs) {
val validator = Validator(wadl, config)
happyPathAssertions(validator, wadlDesc, configDesc)
happySadPaths (validator, wadlDesc, configDesc)
sadWhenAssertionsAreEnabled(validator, wadlDesc, configDesc)
happyWithParamDefaultsEnabled (validator, wadlDesc, configDesc)
testsWithPlainParamsDisabled(validator, wadlDesc, configDesc)
testsWithRaxRolesDisabled(validator, wadlDesc, configDesc)
}
}
//
// With plain params and asserts enabled
//
for ((wadlDesc, wadl) <- assertWADLs) {
for ((configDesc, config) <- assertEnabledPlainParamConfigs) {
val validator = Validator(wadl, config)
happyPathAssertions(validator, wadlDesc, configDesc)
happySadPaths (validator, wadlDesc, configDesc)
sadWhenAssertionsAreEnabled(validator, wadlDesc, configDesc)
sadWithParamDefaultsDisabled(validator, wadlDesc, configDesc)
testsWithPlainParamsEnabled(validator, wadlDesc, configDesc)
testsWithRaxRolesDisabled(validator, wadlDesc, configDesc)
}
}
//
// With plain params and rax:roles and asserts enabled
//
for ((wadlDesc, wadl) <- assertWADLs) {
for ((configDesc, config) <- assertEnabledPlainRaxRoles) {
val validator = Validator(wadl, config)
happyPathAssertions(validator, wadlDesc, configDesc)
happySadPaths (validator, wadlDesc, configDesc)
sadWhenAssertionsAreEnabled(validator, wadlDesc, configDesc)
sadWithParamDefaultsDisabled(validator, wadlDesc, configDesc)
testsWithPlainParamsEnabled(validator, wadlDesc, configDesc)
testsWithRaxRolesEnabled(validator, wadlDesc, configDesc)
}
}
//
// With plain params and rax:roles Masked and asserts enabled
//
for ((wadlDesc, wadl) <- assertWADLs) {
for ((configDesc, config) <- assertEnabledPlainRaxRolesMask) {
val validator = Validator(wadl, config)
happyPathAssertions(validator, wadlDesc, configDesc)
happySadPaths (validator, wadlDesc, configDesc)
sadWhenAssertionsAreEnabled(validator, wadlDesc, configDesc)
sadWithParamDefaultsDisabled(validator, wadlDesc, configDesc)
testsWithPlainParamsEnabled(validator, wadlDesc, configDesc)
testsWithRaxRolesMaskEnabled(validator, wadlDesc, configDesc)
}
}
}
| wdschei/api-checker | core/src/test/scala/com/rackspace/com/papi/components/checker/ValidatorWADLAssertSuite.scala | Scala | apache-2.0 | 63,728 |
/*
* Copyright 2013 websudos ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.websudos.phantom.dsl.query
import org.scalatest.{ FlatSpec, Matchers, ParallelTestExecution }
import com.websudos.phantom.Implicits._
import com.websudos.phantom.tables.Primitives
class TypeRestrictionsTest extends FlatSpec with Matchers {
it should "allow using a correct type for a value method" in {
"Primitives.insert.value(_.boolean, true)" should compile
}
it should "not allow using a wrong type for a value method" in {
"Primitives.insert.value(_.boolean, 5)" shouldNot compile
}
}
| nosheenzaza/phantom-data-centric | phantom-dsl/src/test/scala/com/websudos/phantom/dsl/query/TypeRestrictionsTest.scala | Scala | gpl-2.0 | 1,118 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
* Author(s) :
* - Zohour Abouakil
* - David Courtinot
*/
package cfg
import scala.reflect.runtime.universe
import ast._
import ast.model._
import ctl._
/**
* This file contains the model we are going to use to link the AST classes with the CTL ones.
* @author Zohour Abouakil
* @author David Courtinot
*/
case class CFGMetaVar(name: String) extends MetaVariable {
override def hashCode = name.hashCode
override def toString = name
override def equals(a: Any) = a match {
case CFGMetaVar(value) => value == name
case _ => false
}
}
sealed abstract class CFGVal extends Value
/**
* CFGExpr represents any expression that can be found or extracted by an ExprPattern on a CFG node
*/
final case class CFGExpr(expr: Expr) extends CFGVal {
override def toString = expr.toString
}
object CFGExpr extends TypeOf[CFGVal] {
override def cast(n: CFGVal) = n match { case CFGExpr(_) => true; case _ => false }
}
/**
* CFGDecl represents a C++ declaration. The equality between two CFGDecl is based on their ID in
* the AST.
*/
final case class CFGDecl(id: String, typeOf: String, name: String) extends CFGVal {
override val hashCode = id.hashCode
override def equals(a: Any) = a match { case CFGDecl(id,_,_) => id == this.id; case _ => false }
}
object CFGDecl extends TypeOf[CFGVal] { override def cast(n: CFGVal) = n match { case CFGDecl(_,_,_) => true; case _ => false } }
/**
 * CFGDef, just like CFGDecl, represents a C++ declaration. However, while CFGDecl corresponds to an
 * actual declaration, CFGDef only represents the semantics of a declaration rather than the declaration
 * itself: two CFGDef instances are considered equal if they declare a variable of the same name and
 * the same type.
*/
final case class CFGDef(typeOf: String, name: String) extends CFGVal
object CFGDef extends TypeOf[CFGVal] { override def cast(n: CFGVal) = n match { case CFGDef(_,_) => true; case _ => false } }
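/**
 * Illustrative sketch (not part of the original model): contrasts the two equality semantics
 * documented above. CFGDecl compares by AST id only, while CFGDef compares structurally on
 * (type, name). The literal values below are made up for the example.
 */
private[cfg] object CFGValEqualitySketch {
    val declA = CFGDecl("node-1", "int", "x")
    val declB = CFGDecl("node-2", "int", "x")
    val defA  = CFGDef("int", "x")
    val defB  = CFGDef("int", "x")
    val declsDiffer: Boolean = declA == declB // false: ids differ even though type and name match
    val defsEqual: Boolean   = defA == defB   // true: same type and name
}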
/**
* CFGString represents any string that can be matched by a StringPattern (operator symbol, type name...).
*/
final case class CFGString(s: String) extends CFGVal
object CFGString extends TypeOf[CFGVal] { override def cast(n: CFGVal) = n match { case CFGString(_) => true; case _ => false } }
/**
 * ConvertNodes contains helper methods for fetching the expressions contained in a CFG node of any kind.
*/
object ConvertNodes {
private def getAllVal (expr: Expr): Set[CFGVal] = getAllExpr(expr) ++ getAllString(expr)
// this method is called recursively on the sub-expressions as the FindExprLabelizer may extract any of the sub-expressions
private def getAllExpr (expr: Expr): Set[CFGVal] = expr.getSubExprs.flatMap(getAllExpr(_)).toSet + CFGExpr(expr)
private def getAllString(expr: Expr): Set[CFGVal] = expr match {
case BinaryOp (_,_,_,op ) => Set(op)
case CompoundAssignOp(_,_,_,op ) => Set(op)
case UnaryOp (_,_,op ,_) => Set(op)
case CallExpr (typeOf,ref,_) => Set(typeOf,ref.targetName)
case CXXNewExpr (typeOf ,_) => Set(typeOf)
case _ => Set()
}
/**
* Returns the single expression contained by a node, if any.
*/
def getExpr(p: ProgramNode): Option[Expr] = p match {
case If (expr,_,_) => Some(expr)
case While (expr,_,_) => Some(expr)
case Expression(expr,_,_) => Some(expr)
case Switch (expr,_,_) => Some(expr)
case For (Some(expr),_,_) => Some(expr)
// see comment in the convert method
case Statement (VarDecl(name,typeOf,expr),cr,id) => expr.map(BinaryOp(typeOf,SourceCodeNode(DeclRefExpr(typeOf,name,id),cr,id),_,"="))
case _ => None
}
/**
* Returns a conversion function from ProgramNode to the Set[CFGVal] likely to be extracted
* by Pattern(s) matching
*/
def convert: (ProgramNode => Set[CFGVal]) = (p: ProgramNode) => p match {
case If (expr,_,_) => getAllVal(expr)
case While (expr,_,_) => getAllVal(expr)
case Expression(expr,_,_) => getAllVal(expr)
case Switch (expr,_,_) => getAllVal(expr)
case For (Some(expr),_,_) => getAllVal(expr)
case Statement (VarDecl(name,typeOf,expr),cr,id) =>
// for a VarDecl node, we instantiate an artificial assignment because the expr attribute
// only represents the right part of the assignment included in the declaration
Set(CFGDecl(p.id,typeOf,name),CFGDef(typeOf,name)) ++
expr.map(e => CFGExpr(BinaryOp(typeOf,SourceCodeNode(DeclRefExpr(typeOf,name,id),cr,id),e,"="))) ++
Set(CFGString(name),CFGString(typeOf))
case _ => Set()
}
} | jxw1102/Projet-merou | ModelChecker/src/cfg/Model.scala | Scala | apache-2.0 | 5,889 |
abstract class AbstractFile {
def name: String
val extension: String = name.substring(4)
}
class RemoteFile(url: String) extends AbstractFile {
val localFile: String = url.hashCode + ".tmp" // error
def name: String = localFile
}
| som-snytt/dotty | tests/init/neg/AbstractFile.scala | Scala | apache-2.0 | 236 |
package com.wangc.fast.p2
/**
* Created by wangchao on 2017/6/1.
* Chapter 2.5 Loops
*/
object Study2_5_Loops {
def main(args: Array[String]): Unit = {
var n = 1
var r = 0
while (n > 0){
println(n+"=="+r)
r = r + n
if (r>5){
n = -1
}
}
    //[0,5] inclusive at both ends
for(i <- 0 to 5){
println(i)
}
    //[0,3) inclusive start, exclusive end
for(i <- 0 until 3){
println(i)
}
println( 12 to 15 )
for (ch <- "Hello"){
println(ch)
}
    //Scala has no break or continue; control the loop with a flag instead (see breakDemo below for a standard-library alternative)
println("===============")
test()
}
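  //A minimal sketch (not in the original lesson): scala.util.control.Breaks
  //provides breakable/break as an alternative to the flag variable used in main.
  def breakDemo():Unit={
    import scala.util.control.Breaks._
    breakable {
      for (i <- 1 to 10) {
        if (i > 5) break()
        println(i)
      }
    }
  }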
def test():Unit={
    //inclusive at both ends: [1,5]
for (i <- 1 to 5){
println(i)
}
    //inclusive start, exclusive end: [1,20), keeping multiples of 5
for (i <- 1 until 20 if i%5==0){
println(i)
}
for(c<-"hello"){
println(c)
}
}
}
| wang153723482/HelloWorld_my | HelloWorld_scala/src/com/wangc/fast/p2/Study2_5_Loops.scala | Scala | apache-2.0 | 913 |
package com.theseventhsense.datetime
import com.theseventhsense.utils.types.SSDateTime.TimeZone
/**
* Created by erik on 6/15/16.
*/
class RichTimeZone(timeZone: TimeZone) extends MomentRichTimeZone(timeZone)
object RichTimeZone extends MomentRichTimezoneOps
| 7thsense/utils-datetime | js/src/main/scala/com/theseventhsense/datetime/RichTimezone.scala | Scala | mit | 266 |
package scalaxy.extensions
package test
import org.junit._
import Assert._
class MacroExtensionsTest extends TestBase
{
override def transform(s: String, name: String = "test") = {
// First, compile with untyped reify:
transformCode(s, name, macroExtensions = true, runtimeExtensions = false, useUntypedReify = true)
// Then return result of compilation without untyped reify:
transformCode(s, name, macroExtensions = true, runtimeExtensions = false, useUntypedReify = false)._1
}
@Test
def trivial {
transform("object O { @scalaxy.extension[Int] def foo: Int = 10 }")
}
@Test
def noReturnType {
expectException("return type is missing") {
transform("object O { @scalaxy.extension[Int] def foo = 10 }")
}
}
@Ignore
@Test
def notInModule {
expectException("not defined in module") {
transform("class O { @scalaxy.extension[Int] def foo: Int = 10 }")
}
}
@Test
def notHygienic {
expectException("self is redefined locally") {
transform("object O { @scalaxy.extension[Int] def foo: Int = { val self = 10; self } }")
}
}
@Test
def ambiguousThis {
expectException("ambiguous this") {
transform("""
object O {
@scalaxy.extension[Int]
def foo: Int = {
new Object() { println(this) };
10
}
}
""")
}
}
@Test
def noArg {
assertSameTransform(
"""
object O {
@scalaxy.extension[String] def len: Int = self.length
}
""",
"""
object O {
import scala.language.experimental.macros;
implicit class scalaxy$extensions$len$1(val self: String)
extends scala.AnyRef {
def len: Int =
macro scalaxy$extensions$len$1.len
}
object scalaxy$extensions$len$1 {
def len(c: scala.reflect.macros.Context): c.Expr[Int] = {
import c.universe._
val Apply(_, List(selfTree$1)) = c.prefix.tree;
val self$Expr$1 = c.Expr[String](selfTree$1)
reify({
val self = self$Expr$1.splice
self.length
})
}
}
}
"""
)
}
@Test
def oneByValueArg {
assertSameTransform(
"""
object O {
@scalaxy.extension[Int] def foo(quote: String): String = quote + self + quote
}
""",
"""
object O {
import scala.language.experimental.macros;
implicit class scalaxy$extensions$foo$1(val self: Int)
extends scala.AnyVal {
def foo(quote$Expr$1: String): String =
macro scalaxy$extensions$foo$1.foo
}
object scalaxy$extensions$foo$1 {
def foo(c: scala.reflect.macros.Context)
(quote$Expr$1: c.Expr[String]):
c.Expr[String] =
{
import c.universe._
val Apply(_, List(selfTree$1)) = c.prefix.tree;
val self$Expr$1 = c.Expr[Int](selfTree$1)
reify({
val self = self$Expr$1.splice
val quote = quote$Expr$1.splice
quote + self + quote
})
}
}
}
"""
)
}
@Test
def typeParam {
assertSameTransform(
"""
object O {
@scalaxy.extension[Double] def foo[A](a: A): A = {
println(s"$self.foo($a)")
a
}
}
""",
"""
object O {
import scala.language.experimental.macros;
implicit class scalaxy$extensions$foo$1(val self: Double)
extends scala.AnyVal {
def foo[A](a$Expr$1: A): A =
macro scalaxy$extensions$foo$1.foo[A]
}
object scalaxy$extensions$foo$1 {
def foo[A : c.WeakTypeTag]
(c: scala.reflect.macros.Context)
(a$Expr$1: c.Expr[A]):
c.Expr[A] =
{
import c.universe._
val Apply(_, List(selfTree$1)) = c.prefix.tree;
val self$Expr$1 = c.Expr[Double](selfTree$1)
reify({
val self = self$Expr$1.splice;
val a = a$Expr$1.splice;
{
println(s"${self}.foo(${a})")
a
}
})
}
}
}
"""
)
}
@Test
def innerOuterTypeParams {
assertSameTransform(
"""
object O {
@scalaxy.extension[Array[A]] def foo[A, B](b: B): (Array[A], B) = (self, b)
}
""",
"""
object O {
import scala.language.experimental.macros;
implicit class scalaxy$extensions$foo$1[A](val self: Array[A])
extends scala.AnyRef {
def foo[B](b$Expr$1: B): (Array[A], B) =
macro scalaxy$extensions$foo$1.foo[A, B]
}
object scalaxy$extensions$foo$1 {
def foo[A : c.WeakTypeTag, B : c.WeakTypeTag]
(c: scala.reflect.macros.Context)
(b$Expr$1: c.Expr[B]):
c.Expr[(Array[A], B)] =
{
import c.universe._
val Apply(_, List(selfTree$1)) = c.prefix.tree;
val self$Expr$1 = c.Expr[Array[A]](selfTree$1)
reify({
val self = self$Expr$1.splice
val b = b$Expr$1.splice
(self, b)
})
}
}
}
"""
)
}
@Test
def passImplicitsThrough {
assertSameTransform(
"""
object O {
@scalaxy.extension[T] def squared[T : Numeric]: T = self * self
}
""",
"""
object O {
import scala.language.experimental.macros;
implicit class scalaxy$extensions$squared$1[T](val self: T)
extends scala.AnyRef {
def squared(implicit evidence$1$Expr$1: Numeric[T]): T =
macro scalaxy$extensions$squared$1.squared[T]
}
object scalaxy$extensions$squared$1 {
def squared[T](c: scala.reflect.macros.Context)
(evidence$1$Expr$1: c.Expr[Numeric[T]])
(implicit evidence$2: c.WeakTypeTag[T]):
c.Expr[T] =
{
import c.universe._
val Apply(_, List(selfTree$1)) = c.prefix.tree;
val self$Expr$1 = c.Expr[T](selfTree$1)
reify({
val self = self$Expr$1.splice
implicit val evidence$1 = evidence$1$Expr$1.splice
self * self
})
}
}
}
"""
)
}
@Test
def passImplicitsThroughToMacro {
assertSameTransform(
"""
object O {
@scalaxy.extension[T]
def squared[T : Numeric]: T = macro {
val evExpr = implicity[c.Expr[Numeric[T]]]
reify({
implicit val ev = evExpr.splice
self * self
})
}
}
""",
"""
object O {
import scala.language.experimental.macros;
implicit class scalaxy$extensions$squared$1[T](val self: T)
extends scala.AnyRef {
def squared(implicit evidence$1$Expr$1: Numeric[T]): T =
macro scalaxy$extensions$squared$1.squared[T]
}
object scalaxy$extensions$squared$1 {
def squared[T](c: scala.reflect.macros.Context)
(evidence$1$Expr$1: c.Expr[Numeric[T]])
(implicit evidence$2: c.WeakTypeTag[T]):
c.Expr[T] =
{
import c.universe._
val Apply(_, List(selfTree$1)) = c.prefix.tree;
val self = c.Expr[T](selfTree$1);
{
implicit def evidence$1$1 = c.Expr[Numeric[T]](evidence$1$Expr$1);
{
val evExpr = implicity[c.Expr[Numeric[T]]]
reify({
implicit val ev = evExpr.splice
self * self
})
}
}
}
}
}
"""
)
}
@Test
def oneByNameArgWithImplicitClassTag {
assertSameTransform(
"""
import scala.reflect.ClassTag
object O {
@scalaxy.extension[Int]
def fill[T : ClassTag](generator: => T): Array[T] =
Array.fill[T](self)(generator)
}
""",
"""
import scala.reflect.ClassTag
object O {
import scala.language.experimental.macros;
implicit class scalaxy$extensions$fill$1(val self: Int)
extends scala.AnyVal {
def fill[T](generator: T)(implicit evidence$1$Expr$1: ClassTag[T]): Array[T] =
macro scalaxy$extensions$fill$1.fill[T]
}
object scalaxy$extensions$fill$1 {
def fill[T](c: scala.reflect.macros.Context)
(generator: c.Expr[T])
(evidence$1$Expr$1: c.Expr[ClassTag[T]])
(implicit evidence$2: c.WeakTypeTag[T]):
c.Expr[Array[T]] =
{
import c.universe._
val Apply(_, List(selfTree$1)) = c.prefix.tree;
val self$Expr$1 = c.Expr[Int](selfTree$1)
reify({
val self = self$Expr$1.splice
implicit val evidence$1 = evidence$1$Expr$1.splice
Array.fill[T](self)(generator.splice)
})
}
}
}
"""
)
}
@Test
def oneByNameAndOneByValueArg {
assertSameTransform(
"""
object O {
@scalaxy.extension[Int]
def fillZip(value: Int, generator: => String): Array[(Int, String)] =
Array.fill(self)((value, generator))
}
""",
"""
object O {
import scala.language.experimental.macros;
implicit class scalaxy$extensions$fillZip$1(val self: Int)
extends scala.AnyVal {
def fillZip(value$Expr$1: Int, generator: String): Array[(Int, String)] =
macro scalaxy$extensions$fillZip$1.fillZip
}
object scalaxy$extensions$fillZip$1 {
def fillZip(c: scala.reflect.macros.Context)
(value$Expr$1: c.Expr[Int], generator: c.Expr[String]):
c.Expr[Array[(Int, String)]] =
{
import c.universe._
val Apply(_, List(selfTree$1)) = c.prefix.tree;
val self$Expr$1 = c.Expr[Int](selfTree$1)
reify({
val self = self$Expr$1.splice
val value = value$Expr$1.splice
Array.fill(self)((value, generator.splice))
})
}
}
}
"""
)
}
}
| nativelibs4java/Scalaxy | Obsolete/MacroExtensions/src/test/scala/scalaxy/extensions/MacroExtensionsTest.scala | Scala | bsd-3-clause | 11,049 |
/*
* (c) Copyright 2019 EntIT Software LLC, a Micro Focus company, L.P.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0 which accompany this distribution.
*
* The Apache License is available at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.cloudslang.content.google.actions.compute.compute_engine.disks
import java.util
import com.hp.oo.sdk.content.annotations.{Action, Output, Param, Response}
import com.hp.oo.sdk.content.plugin.ActionMetadata.{MatchType, ResponseType}
import io.cloudslang.content.constants.BooleanValues.TRUE
import io.cloudslang.content.constants.OutputNames.{EXCEPTION, RETURN_CODE, RETURN_RESULT}
import io.cloudslang.content.constants.{ResponseNames, ReturnCodes}
import io.cloudslang.content.google.services.compute.compute_engine.instances.InstanceService
import io.cloudslang.content.google.utils.Constants.{COMMA, NEW_LINE, TIMEOUT_EXCEPTION}
import io.cloudslang.content.google.utils.action.DefaultValues.{DEFAULT_POLLING_INTERVAL, DEFAULT_PRETTY_PRINT, DEFAULT_PROXY_PORT, DEFAULT_SYNC_TIMEOUT}
import io.cloudslang.content.google.utils.action.GoogleOutputNames.{ZONE_OPERATION_NAME => _, _}
import io.cloudslang.content.google.utils.action.InputNames._
import io.cloudslang.content.google.utils.action.InputUtils.{convertSecondsToMilli, verifyEmpty}
import io.cloudslang.content.google.utils.action.InputValidator.{validateBoolean, validateNonNegativeDouble, validateNonNegativeLong, validateProxyPort}
import io.cloudslang.content.google.utils.action.OutputUtils.toPretty
import io.cloudslang.content.google.utils.service.{GoogleAuth, HttpTransportUtils, JsonFactoryUtils}
import io.cloudslang.content.google.utils.{ErrorOperation, OperationStatus, SuccessOperation}
import io.cloudslang.content.utils.BooleanUtilities.toBoolean
import io.cloudslang.content.utils.NumberUtilities.{toDouble, toInteger, toLong}
import io.cloudslang.content.utils.OutputUtilities.{getFailureResultsMap, getSuccessResultsMap}
import org.apache.commons.lang3.StringUtils.{EMPTY, defaultIfEmpty}
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import scala.concurrent.TimeoutException
/**
* Created by sandorr on 5/2/2017.
*/
class DetachDisk {
/**
    * Detaches a disk, identified by its device name, from the specified instance.
*
* @param projectId Name of the Google Cloud project.
* @param zone Name of the zone for this request.
* @param accessToken The access token from GetAccessToken.
    * @param instanceName       Name of the instance to detach the disk from.
* @param deviceName The disk device name to detach.
* @param asyncInp Optional - Boolean specifying whether the operation to run sync or async.
* Valid values: "true", "false"
* Default: "true"
* @param timeoutInp Optional - The time, in seconds, to wait for a response if the async input is set to "false".
* If the value is 0, the operation will wait until zone operation progress is 100.
* Valid values: Any positive number including 0.
* Default: "30"
* @param pollingIntervalInp Optional - The time, in seconds, to wait before a new request that verifies if the operation finished
* is executed, if the async input is set to "false".
* Valid values: Any positive number including 0.
* Default: "1"
* @param proxyHost Optional - Proxy server used to connect to Google Cloud API. If empty no proxy will
* be used.
* @param proxyPortInp Optional - Proxy server port.
* Default: "8080"
* @param proxyUsername Optional - Proxy server user name.
* @param proxyPasswordInp Optional - Proxy server password associated with the proxyUsername input value.
* @param prettyPrintInp Optional - Whether to format (pretty print) the resulting json.
* Valid values: "true", "false"
* Default: "true"
* @return A map with strings as keys and strings as values that contains: outcome of the action, returnCode of the
* operation, status of the ZoneOperation if the <asyncInp> is true. If <asyncInp> is false the map will also
    *         operation, and the status of the ZoneOperation if <asyncInp> is true. If <asyncInp> is false, the map
    *         will also contain the name and the details of the instance (including its attached disks), and the
    *         status of the operation is replaced by the status of the instance.
*/
@Action(name = "Detach Disk",
outputs = Array(
new Output(RETURN_CODE),
new Output(RETURN_RESULT),
new Output(EXCEPTION),
new Output(ZONE_OPERATION_NAME),
new Output(INSTANCE_NAME),
new Output(INSTANCE_DETAILS),
new Output(DISKS),
new Output(STATUS)),
responses = Array(
new Response(text = ResponseNames.SUCCESS, field = RETURN_CODE, value = ReturnCodes.SUCCESS, matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.RESOLVED),
new Response(text = ResponseNames.FAILURE, field = RETURN_CODE, value = ReturnCodes.FAILURE, matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.ERROR, isOnFail = true)
)
)
def execute(@Param(value = ACCESS_TOKEN, required = true, encrypted = true) accessToken: String,
@Param(value = PROJECT_ID, required = true) projectId: String,
@Param(value = ZONE, required = true) zone: String,
@Param(value = INSTANCE_NAME, required = true) instanceName: String,
@Param(value = DEVICE_NAME, required = true) deviceName: String,
@Param(value = ASYNC) asyncInp: String,
@Param(value = TIMEOUT) timeoutInp: String,
@Param(value = POLLING_INTERVAL) pollingIntervalInp: String,
@Param(value = PROXY_HOST) proxyHost: String,
@Param(value = PROXY_PORT) proxyPortInp: String,
@Param(value = PROXY_USERNAME) proxyUsername: String,
@Param(value = PROXY_PASSWORD, encrypted = true) proxyPasswordInp: String,
@Param(value = PRETTY_PRINT) prettyPrintInp: String): util.Map[String, String] = {
val proxyHostOpt = verifyEmpty(proxyHost)
val proxyUsernameOpt = verifyEmpty(proxyUsername)
val proxyPortStr = defaultIfEmpty(proxyPortInp, DEFAULT_PROXY_PORT)
val proxyPassword = defaultIfEmpty(proxyPasswordInp, EMPTY)
val prettyPrintStr = defaultIfEmpty(prettyPrintInp, DEFAULT_PRETTY_PRINT)
val asyncStr = defaultIfEmpty(asyncInp, TRUE)
val timeoutStr = defaultIfEmpty(timeoutInp, DEFAULT_SYNC_TIMEOUT)
val pollingIntervalStr = defaultIfEmpty(pollingIntervalInp, DEFAULT_POLLING_INTERVAL)
val validationStream = validateProxyPort(proxyPortStr) ++
validateBoolean(prettyPrintStr, PRETTY_PRINT) ++
validateBoolean(asyncStr, ASYNC) ++
validateNonNegativeLong(timeoutStr, TIMEOUT) ++
validateNonNegativeDouble(pollingIntervalStr, POLLING_INTERVAL)
if (validationStream.nonEmpty) {
return getFailureResultsMap(validationStream.mkString(NEW_LINE))
}
try {
val proxyPort = toInteger(proxyPortStr)
val prettyPrint = toBoolean(prettyPrintStr)
val async = toBoolean(asyncStr)
val timeout = toLong(timeoutStr)
val pollingIntervalMilli = convertSecondsToMilli(toDouble(pollingIntervalStr))
val httpTransport = HttpTransportUtils.getNetHttpTransport(proxyHostOpt, proxyPort, proxyUsernameOpt, proxyPassword)
val jsonFactory = JsonFactoryUtils.getDefaultJacksonFactory
val credential = GoogleAuth.fromAccessToken(accessToken)
OperationStatus(InstanceService.detachDisk(httpTransport, jsonFactory, credential, projectId, zone, instanceName,
deviceName, async, timeout, pollingIntervalMilli)) match {
case SuccessOperation(operation) =>
val resultMap = getSuccessResultsMap(toPretty(prettyPrint, operation)) + (ZONE_OPERATION_NAME -> operation.getName)
if (async) {
val status = defaultIfEmpty(operation.getStatus, EMPTY)
resultMap +
(STATUS -> status)
} else {
val instance = InstanceService.get(httpTransport, jsonFactory, credential, projectId, zone, instanceName)
val name = defaultIfEmpty(instance.getName, EMPTY)
val status = defaultIfEmpty(instance.getStatus, EMPTY)
val disksNames = Option(instance.getDisks).getOrElse(List().asJava).map(_.getDeviceName)
resultMap +
(INSTANCE_NAME -> name) +
(INSTANCE_DETAILS -> toPretty(prettyPrint, instance)) +
(DISKS -> disksNames.mkString(COMMA)) +
(STATUS -> status)
}
case ErrorOperation(error) => getFailureResultsMap(error)
}
} catch {
case t: TimeoutException => getFailureResultsMap(TIMEOUT_EXCEPTION, t)
case e: Throwable => getFailureResultsMap(e)
}
}
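  // Illustrative call (sketch only; the literal values below are placeholders and not part of the
  // original action): a synchronous detach with no proxy would look roughly like
  //   new DetachDisk().execute(accessToken, "my-project", "us-central1-a", "my-instance",
  //     "my-device", "false", "60", "1", "", "", "", "", "true")
  // and the returned map carries the documented outputs (return code, status and, for sync runs,
  // the instance details).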
} | CloudSlang/cs-actions | cs-google/src/main/scala/io/cloudslang/content/google/actions/compute/compute_engine/disks/DetachDisk.scala | Scala | apache-2.0 | 9,623 |
package io.github.suitougreentea.VariousMinos
import scala.collection.mutable.ListBuffer
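/**
 * Buffer for the blocks of a currently falling piece: rows of 10 blocks whose top row
 * sits at `y` (set on the first write). Reads outside the stored area return a
 * sentinel Block(-1); writes grow the buffer with empty Block(0) rows as needed.
 */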
class FallingPiece {
private var _array: ListBuffer[Array[Block]] = ListBuffer.empty
var y = -1
var containsPersistentBlock = false
var containsIndependentBlock = false
def apply(x: Int, y:Int): Block = {
var ny = y - this.y
if(x < 0 || x >= 10 || ny < 0 || ny >= height) new Block(-1) else _array(ny)(x)
}
def update(x: Int, y:Int, value: Block) = {
if(this.y == -1) this.y = y
var ny = y - this.y
if(x >= 0 && x < 10 && ny >= 0) {
while(ny >= height){
_array :+= Array.fill(10)(new Block(0))
}
_array(ny)(x) = value
}
}
def height = _array.size
} | suitougreentea/VariousMinos2 | src/main/scala/io/github/suitougreentea/VariousMinos/FallingPiece.scala | Scala | mit | 707 |
package skinny.controller.feature
import org.scalatra.test.scalatest.ScalatraFlatSpec
import skinny._
import skinny.controller.SkinnyController
class AfterActionSpec extends ScalatraFlatSpec {
behavior of "afterAction"
object After1 extends SkinnyController with Routes {
get("/1") { response.writer.write("0") }.as(Symbol("index"))
afterAction() { response.writer.write("1") }
afterAction() { response.writer.write("2") }
}
object After2 extends SkinnyController with Routes {
get("/2") { response.writer.write("OK") }.as(Symbol("index"))
afterAction() { response.writer.write(" Computer") }
}
addFilter(After1, "/*")
addFilter(After2, "/*")
"afterAction" should "be controller-local" in {
get("/1") {
body should equal("012")
}
get("/2") {
body should equal("OK Computer")
}
}
}
| skinny-framework/skinny-framework | framework/src/test/scala/skinny/controller/feature/AfterActionSpec.scala | Scala | mit | 852 |
package blended.jms.utils.internal
import java.text.SimpleDateFormat
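/**
 * JMX MBean implementation that exposes the current ConnectionState of a JMS connection
 * and records operator requests (connect, disconnect, maxEvents) in a ConnectionCommand,
 * which callers read via getCommand() and clear via resetCommand().
 */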
class ConnectionMonitor(vendor: String, provider: String, clientId: String) extends ConnectionMonitorMBean {
private[this] val df = new SimpleDateFormat("yyyy-MM-dd-HH:mm:ss:SSS")
private[this] var state : ConnectionState = ConnectionState(provider = provider).copy(status = ConnectionState.DISCONNECTED)
private[this] var cmd : ConnectionCommand = ConnectionCommand(vendor = vendor, provider = provider)
override def getProvider(): String = provider
override def getClientId(): String = clientId
def getCommand() : ConnectionCommand = cmd
def resetCommand() : Unit = { cmd = ConnectionCommand(vendor = vendor, provider = provider) }
def setState(newState: ConnectionState) : Unit = { state = newState }
def getState() : ConnectionState = state
override def getStatus(): String = state.status
override def getLastConnect(): String = state.lastConnect match {
case None => "n/a"
case Some(d) => df.format(d)
}
override def getLastDisconnect(): String = state.lastDisconnect match {
case None => "n/a"
case Some(d) => df.format(d)
}
override def getFailedPings(): Int = state.failedPings
override def getMaxEvents(): Int = state.maxEvents
override def setMaxEvents(n: Int): Unit = { cmd = cmd.copy(maxEvents = n) }
override def getEvents(): Array[String] = state.events.toArray
override def disconnect(reason: String): Unit = { cmd = cmd.copy(disconnectPending = true) }
override def connect(now: Boolean): Unit = { cmd = cmd.copy(connectPending = true, reconnectNow = now) }
}
| lefou/blended | blended.jms.utils/src/main/scala/blended/jms/utils/internal/ConnectionMonitor.scala | Scala | apache-2.0 | 1,617 |
/**
* Licensed to Gravity.com under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Gravity.com licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gander.extractors
import org.jsoup.nodes.Element
/**
* Implement this abstract class to extract anything not currently contained within the {@link com.jimplush.goose.Article} class
*/
class AdditionalDataExtractor extends Extractor[Map[String, String]] {
def extract(rootElement: Element): Map[String, String] = {
Map.empty
}
}
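/**
 * Illustrative sketch (not part of the original source): a custom extractor can
 * subclass AdditionalDataExtractor and override extract to pull extra fields out of
 * the parsed document. The class name and the og:site_name selector below are just
 * example assumptions.
 */
class SiteNameExtractor extends AdditionalDataExtractor {
  override def extract(rootElement: Element): Map[String, String] = {
    Option(rootElement.select("meta[property=og:site_name]").first())
      .map(meta => Map("siteName" -> meta.attr("content")))
      .getOrElse(Map.empty)
  }
}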
| lloydmeta/gander | src/main/scala/gander/extractors/AdditionalDataExtractor.scala | Scala | apache-2.0 | 1,152 |
package x7c1.linen.repository.loader.queueing
import java.net.URL
import x7c1.wheat.modern.queue.map.TrackableQueue.CanDump
trait UrlEnclosure {
def raw: URL
}
object UrlEnclosure {
implicit def canDump[A <: UrlEnclosure]: CanDump[A] = new CanDump[A] {
override def dump(x: A): String = x.raw.toString
}
}
| x7c1/Linen | linen-repository/src/main/scala/x7c1/linen/repository/loader/queueing/UrlEnclosure.scala | Scala | mit | 322 |
package tifmo
import dcstree.WordBase
import mylib.res.en.EnStopWords
package main.en {
case class EnWord(lemma: String, mypos: String, ner: String, isSingleton: Boolean) extends WordBase {
override def toString = lemma.replaceAll("[^a-zA-Z0-9]", "_") + "_" + mypos
def isStopWord = (ner == "O" || ner == "NUMBER") && EnStopWords.isStopWord(lemma)
def isNamedEntity = !Set("O", "NUMBER", "DATE", "TIME").contains(ner)
}
}
| tomtung/tifmo | src/main/scala/tifmo/main/en/EnWord.scala | Scala | bsd-2-clause | 437 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import scala.collection.JavaConverters._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalog.v2.{Identifier, StagingTableCatalog, TableCatalog}
import org.apache.spark.sql.catalog.v2.expressions.Transform
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.{CannotReplaceMissingTableException, NoSuchTableException}
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.execution.LeafExecNode
import org.apache.spark.sql.sources.v2.StagedTable
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
case class ReplaceTableExec(
catalog: TableCatalog,
ident: Identifier,
tableSchema: StructType,
partitioning: Seq[Transform],
tableProperties: Map[String, String],
orCreate: Boolean) extends LeafExecNode {
override protected def doExecute(): RDD[InternalRow] = {
if (catalog.tableExists(ident)) {
catalog.dropTable(ident)
} else if (!orCreate) {
throw new CannotReplaceMissingTableException(ident)
}
catalog.createTable(ident, tableSchema, partitioning.toArray, tableProperties.asJava)
sqlContext.sparkContext.parallelize(Seq.empty, 1)
}
override def output: Seq[Attribute] = Seq.empty
}
case class AtomicReplaceTableExec(
catalog: StagingTableCatalog,
identifier: Identifier,
tableSchema: StructType,
partitioning: Seq[Transform],
tableProperties: Map[String, String],
orCreate: Boolean) extends LeafExecNode {
override protected def doExecute(): RDD[InternalRow] = {
val staged = if (orCreate) {
catalog.stageCreateOrReplace(
identifier, tableSchema, partitioning.toArray, tableProperties.asJava)
} else if (catalog.tableExists(identifier)) {
try {
catalog.stageReplace(
identifier, tableSchema, partitioning.toArray, tableProperties.asJava)
} catch {
case e: NoSuchTableException =>
throw new CannotReplaceMissingTableException(identifier, Some(e))
}
} else {
throw new CannotReplaceMissingTableException(identifier)
}
commitOrAbortStagedChanges(staged)
sqlContext.sparkContext.parallelize(Seq.empty, 1)
}
override def output: Seq[Attribute] = Seq.empty
private def commitOrAbortStagedChanges(staged: StagedTable): Unit = {
Utils.tryWithSafeFinallyAndFailureCallbacks({
staged.commitStagedChanges()
})(catchBlock = {
staged.abortStagedChanges()
})
}
}
| pgandhi999/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/ReplaceTableExec.scala | Scala | apache-2.0 | 3,355 |
package com.softwaremill.example
import akka.actor.{Props, Actor, ActorSystem}
import spray.routing._
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
object DockedServer extends App with SimpleRoutingApp {
// setup
implicit val actorSystem = ActorSystem()
implicit val timeout = Timeout(1.second)
import actorSystem.dispatcher
// an actor which holds a map of counters which can be queried and updated
val countersActor = actorSystem.actorOf(Props(new CountersActor()))
startServer(interface = "0.0.0.0", port = 8080) {
// definition of how the server should behave when a request comes in
// simplest route, matching only GET /hello, and sending a "Hello World!" response
get {
path("hello") {
complete {
"Hello World! This is the welcome page 2017"
}
}
} ~ // the ~ concatenates two routes: if the first doesn't match, the second is tried
path("counter" / Segment) { counterName => // extracting the second path component into a closure argument
get {
complete {
(countersActor ? Get(counterName)) // integration with futures
.mapTo[Int]
.map(amount => s"$counterName is: $amount")
}
} ~
post {
parameters("amount".as[Int]) { amount => // the type of the amount closure argument is Int, as specified!
countersActor ! Add(counterName, amount) // fire-and-forget
complete {
"OK"
}
}
}
}
}
// implementation of the actor
class CountersActor extends Actor {
private var counters = Map[String, Int]()
override def receive = {
case Get(counterName) => sender ! counters.getOrElse(counterName, 0)
case Add(counterName, amount) =>
val newAmount = counters.getOrElse(counterName, 0) + amount
counters = counters + (counterName -> newAmount)
}
}
// messages to communicate with the counters actor
case class Get(counterName: String)
case class Add(counterName: String, amount: Int)
}
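// Usage sketch (assumes the server above is running locally on port 8080, as configured):
//   curl http://localhost:8080/hello                        -> "Hello World! This is the welcome page 2017"
//   curl -X POST "http://localhost:8080/counter/a?amount=5" -> "OK"
//   curl http://localhost:8080/counter/a                    -> "a is: 5"
// The counter name "a" and the amount 5 are arbitrary example values.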
| F-Xu/docker-spray-example | src/main/scala/com/softwaremill/example/DockedServer.scala | Scala | apache-2.0 | 2,083 |
package com.twitter.scalding
import org.specs._
import com.twitter.scalding._
/**
 * Simple example: first group the data by gender, then sort each group by height in reverse order.
 * Then add another column for each group which holds the rank order of the height.
*/
class AddRankingWithScanLeft(args: Args) extends Job(args) {
Tsv("input1", ('gender, 'height))
.read
.groupBy('gender) { group =>
group.sortBy('height).reverse
      group.scanLeft(('height) -> ('rank))((0L)) {
        (rank: Long, height: Double) =>
          {
            // the height value itself is unused; we only increment the running rank
            (rank + 1L)
          }
      }
}
// scanLeft generates an extra line per group, thus remove it
.filter('height) { x: String => x != null }
.debug
.write(Tsv("result1"))
}
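// Worked example (values taken from the test data below): given the input rows
//   (male, 165.2), (female, 172.2), (male, 184.1), (male, 125.4), (female, 128.6)
// each gender group is sorted by height in descending order and assigned a 1-based rank:
//   (male, 184.1, 1), (male, 165.2, 2), (male, 125.4, 3), (female, 172.2, 1), (female, 128.6, 2)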
/**
 * Advanced example: count the seconds each user spent reading a blog article (using scanLeft).
 * For the sake of simplicity we assume that date-times have already been converted into epoch seconds.
*/
//class ScanLeftTimeExample(args: Args) extends Job(args) {
//
// Tsv("input2", ('epoch, 'user, 'event))
// // Create a helper symbol first
// .insert('temp, 0L)
// // Group by user and sort by epoch in reverse, so that most recent event comes first
// .groupBy('user) { group =>
// group.sortBy('epoch).reverse
// .scanLeft(('epoch, 'temp) -> ('originalEpoch, 'duration))((0L, 0L)) {
// (firstLine: (Long, Long), secondLine: (Long, Long)) =>
// var delta = firstLine._1 - secondLine._1
// // scanLeft is initialised with (0L,0L) so first subtraction
// // will result into a negative number!
// if (delta < 0L) delta = -delta
// (secondLine._1, delta)
// }
// }
// .project('epoch, 'user, 'event, 'duration)
// // Remove lines introduced by scanLeft and discard helping symbols
// .filter('epoch) { x: Any => x != null }
// // Order in ascending time
// .groupBy('user) { group =>
// group.sortBy('epoch)
// }
// // You can now remove most recent events where we are uncertain of time spent
// .filter('duration) { x: Long => x < 10000L }
// .debug
// .write(Tsv("result2"))
//
//}
class ScanLeftTest extends Specification {
import Dsl._
// --- A simple ranking job
val sampleInput1 = List(
("male", "165.2"),
("female", "172.2"),
("male", "184.1"),
("male", "125.4"),
("female", "128.6"))
  // Each group is sorted by height and ranked, from the tallest person to the shortest
val expectedOutput1 = Set(
("male", 184.1, 1),
("male", 165.2, 2),
("male", 125.4, 3),
("female", 172.2, 1),
("female", 128.6, 2))
"A simple ranking scanleft job" should {
JobTest("com.twitter.scalding.AddRankingWithScanLeft")
.source(Tsv("input1", ('gender, 'height)), sampleInput1)
.sink[(String, Double, Long)](Tsv("result1")) { outBuf1 =>
"produce correct number of records when filtering out null values" in {
outBuf1.size must_== 5
}
"create correct ranking per group, 1st being the heighest person of that group" in {
outBuf1.toSet must_== expectedOutput1
}
}
.run
.finish
}
// // --- A trickier duration counting job
// var sampleInput2 = List(
// (1370737000L, "userA", "/read/blog/123"),
// (1370737002L, "userB", "/read/blog/781"),
// (1370737028L, "userA", "/read/blog/621"),
// (1370737067L, "userB", "/add/comment/"),
// (1370737097L, "userA", "/read/blog/888"),
// (1370737103L, "userB", "/read/blog/999"))
//
// // Each group sorted and ranking added highest person to shortest
// val expectedOutput2 = Set(
// (1370737000L, "userA", "/read/blog/123", 28), // userA was reading blog/123 for 28 seconds
// (1370737028L, "userA", "/read/blog/621", 69), // userA was reading blog/621 for 69 seconds
// (1370737002L, "userB", "/read/blog/781", 65), // userB was reading blog/781 for 65 seconds
// (1370737067L, "userB", "/add/comment/", 36)) // userB was posting a comment for 36 seconds
// // Note that the blog/999 is not recorded as we can't tell how long userB spend on it based on the input
//
// "A more advanced time extraction scanleft job" should {
// JobTest("com.twitter.scalding.ScanLeftTimeExample")
// .source(Tsv("input2", ('epoch, 'user, 'event)), sampleInput2)
// .sink[(Long, String, String, Long)](Tsv("result2")) { outBuf2 =>
// "produce correct number of records when filtering out null values" in {
// outBuf2.size must_== 4
// }
// "create correct output per user" in {
// outBuf2.toSet must_== expectedOutput2
// }
// }
// .run
// .finish
// }
}
| lucamilanesio/scalding | scalding-core/src/test/scala/com/twitter/scalding/ScanLeftTest.scala | Scala | apache-2.0 | 4,723 |
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert
package network
package common
import com.linkedin.norbert.norbertutils.MockClock
import org.specs2.mutable.SpecificationWithJUnit
class CachedNetworkStatisticsSpec extends SpecificationWithJUnit {
val cachedNetworkStatistics = CachedNetworkStatistics[Int, Int](new MockClock, 100, 100)
"CachedNetworkStatistics" should {
"clear out maps properly" in {
cachedNetworkStatistics.beginRequest(1, 1, 0)
cachedNetworkStatistics.beginNetty(1, 1, 0)
cachedNetworkStatistics.beginRequest(1, 2, 0)
cachedNetworkStatistics.beginNetty(1, 2, 0)
cachedNetworkStatistics.pendingTimings.get.get.get(1).get.length must be_==(2)
cachedNetworkStatistics.endRequest(1, 1)
cachedNetworkStatistics.endRequest(1, 2)
cachedNetworkStatistics.pendingTimings.get.get.get(1).get.length must be_==(0)
}
}
}
| linkedin/norbert | network/src/test/scala/com/linkedin/norbert/network/common/CachedNetworkStatisticsSpec.scala | Scala | apache-2.0 | 1,471 |
def f(arr:List[Int]):List[Int] = arr.view.zipWithIndex.filter(_._2 % 2 != 0).map(_._1).toList // keep elements at odd 0-based indices, i.e. every 2nd, 4th, ... element
| clemus90/competitive-programming | hackerRank/FunctionalProgramming/FilterPositionInAList.scala | Scala | mit | 95 |
object Test extends App {
val y = (0: Int) match {
case 1 => 1
case 0 | 0 => 0
case 2 | 2 | 2 | 3 | 2 | 3 => 0
case 4 | (_ @ 4) => 0
case _ => -1
}
assert(y == 0, y)
}
| AlexSikia/dotty | tests/untried/neg/t7290.scala | Scala | bsd-3-clause | 194 |
package almond
import java.util.UUID
import almond.channels.Channel
import almond.interpreter.{ExecuteResult, Message}
import almond.protocol.{Execute => ProtocolExecute, _}
import almond.kernel.{ClientStreams, Kernel, KernelThreads}
import almond.TestLogging.logCtx
import ammonite.util.Colors
import cats.effect.IO
import fs2.Stream
import utest._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration
object TestUtil {
private def noCrLf(input: String): String =
input.replace("\\r\\n", "\\n")
def noCrLf(res: ExecuteResult): ExecuteResult =
res match {
case s: ExecuteResult.Success =>
ExecuteResult.Success(
s.data.copy(data = s.data.data.map {
case ("text/plain", v) => ("text/plain", noCrLf(v))
case (k, v) => (k, v)
})
)
case other => other
}
def isScala212 =
scala.util.Properties.versionNumberString.startsWith("2.12.")
implicit class IOOps[T](private val io: IO[T]) extends AnyVal {
// beware this is not *exactly* a timeout, more a max idle time say… (see the scaladoc of IO.unsafeRunTimed)
def unsafeRunTimedOrThrow(duration: Duration = Duration.Inf): T =
io.unsafeRunTimed(duration).getOrElse {
throw new Exception("Timeout")
}
}
def execute(
sessionId: String,
code: String,
msgId: String = UUID.randomUUID().toString,
stopOnError: Boolean = true
) =
Message(
Header(
msgId,
"test",
sessionId,
ProtocolExecute.requestType.messageType,
Some(Protocol.versionStr)
),
ProtocolExecute.Request(code, stop_on_error = Some(stopOnError))
).on(Channel.Requests)
class SessionRunner(
interpreterEc: ExecutionContext,
bgVarEc: ExecutionContext,
threads: KernelThreads
) {
def run(inputs: Seq[(String, String)], publish: Seq[String] = Nil): Unit = {
val (input, replies) = inputs.unzip
val sessionId = UUID.randomUUID().toString
val lastMsgId = UUID.randomUUID().toString
val stopWhen: (Channel, Message[RawJson]) => IO[Boolean] =
(_, m) =>
IO.pure(m.header.msg_type == "execute_reply" && m.parent_header.exists(_.msg_id == lastMsgId))
assert(input.nonEmpty)
val input0 = Stream(
input.init.map(s => execute(sessionId, s)) :+
execute(sessionId, input.last, lastMsgId): _*
)
val streams = ClientStreams.create(input0, stopWhen)
val interpreter = new ScalaInterpreter(
params = ScalaInterpreterParams(
updateBackgroundVariablesEcOpt = Some(bgVarEc),
initialColors = Colors.BlackWhite
),
logCtx = logCtx
)
val t = Kernel.create(interpreter, interpreterEc, threads, logCtx)
.flatMap(_.run(streams.source, streams.sink))
t.unsafeRunTimedOrThrow()
val replies0 = streams.executeReplies.filter(_._2.nonEmpty)
val expectedReplies = replies
.zipWithIndex
.collect {
case (r, idx) if r.nonEmpty =>
(idx + 1) -> r
}
.toMap
val publish0 = streams.displayDataText
for ((a, b) <- publish0.zip(publish) if a != b)
System.err.println(s"Expected $b, got $a")
for (k <- replies0.keySet.intersect(expectedReplies.keySet) if replies0.get(k) != expectedReplies.get(k))
System.err.println(s"At line $k: expected ${expectedReplies(k)}, got ${replies0(k)}")
for (k <- replies0.keySet.--(expectedReplies.keySet))
System.err.println(s"At line $k: expected nothing, got ${replies0(k)}")
for (k <- expectedReplies.keySet.--(replies0.keySet))
System.err.println(s"At line $k: expected ${expectedReplies(k)}, got nothing")
assert(replies0.mapValues(noCrLf).toMap == expectedReplies.mapValues(noCrLf).toMap)
assert(publish0.map(noCrLf) == publish.map(noCrLf))
}
}
}
| alexarchambault/jupyter-scala | modules/scala/scala-interpreter/src/test/scala/almond/TestUtil.scala | Scala | apache-2.0 | 3,929 |
//https://www.hackerrank.com/challenges/filter-elements
object FilterElements extends App {
import scala.collection.mutable.{Map => Map}
import scala.annotation.tailrec
@tailrec
def f(a: List[Int], k: Int, m: Map[Int, Int]): List[Int] = a match {
case Nil => m.filter(_._2 >= k).keys.toList
case head :: tail => m(head) += 1; f(tail, k, m)
}
val t = io.StdIn.readInt // number of test cases
for (_ <- 1 to t) {
val Array(n, k) = io.StdIn.readLine.split(" ").map(_.toInt) // list size, repetition count
val a = io.StdIn.readLine.split(" ").map(_.toInt).toList
val m = scala.collection.mutable.LinkedHashMap.empty[Int, Int] withDefaultValue 0
val filtered = f(a, k, m)
println(if (filtered.isEmpty) "-1" else filtered.mkString(" "))
}
} | flopezlasanta/hackerrank | src/functional_programming/recursion/FilterElements.scala | Scala | mit | 793 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.util
import java.util.Map.Entry
import java.util.concurrent.TimeUnit
import java.util.{Iterator => jIterator}
import org.apache.accumulo.core.client.{IteratorSetting, Scanner}
import org.apache.accumulo.core.data.{Key, Range, Value}
import org.apache.hadoop.io.Text
import scala.collection.JavaConversions._
object EmptyScanner extends Scanner {
override def getRange: Range = ???
override def setRange(range: Range): Unit = {}
override def setTimeOut(timeOut: Int): Unit = {}
override def getTimeOut: Int = ???
override def getTimeout(timeUnit: TimeUnit): Long = ???
override def setTimeout(timeOut: Long, timeUnit: TimeUnit): Unit = {}
override def setBatchSize(size: Int): Unit = {}
override def getBatchSize: Int = ???
override val iterator: jIterator[Entry[Key, Value]] = Iterator.empty
override def enableIsolation(): Unit = {}
override def disableIsolation(): Unit = {}
override def addScanIterator(cfg: IteratorSetting): Unit = {}
override def updateScanIteratorOption(iteratorName: String, key: String, value: String): Unit = {}
override def removeScanIterator(iteratorName: String): Unit = {}
override def clearScanIterators(): Unit = {}
override def fetchColumn(colFam: Text, colQual: Text): Unit = {}
override def fetchColumnFamily(col: Text): Unit = {}
override def clearColumns(): Unit = {}
override def close(): Unit = {}
  // added in accumulo 1.6 - don't use override so 1.5 still compiles
def getReadaheadThreshold: Long = ???
def setReadaheadThreshold(batches: Long): Unit = {}
}
| mdzimmerman/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/util/EmptyScanner.scala | Scala | apache-2.0 | 2,053 |
package com.akkademy
case class ParseArticle(uri: String)
case class ParseHtmlArticle(url: String, htmlString: String)
case class HttpResponse(body: String)
case class ArticleBody(url: String, body: String)
| josiah14/AkkademyDb | akkademaid/src/main/scala/com/akkademy/Messages.scala | Scala | mit | 208 |
package depsearch.indexer.s3
import java.util.concurrent.Callable
import java.util.concurrent.Executors
import scala.collection.JavaConverters._
import com.amazonaws.services.s3.AmazonS3
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model.GetObjectRequest
import com.amazonaws.services.s3.model.S3ObjectSummary
import java.util.concurrent.TimeUnit
import com.amazonaws.services.s3.model.ListObjectsRequest
import S3Lister._
import depsearch.indexer.IvyDependencyParser
import depsearch.db.DependencyDB
import depsearch.common.model.Dependency
import depsearch.db.mongo.MongoDependencyDB
import java.io.InputStream
class S3Indexer(db: DependencyDB) {
val s3: AmazonS3 = new AmazonS3Client
val executor = Executors.newFixedThreadPool(5)
def index(bucket: String, prefix: Option[String] = None) {
val r = new ListObjectsRequest().withBucketName(bucket).withPrefix(prefix.getOrElse(null))
s3.listBatch(r) { list =>
list.grouped(100) foreach { g =>
executor.submit(new IndexWorker(s3, db, g))
}
}
executor.shutdown()
executor.awaitTermination(10, TimeUnit.MINUTES)
db.setLastUpdated(new java.util.Date)
}
class IndexWorker(s3: AmazonS3, db: DependencyDB, list: Iterable[S3ObjectSummary]) extends Callable[Boolean] {
val parser = new IvyDependencyParser
val ivyFilePattern = """.*/ivy-[^/]+.xml$""".r
private def getObject(o: S3ObjectSummary): InputStream = {
val obj = s3.getObject(new GetObjectRequest(o.getBucketName, o.getKey))
return obj.getObjectContent
}
def call(): Boolean = {
for (elem <- list) {
if (ivyFilePattern.findFirstIn(elem.getKey()).isDefined) {
val in = getObject(elem)
try {
db.update(parser.parse(in))
} catch {
case e: Exception => {
System.err.println("+" * 50)
System.err.println(elem.getKey() + ", " + e.getMessage())
System.err.println("+" * 50)
}
} finally {
in.close()
}
}
}
true
}
}
}
object S3Indexer {
def main(args: Array[String]) {
val db = MongoDependencyDB()
val bucket = args(0)
val prefix = if (args.length > 1) Some(args(1)) else None
new S3Indexer(db).index(bucket, prefix)
}
} | ogrodnek/dependency-repository-indexer | src/main/scala/depsearch/indexer/s3/S3Indexer.scala | Scala | apache-2.0 | 2,397 |
package org.openurp.edu.eams.teach.planaudit.service.listeners
import org.beangle.commons.collection.Collections
import org.openurp.edu.base.Course
import org.openurp.edu.base.code.CourseType
import org.openurp.edu.teach.grade.CourseGrade
import org.openurp.edu.teach.planaudit.CourseAuditResult
import org.openurp.edu.teach.planaudit.GroupAuditResult
import org.openurp.edu.teach.planaudit.PlanAuditResult
import org.openurp.edu.teach.planaudit.model.CourseAuditResultBean
import org.openurp.edu.teach.planaudit.model.GroupAuditResultBean
import org.openurp.edu.eams.teach.planaudit.service.PlanAuditContext
import org.openurp.edu.eams.teach.planaudit.service.PlanAuditListener
import org.openurp.edu.eams.teach.planaudit.service.StdGrade
import org.openurp.edu.teach.plan.CourseGroup
import org.openurp.edu.teach.plan.PlanCourse
class PlanAuditCommonElectiveListener extends PlanAuditListener {
def endPlanAudit(context: PlanAuditContext) {
val result = context.result
val stdGrade = context.stdGrade
val electiveType = context.standard.convertTarCourseType
if (null == electiveType) return
var groupResult = result.groupResult(electiveType)
if (null == groupResult) {
val groupRs = new GroupAuditResultBean()
groupRs.courseType = electiveType
groupRs.name = electiveType.name
groupRs.groupNum = -1
groupResult = groupRs
result.addGroupResult(groupResult)
}
val restCourses = stdGrade.getRestCourses
for (course <- restCourses) {
val courseResult = new CourseAuditResultBean()
courseResult.course = course
val grades = stdGrade.useGrades(course)
if (!grades.isEmpty &&
grades(0).courseType.id != electiveType.id) {
        courseResult.remark = "计划外" // literally "outside the plan", i.e. the course is not part of the study plan
}
courseResult.checkPassed(grades)
groupResult.addCourseResult(courseResult)
}
processConvertCredits(groupResult, result, context)
groupResult.checkPassed(true)
}
protected def processConvertCredits(target: GroupAuditResult, result: PlanAuditResult, context: PlanAuditContext) {
val parents = Collections.newSet[GroupAuditResult]
val sibling = Collections.newSet[GroupAuditResult]
var start = target.parent
while (null != start && !parents.contains(start)) {
parents.add(start)
start = start.parent
}
val parent = target.parent
if (null != parent) {
sibling ++= parent.children
sibling.remove(target)
}
var otherConverted = 0f
var siblingConverted = 0f
for (gr <- result.groupResults) {
val convertable = context.standard.isConvertable(gr.courseType) || (gr != target && !parents.contains(gr))
if (convertable) {
if (sibling.contains(gr)) {
siblingConverted += (if (gr.passed) (gr.auditStat.creditsCompleted - gr.auditStat.creditsRequired) else 0f)
} else if (null == gr.parent) {
otherConverted += (if (gr.passed) gr.auditStat.creditsCompleted - gr.auditStat.creditsRequired else 0f)
}
}
}
target.auditStat.creditsConverted = (otherConverted + siblingConverted)
for (r <- parents) r.auditStat.creditsConverted = otherConverted
}
def startPlanAudit(context: PlanAuditContext): Boolean = true
def startCourseAudit(context: PlanAuditContext, groupResult: GroupAuditResult, planCourse: PlanCourse): Boolean = {
true
}
def startGroupAudit(context: PlanAuditContext, courseGroup: CourseGroup, groupResult: GroupAuditResult): Boolean = {
true
}
}
| openurp/edu-eams-webapp | core/src/main/scala/org/openurp/edu/eams/teach/planaudit/service/listeners/PlanAuditCommonElectiveListener.scala | Scala | gpl-3.0 | 3,509 |
package miniconf.server
import akka.actor.{ActorRef, ActorSystem}
/**
* Created by wenzhi.bwz on 14-7-18.
*/
object MainApp {
def main(args : Array[String]) : Unit = {
start()
}
def start() {
try {
val system = ActorSystem("miniconf-system")
val miniconfConf = system.settings.config.getConfig("miniconf")
val interface: String = miniconfConf.getString("httpService.interface")
val port: Int = miniconfConf.getInt("httpService.port")
system.actorOf(MainHttpService.props(interface, port), "mainHttpService")
} catch {
case e:Throwable => e.printStackTrace()
}
}
}
| netcomm/miniconf | src/main/scala/miniconf/server/MainApp.scala | Scala | apache-2.0 | 631 |
package org.genericConfig.admin.shared.configTree
/**
* Copyright (C) 2016 Gennadi Heimann [email protected]
*
* Created by Gennadi Heimann 02.06.2020
*/
case class ConfigTreeItemDTO(
configProperties : Option[ConfigTreeItemConfigPropDTO],
userProperties : Option[ConfigTreeItemUserPropDTO]
)
| gennadij/admin | shared/src/main/scala/org/genericConfig/admin/shared/configTree/ConfigTreeItemDTO.scala | Scala | apache-2.0 | 391 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import com.mongodb.{WriteConcern => JWriteConcern}
/**
* Controls the acknowledgment of write operations with various options.
*
* ==`w`==
* - 0: Don't wait for acknowledgement from the server
* - 1: Wait for acknowledgement, but don't wait for secondaries to replicate
* - >=2: Wait for one or more secondaries to also acknowledge
* - "majority": Wait for a majority of secondaries to also acknowledge
* - "<tag set name>": Wait for one or more secondaries to also acknowledge based on a tag set name
*
 * ==`wTimeout` - how long to wait for secondaries to acknowledge before failing ==
* - 0: indefinite
* - >0: time to wait in milliseconds
*
* ==Other options:==
*
* - `journal`: If true block until write operations have been committed to the journal. Cannot be used in combination with `fsync`.
* Prior to MongoDB 2.6 this option was ignored if the server was running without journaling. Starting with MongoDB 2.6
* write operations will fail with an exception if this option is used when the server is running without journaling.
*
* == Implicit helper ==
*
 * The [[ScalaWriteConcern]] implicit allows for chainable building of the WriteConcern, e.g.:
*
* {{{
* val myWriteConcern = WriteConcern.ACKNOWLEDGED.withJournal(true)).withWTimeout(Duration(10, TimeUnit.MILLISECONDS))
* }}}
*
* @since 1.0
*/
object WriteConcern {
/**
* Write operations that use this write concern will wait for acknowledgement from the primary server before returning. Exceptions are
* raised for network issues, and server errors.
*/
val ACKNOWLEDGED: JWriteConcern = JWriteConcern.ACKNOWLEDGED
/**
* Write operations that use this write concern will wait for acknowledgement from a single member.
*/
val W1: JWriteConcern = apply(1)
/**
* Write operations that use this write concern will wait for acknowledgement from two members.
*/
val W2: JWriteConcern = apply(2)
/**
* Write operations that use this write concern will wait for acknowledgement from three members.
*/
val W3: JWriteConcern = apply(3)
/**
* Write operations that use this write concern will return as soon as the message is written to the socket. Exceptions are raised for
* network issues, but not server errors.
*/
val UNACKNOWLEDGED: JWriteConcern = JWriteConcern.UNACKNOWLEDGED
/**
* Exceptions are raised for network issues, and server errors; the write operation waits for the server to group commit to the journal
* file on disk.
*/
val JOURNALED: JWriteConcern = JWriteConcern.JOURNALED
/**
* Exceptions are raised for network issues, and server errors; waits on a majority of servers for the write operation.
*/
val MAJORITY: JWriteConcern = JWriteConcern.MAJORITY
/**
* Create a WriteConcern with the set number of acknowledged writes before returning
*
* @param w number of writes
*/
def apply(w: Int): JWriteConcern = new JWriteConcern(w)
/**
* Tag set named write concern or a "majority" write concern.
*
* @param w Write Concern tag set name or "majority", representing the servers to ensure write propagation to before acknowledgment.
* Do not use string representation of integer values for w.
*/
def apply(w: String): JWriteConcern = new JWriteConcern(w)
}
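// Usage sketch (illustrative): the Int and String apply helpers build the same write
// concerns as the predefined values above, e.g.
//   val twoNodes = WriteConcern(2)          // equivalent to WriteConcern.W2
//   val majority = WriteConcern("majority") // "majority" / tag-set string form
// and, with the ScalaWriteConcern implicit in scope, options can be chained as in the scaladoc:
//   WriteConcern.MAJORITY.withJournal(true).withWTimeout(Duration(10, TimeUnit.MILLISECONDS))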
| rozza/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/WriteConcern.scala | Scala | apache-2.0 | 3,901 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import scala.util.Try
import scala.util.Success
import scala.util.Failure
class OptionSugarSpec extends UnitSpec with Accumulation with OptionSugar {
"OptionSugar" should "enable toOr to be invoked on Options" in {
Some(12).toOr("won't be used") shouldBe Good(12)
(Some(12): Option[Int]).toOr("won't be used") shouldBe Good(12)
val ex = new Exception("oops")
None.toOr(ex) shouldBe Bad(ex)
None.toOr("oops") shouldBe Bad("oops")
(None: Option[String]).toOr("oops") shouldBe Bad("oops")
}
it should "take a byName for the orElse" in {
var noneChangedThis = false
var someChangedThis = false
None.toOr{noneChangedThis = true; "oops"} shouldBe Bad("oops")
Some(12).toOr{someChangedThis = true; "oops"} shouldBe Good(12)
noneChangedThis shouldBe true
someChangedThis shouldBe false
}
}
| dotty-staging/scalatest | scalactic-test/src/test/scala/org/scalactic/OptionSugarSpec.scala | Scala | apache-2.0 | 1,465 |
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package adapters
// Iglu
import iglu.client.Resolver
// Scalaz
import scalaz._
import Scalaz._
// This project
import loaders.CollectorPayload
import registry.snowplow.{Tp1Adapter => SpTp1Adapter}
import registry.snowplow.{Tp2Adapter => SpTp2Adapter}
import registry.snowplow.{RedirectAdapter => SpRedirectAdapter}
import registry.{
CloudfrontAccessLogAdapter,
IgluAdapter,
CallrailAdapter,
MailchimpAdapter,
MandrillAdapter,
PagerdutyAdapter,
PingdomAdapter
}
/**
* The AdapterRegistry lets us convert a CollectorPayload
* into one or more RawEvents, using a given adapter.
*/
object AdapterRegistry {
private object Vendor {
val Snowplow = "com.snowplowanalytics.snowplow"
val Redirect = "r"
val Iglu = "com.snowplowanalytics.iglu"
val Callrail = "com.callrail"
val Mailchimp = "com.mailchimp"
val Mandrill = "com.mandrill"
val Pagerduty = "com.pagerduty"
val Pingdom = "com.pingdom"
val Cloudfront = "com.amazon.aws.cloudfront"
}
/**
* Router to determine which adapter we use
* to convert the CollectorPayload into
* one or more RawEvents.
*
* @param payload The CollectorPayload we
* are transforming
* @param resolver (implicit) The Iglu resolver used for
* schema lookup and validation
* @return a Validation boxing either a
* NEL of RawEvents on Success,
* or a NEL of Strings on Failure
*/
def toRawEvents(payload: CollectorPayload)(implicit resolver: Resolver): ValidatedRawEvents = (payload.api.vendor, payload.api.version) match {
case (Vendor.Snowplow, "tp1") => SpTp1Adapter.toRawEvents(payload)
case (Vendor.Snowplow, "tp2") => SpTp2Adapter.toRawEvents(payload)
case (Vendor.Redirect, "tp2") => SpRedirectAdapter.toRawEvents(payload)
case (Vendor.Iglu, "v1") => IgluAdapter.toRawEvents(payload)
case (Vendor.Callrail, "v1") => CallrailAdapter.toRawEvents(payload)
case (Vendor.Mailchimp, "v1") => MailchimpAdapter.toRawEvents(payload)
case (Vendor.Mandrill, "v1") => MandrillAdapter.toRawEvents(payload)
case (Vendor.Pagerduty, "v1") => PagerdutyAdapter.toRawEvents(payload)
case (Vendor.Pingdom, "v1") => PingdomAdapter.toRawEvents(payload)
case (Vendor.Cloudfront, "wd_access_log") => CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(payload)
// TODO: add Sendgrid et al
case _ => s"Payload with vendor ${payload.api.vendor} and version ${payload.api.version} not supported by this version of Scala Common Enrich".failNel
}
}
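// Routing sketch (grounded in the match above): a payload with api.vendor = "com.mailchimp"
// and api.version = "v1" is dispatched to MailchimpAdapter.toRawEvents(payload); any
// unrecognised (vendor, version) pair fails with the "not supported" message above, e.g.
//
//   AdapterRegistry.toRawEvents(payload)(resolver) // a NEL of RawEvents on Success, or a NEL of Strings on Failure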
| digitized-io/snowplow | 3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/AdapterRegistry.scala | Scala | apache-2.0 | 3,372 |
package com.twitter.zipkin.storage.cassandra
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
import scala.collection._
import java.nio.ByteBuffer
import org.specs.Specification
import org.specs.mock.{JMocker, ClassMocker}
import com.twitter.conversions.time._
import scala.collection.JavaConverters._
import com.twitter.zipkin.gen
import com.twitter.cassie.tests.util.FakeCassandra
import com.twitter.ostrich.admin.RuntimeEnvironment
import com.twitter.util.{Eval, Future}
import java.util.{Set => JSet}
import com.twitter.cassie.{BatchMutationBuilder, Column, Order, ColumnFamily}
import com.twitter.zipkin.config.{CassandraConfig, CassandraIndexConfig}
import com.twitter.io.TempFile
import com.twitter.zipkin.common.{BinaryAnnotation, Annotation, Span, Endpoint}
import com.twitter.zipkin.adapter.ThriftAdapter
class CassandraIndexSpec extends Specification with JMocker with ClassMocker {
object FakeServer extends FakeCassandra
var cassandraIndex: CassandraIndex = null
val ep = Endpoint(123, 123, "service")
def binaryAnnotation(key: String, value: String) =
BinaryAnnotation(key, ByteBuffer.wrap(value.getBytes), ThriftAdapter(gen.AnnotationType.String), Some(ep))
val spanId = 456
val ann1 = Annotation(1, "cs", Some(ep))
val ann2 = Annotation(2, "sr", None)
val ann3 = Annotation(2, "custom", Some(ep))
val ann4 = Annotation(2, "custom", Some(ep))
val span1 = Span(123, "methodcall", spanId, None, List(ann1, ann3),
List(binaryAnnotation("BAH", "BEH")))
val span2 = Span(123, "methodcall", spanId, None, List(ann2),
List(binaryAnnotation("BAH2", "BEH2")))
val span3 = Span(123, "methodcall", spanId, None, List(ann2, ann3, ann4),
List(binaryAnnotation("BAH2", "BEH2")))
val spanEmptySpanName = Span(123, "", spanId, None, List(ann1, ann2), List())
val spanEmptyServiceName = Span(123, "spanname", spanId, None, List(), List())
val mergedSpan = Span(123, "methodcall", spanId, None,
List(ann1, ann2), List(binaryAnnotation("BAH2", "BEH2")))
"CassandraIndex" should {
doBefore {
FakeServer.start()
val test = TempFile.fromResourcePath("/CassandraIndexConfig.scala")
val env = RuntimeEnvironment(this, Array("-f", test.toString))
val config = new Eval().apply[CassandraIndexConfig](env.configFile)
config.cassandraConfig.port = FakeServer.port.get
cassandraIndex = config.apply()
}
doAfter {
cassandraIndex.close()
FakeServer.stop()
}
"index and get span names" in {
cassandraIndex.indexSpanNameByService(span1)()
cassandraIndex.getSpanNames("service")() mustEqual Set(span1.name)
}
"index and get service names" in {
cassandraIndex.indexServiceName(span1)()
cassandraIndex.getServiceNames() mustEqual Set(span1.serviceNames.head)
}
"index only on annotation in each span with the same value" in {
val _annotationsIndex = mock[ColumnFamily[ByteBuffer, Long, Long]]
val batch = mock[BatchMutationBuilder[ByteBuffer, Long, Long]]
val _config = mock[CassandraConfig]
val cs = new CassandraIndex() {
val config = _config
val serviceSpanNameIndex = null
val serviceNameIndex = null
val annotationsIndex = _annotationsIndex
val durationIndex = null
val serviceNames = null
val spanNames = null
}
val col = Column[Long, Long](ann3.timestamp, span3.traceId)
expect {
2.of(_config).tracesTimeToLive willReturn 20.days
one(_annotationsIndex).batch willReturn batch
one(batch).insert(a[ByteBuffer], a[Column[Long, Long]])
allowingMatch(batch, "insert")
one(batch).execute
}
cs.indexSpanByAnnotations(span3)
}
"getTraceIdsByName" in {
var ls = List[Long]()
//cassandra.storeSpan(span1)()
cassandraIndex.indexTraceIdByServiceAndName(span1)()
cassandraIndex.getTraceIdsByName("service", None, 0, 3)() foreach {
_ mustEqual span1.traceId
}
cassandraIndex.getTraceIdsByName("service", Some("methodname"), 0, 3)() foreach {
_ mustEqual span1.traceId
}
}
"getTracesDuration" in {
// no support in FakeCassandra for order and limit and it seems tricky to add
// so will mock the index instead
val _durationIndex = new ColumnFamily[Long, Long, String] {
override def multigetRows(keys: JSet[Long], startColumnName: Option[Long], endColumnName: Option[Long], order: Order, count: Int) = {
if (!order.reversed) {
Future.value(Map(321L -> Map(100L -> Column(100L, "")).asJava).asJava)
} else {
Future.value(Map(321L -> Map(120L -> Column(120L, "")).asJava).asJava)
}
}
}
val cass = new CassandraIndex() {
val config = new CassandraConfig{}
val serviceSpanNameIndex = null
val serviceNameIndex = null
val annotationsIndex = null
val durationIndex = _durationIndex
val serviceNames = null
val spanNames = null
}
val duration = cass.getTracesDuration(Seq(321L))()
duration(0).traceId mustEqual 321L
duration(0).duration mustEqual 20
}
"get no trace durations due to missing data" in {
// no support in FakeCassandra for order and limit and it seems tricky to add
// so will mock the index instead
val _durationIndex = new ColumnFamily[Long, Long, String] {
override def multigetRows(keys: JSet[Long], startColumnName: Option[Long], endColumnName: Option[Long], order: Order, count: Int) = {
if (!order.reversed) {
Future.value(Map(321L -> Map(100L -> Column(100L, "")).asJava).asJava)
} else {
Future.value(Map(321L -> Map[Long, Column[Long,String]]().asJava).asJava)
}
}
}
val cass = new CassandraIndex() {
val config = new CassandraConfig{}
val serviceSpanNameIndex = null
val serviceNameIndex = null
val annotationsIndex = null
val durationIndex = _durationIndex
val serviceNames = null
val spanNames = null
}
val duration = cass.getTracesDuration(Seq(321L))()
duration.isEmpty mustEqual true
}
"getTraceIdsByAnnotation" in {
cassandraIndex.indexSpanByAnnotations(span1)()
// fetch by time based annotation, find trace
var seq = cassandraIndex.getTraceIdsByAnnotation("service", "custom", None, 0, 3)()
seq mustEqual Seq(span1.traceId)
// should not find any traces since the core annotation doesn't exist in index
seq = cassandraIndex.getTraceIdsByAnnotation("service", "cs", None, 0, 3)()
seq.isEmpty mustBe true
// should find traces by the key and value annotation
seq = cassandraIndex.getTraceIdsByAnnotation("service", "BAH",
Some(ByteBuffer.wrap("BEH".getBytes)), 0, 3)()
seq mustEqual Seq(span1.traceId)
}
"not index empty service name" in {
cassandraIndex.indexServiceName(spanEmptyServiceName)
val serviceNames = cassandraIndex.getServiceNames()
serviceNames.isEmpty mustBe true
}
"not index empty span name " in {
cassandraIndex.indexSpanNameByService(spanEmptySpanName)
val spanNames = cassandraIndex.getSpanNames(spanEmptySpanName.name)
spanNames().isEmpty mustBe true
}
}
}
| dsias/zipkin | zipkin-server/src/test/scala/com/twitter/zipkin/storage/cassandra/CassandraIndexSpec.scala | Scala | apache-2.0 | 7,942 |
package pregnaware.frontend.services
import spray.client.pipelining._
import spray.http.Uri.{Authority, Host, Path}
import spray.http.{HttpMethod, HttpRequest, HttpResponse, Uri}
import spray.httpx.RequestBuilding.RequestBuilder
import pregnaware.utils.ConsulWrapper._
import pregnaware.utils.ExecutionActorWrapper
import scala.concurrent.Future
/** Common functions required by the clients of the back-end */
abstract class BackEndFuncs(serviceName: String) extends ExecutionActorWrapper {
/** Sends a request with no content to the server */
def send(method: HttpMethod, requestPath: String): Future[HttpResponse] = {
send(method, requestPath, (builder, uri) => builder(uri))
}
/* Sends the content request to the server */
def send[T](
method: HttpMethod,
requestPath: String,
buildRequest: (RequestBuilder, Uri) => HttpRequest): Future[HttpResponse] = {
send(requestPath, uri => buildRequest(new RequestBuilder(method), uri))
}
/* Sends the content request to the server (supporting specific HTTP Request construction) */
def send[T](
requestPath: String,
buildRequest: (Uri) => HttpRequest): Future[HttpResponse] = {
getAddress(serviceName).flatMap { address =>
val requestUri = Uri(
scheme = "http",
authority = Authority(Host(address.getHostName), address.getPort),
path = Path(s"/$serviceName/$requestPath")
)
buildRequest(requestUri) ~> sendReceive
}
}
}
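// Sketch of a concrete client (hypothetical names, not part of the original source):
// subclasses supply the Consul service name and then issue requests through send, e.g.
//
//   class UserBackEndFuncs(/* actor system / execution context wiring */)
//       extends BackEndFuncs("frontendsvc") {
//     def getUser(id: Int): Future[HttpResponse] =
//       send(spray.http.HttpMethods.GET, s"user/$id")
//   }
//
// send resolves the service address via Consul and targets http://host:port/frontendsvc/user/<id>.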
| jds106/pregnaware | service/src/main/scala/pregnaware/frontend/services/BackEndFuncs.scala | Scala | mit | 1,469 |
/*
* Copyright (C) 2020 MapRoulette contributors (see CONTRIBUTORS.md).
* Licensed under the Apache License, Version 2.0 (see LICENSE).
*/
package org.maproulette.framework.service
import java.util.UUID
import java.util.concurrent.atomic.AtomicInteger
import org.maproulette.framework.model._
import org.maproulette.framework.psql.{GroupField, Grouping, Query}
import org.maproulette.framework.util.{LeaderboardTag, FrameworkHelper}
import org.maproulette.framework.repository.UserRepository
import org.maproulette.models.dal.{ChallengeDAL, TaskDAL}
import org.maproulette.session.{SearchParameters, SearchLeaderboardParameters}
import play.api.Application
import javax.inject.Inject
import org.joda.time.DateTime
import play.api.db.Database
import org.maproulette.jobs.utils.LeaderboardHelper
import anorm.SqlParser._
import anorm._
import org.maproulette.Config
/**
* @author krotstan
*/
class LeaderboardServiceSpec(implicit val application: Application) extends FrameworkHelper {
val service: LeaderboardService = this.serviceManager.leaderboard
var randomUser: User = null
var challenge: Challenge = null
"LeaderboardService" should {
"get mapper leaderboard" taggedAs (LeaderboardTag) in {
val params = SearchLeaderboardParameters(onlyEnabled = false)
val results = this.service.getMapperLeaderboard(params)
results.size mustEqual 2
// Verify top challenges were populated
results.head.topChallenges.size mustEqual 1
results.head.topChallenges.head.id mustEqual challenge.id
// Verify top challenges were not populated when onlyEnabled
val params2 = SearchLeaderboardParameters(onlyEnabled = true)
val results2 = this.service.getMapperLeaderboard(params2)
results2.size mustEqual 2
results2.head.topChallenges.size mustEqual 0
// With Challenge Filter
val cParams = SearchLeaderboardParameters(challengeFilter = Some(List(challenge.id)))
val cResults = this.service.getMapperLeaderboard(cParams)
cResults.size mustEqual 2
val cParams2 = SearchLeaderboardParameters(challengeFilter = Some(List(987654)))
val cResults2 = this.service.getMapperLeaderboard(cParams2)
cResults2.size mustEqual 0
// With Project Filter
val pParams =
SearchLeaderboardParameters(projectFilter = Some(List(challenge.general.parent)))
val pResults =
this.service.getMapperLeaderboard(pParams)
pResults.size mustEqual 2
val pParams2 = SearchLeaderboardParameters(projectFilter = Some(List(987654)))
val pResults2 = this.service.getMapperLeaderboard(pParams2)
pResults2.size mustEqual 2
// With User Filter
val uParams = SearchLeaderboardParameters(userFilter = Some(List(randomUser.id)))
val uResults = this.service.getMapperLeaderboard(uParams)
uResults.size mustEqual 1
// By start and end date
val dParams = SearchLeaderboardParameters(
onlyEnabled = false,
start = Some(new DateTime().minusMonths(2)),
end = Some(new DateTime)
)
val dateResults = this.service.getMapperLeaderboard(dParams)
dateResults.size mustEqual 2
// By Country code
val ccParams =
SearchLeaderboardParameters(countryCodeFilter = Some(List("AR")), monthDuration = Some(3))
val ccResults = this.service
.getMapperLeaderboard(ccParams)
ccResults.size mustEqual 2
}
"get leaderboard for user" taggedAs (LeaderboardTag) in {
val results = this.service.getLeaderboardForUser(randomUser.id, SearchLeaderboardParameters())
results.size mustEqual 1
results.head.userId mustEqual randomUser.id
}
"get leaderboard for user with bracketing" taggedAs (LeaderboardTag) in {
val results = this.service
.getLeaderboardForUser(randomUser.id, SearchLeaderboardParameters(), bracket = 1)
results.size mustEqual 2
results.head.userId mustEqual randomUser.id
}
"get reviewer leaderboard" taggedAs (LeaderboardTag) in {
val cParams = SearchLeaderboardParameters(challengeFilter = Some(List(challenge.id)))
val cResults = this.service.getReviewerLeaderboard(cParams)
cResults.size mustEqual 1
val cParams2 = SearchLeaderboardParameters(challengeFilter = Some(List(987654)))
val cResults2 = this.service.getReviewerLeaderboard(cParams2)
cResults2.size mustEqual 0
// With Project Filter
val pParams =
SearchLeaderboardParameters(projectFilter = Some(List(challenge.general.parent)))
val pResults = this.service.getReviewerLeaderboard(pParams)
pResults.size mustEqual 1
}
}
override implicit val projectTestName: String = "LeaderboardServiceSpecProject"
override protected def beforeAll(): Unit = {
super.beforeAll()
val userRepository = this.application.injector.instanceOf(classOf[UserRepository])
val (u, c) = LeaderboardServiceSpec.setup(
this.challengeDAL,
this.taskDAL,
this.serviceManager,
this.defaultProject.id,
this.getTestTask,
this.getTestUser,
userRepository
)
randomUser = u
challenge = c
val db = this.application.injector.instanceOf(classOf[Database])
val config = this.application.injector.instanceOf(classOf[Config])
db.withConnection { implicit c =>
// Past 6 Months
SQL(LeaderboardHelper.rebuildChallengesLeaderboardSQL(6, config)).executeUpdate()
SQL(
LeaderboardHelper
.rebuildChallengesLeaderboardSQLCountry(3, "AR", "-73.42, -55.25, -53.63, -21.83", config)
).executeUpdate()
}
}
}
object LeaderboardServiceSpec {
var challengeDAL: ChallengeDAL = null
var taskDAL: TaskDAL = null
var serviceManager: ServiceManager = null
var taskFunc: (String, Long) => Task = null
var userFunc: (Long, String) => User = null
var userRepository: UserRepository = null
def setup(
challengeDAL: ChallengeDAL,
taskDAL: TaskDAL,
serviceManager: ServiceManager,
projectId: Long,
taskFunc: (String, Long) => Task,
userFunc: (Long, String) => User,
userRepository: UserRepository
): (User, Challenge) = {
this.challengeDAL = challengeDAL
this.taskDAL = taskDAL
this.serviceManager = serviceManager
this.taskFunc = taskFunc
this.userFunc = userFunc
this.userRepository = userRepository
val createdChallenge = challengeDAL
.insert(
Challenge(
-1,
"leaderboardChallenge",
null,
null,
general = ChallengeGeneral(
User.superUser.osmProfile.id,
projectId,
"TestChallengeInstruction"
),
creation = ChallengeCreation(),
priority = ChallengePriority(),
extra = ChallengeExtra()
),
User.superUser
)
val randomUser = completeTask(
createdChallenge.id,
Task.STATUS_FIXED,
"randomUser"
)
completeTask(
createdChallenge.id,
Task.STATUS_ALREADY_FIXED,
"randomUser2",
false,
true
)
// User that has opted out.
completeTask(
createdChallenge.id,
Task.STATUS_FIXED,
"hiddenUser",
true
)
(randomUser, createdChallenge)
}
private val counter = new AtomicInteger(9000)
private def completeTask(
challengeId: Long,
taskStatus: Int,
username: String,
optOut: Boolean = false,
addReview: Boolean = false
): User = {
val task = this.taskDAL
.insert(
this.taskFunc(UUID.randomUUID().toString, challengeId),
User.superUser
)
var randomUser = serviceManager.user.create(
this.userFunc(counter.getAndIncrement(), username),
User.superUser
)
if (optOut) {
randomUser =
randomUser.copy(settings = randomUser.settings.copy(leaderboardOptOut = Some(true)))
this.userRepository.update(randomUser, "POINT (14.0 22.0)")
}
this.taskDAL.setTaskStatus(List(task), taskStatus, randomUser, Some(true))
if (addReview) {
val refreshedTask = taskDAL.retrieveById(task.id).get
serviceManager.taskReview.setTaskReviewStatus(
refreshedTask,
Task.REVIEW_STATUS_APPROVED,
User.superUser,
None
)
}
randomUser
}
}
| mgcuthbert/maproulette2 | test/org/maproulette/framework/service/LeaderboardServiceSpec.scala | Scala | apache-2.0 | 8,398 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.eventhubscommon.progress
import java.io.IOException
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataOutputStream, Path}
import org.apache.spark.eventhubscommon.EventHubNameAndPartition
import org.apache.spark.internal.Logging
private[spark] class ProgressWriter(
streamId: Int,
uid: String,
eventHubNameAndPartition: EventHubNameAndPartition,
timestamp: Long,
hadoopConfiguration: Configuration,
progressDir: String,
subDirIdentifiers: String*) extends Logging {
// TODO: Why can't we get this info from one of the ProgressTrackers?
// TODO: Come up with better name for this guy
private val tempProgressTrackingPointStr =
PathTools.makeTempDirectoryStr(progressDir, subDirIdentifiers: _*) + "/" +
PathTools.makeTempFileName(streamId, uid, eventHubNameAndPartition, timestamp)
// TODO: Why can't we get this info from one of the ProgressTrackers?
// TODO: Come up with better name for this guy
private[spark] val tempProgressTrackingPointPath = new Path(tempProgressTrackingPointStr)
def write(recordTime: Long, cpOffset: Long, cpSeq: Long): Unit = {
val fs = tempProgressTrackingPointPath.getFileSystem(hadoopConfiguration)
var cpFileStream: FSDataOutputStream = null
try {
// it would be safe to overwrite checkpoint, since we will not start a new job when
// checkpoint hasn't been committed
cpFileStream = fs.create(tempProgressTrackingPointPath, true)
val record = ProgressRecord(recordTime, uid,
eventHubNameAndPartition.eventHubName, eventHubNameAndPartition.partitionId, cpOffset,
cpSeq)
cpFileStream.writeBytes(s"$record")
} catch {
case ioe: IOException =>
ioe.printStackTrace()
throw ioe
} finally {
if (cpFileStream != null) {
cpFileStream.close()
}
}
}
}
| CodingCat/spark-eventhubs | core/src/main/scala/org/apache/spark/eventhubscommon/progress/ProgressWriter.scala | Scala | apache-2.0 | 2,708 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import junit.framework.Assert._
import kafka.zk.ZooKeeperTestHarness
import kafka.integration.KafkaServerTestHarness
import kafka.server.KafkaConfig
import kafka.message.{Message, ByteBufferMessageSet}
import scala.collection._
import kafka.utils.Utils
import kafka.utils.{TestZKUtils, TestUtils}
import org.scalatest.junit.JUnit3Suite
import org.apache.log4j.{Level, Logger}
class ZookeeperConsumerConnectorTest extends JUnit3Suite with KafkaServerTestHarness with ZooKeeperTestHarness {
private val logger = Logger.getLogger(getClass())
val zookeeperConnect = TestZKUtils.zookeeperConnect
val zkConnect = zookeeperConnect
val numNodes = 2
val numParts = 2
val topic = "topic1"
val configs =
for(props <- TestUtils.createBrokerConfigs(numNodes))
yield new KafkaConfig(props) {
override val enableZookeeper = true
override val numPartitions = numParts
override val zkConnect = zookeeperConnect
}
val group = "group1"
val consumer0 = "consumer0"
val consumer1 = "consumer1"
val consumer2 = "consumer2"
val consumer3 = "consumer3"
val nMessages = 2
def testBasic() {
val requestHandlerLogger = Logger.getLogger(classOf[kafka.server.KafkaRequestHandlers])
requestHandlerLogger.setLevel(Level.FATAL)
var actualMessages: List[Message] = Nil
// test consumer timeout logic
val consumerConfig0 = new ConsumerConfig(
TestUtils.createConsumerProperties(zkConnect, group, consumer0)) {
override val consumerTimeoutMs = 200
}
val zkConsumerConnector0 = new ZookeeperConsumerConnector(consumerConfig0, true)
val topicMessageStreams0 = zkConsumerConnector0.createMessageStreams(Predef.Map(topic -> numNodes*numParts/2))
try {
getMessages(nMessages*2, topicMessageStreams0)
fail("should get an exception")
}
catch {
case e: ConsumerTimeoutException => // this is ok
case e => throw e
}
zkConsumerConnector0.shutdown
// send some messages to each broker
val sentMessages1 = sendMessages(nMessages, "batch1")
// create a consumer
val consumerConfig1 = new ConsumerConfig(
TestUtils.createConsumerProperties(zkConnect, group, consumer1))
val zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1, true)
val topicMessageStreams1 = zkConsumerConnector1.createMessageStreams(Predef.Map(topic -> numNodes*numParts/2))
val receivedMessages1 = getMessages(nMessages*2, topicMessageStreams1)
assertEquals(sentMessages1, receivedMessages1)
// commit consumed offsets
zkConsumerConnector1.commitOffsets
// create a consumer
val consumerConfig2 = new ConsumerConfig(
TestUtils.createConsumerProperties(zkConnect, group, consumer2))
val zkConsumerConnector2 = new ZookeeperConsumerConnector(consumerConfig2, true)
val topicMessageStreams2 = zkConsumerConnector2.createMessageStreams(Predef.Map(topic -> numNodes*numParts/2))
// send some messages to each broker
val sentMessages2 = sendMessages(nMessages, "batch2")
Thread.sleep(200)
val receivedMessages2_1 = getMessages(nMessages, topicMessageStreams1)
val receivedMessages2_2 = getMessages(nMessages, topicMessageStreams2)
val receivedMessages2 = (receivedMessages2_1 ::: receivedMessages2_2).sortWith((s,t) => s.checksum < t.checksum)
assertEquals(sentMessages2, receivedMessages2)
// create a consumer with empty map
val consumerConfig3 = new ConsumerConfig(
TestUtils.createConsumerProperties(zkConnect, group, consumer3))
val zkConsumerConnector3 = new ZookeeperConsumerConnector(consumerConfig3, true)
val topicMessageStreams3 = zkConsumerConnector3.createMessageStreams(new mutable.HashMap[String, Int]())
// send some messages to each broker
Thread.sleep(200)
val sentMessages3 = sendMessages(nMessages, "batch3")
Thread.sleep(200)
val receivedMessages3_1 = getMessages(nMessages, topicMessageStreams1)
val receivedMessages3_2 = getMessages(nMessages, topicMessageStreams2)
val receivedMessages3 = (receivedMessages3_1 ::: receivedMessages3_2).sortWith((s,t) => s.checksum < t.checksum)
assertEquals(sentMessages3, receivedMessages3)
zkConsumerConnector1.shutdown
zkConsumerConnector2.shutdown
zkConsumerConnector3.shutdown
logger.info("all consumer connectors stopped")
requestHandlerLogger.setLevel(Level.ERROR)
}
  def sendMessages(messagesPerNode: Int, header: String): List[Message] = {
var messages: List[Message] = Nil
for(conf <- configs) {
val producer = TestUtils.createProducer("localhost", conf.port)
for (partition <- 0 until numParts) {
val ms = 0.until(messagesPerNode).map(x =>
new Message((header + conf.brokerId + "-" + partition + "-" + x).getBytes)).toArray
val mSet = new ByteBufferMessageSet(ms: _*)
for (message <- ms)
messages ::= message
producer.send(topic, partition, mSet)
}
producer.close()
}
messages.sortWith((s,t) => s.checksum < t.checksum)
}
  def getMessages(nMessagesPerThread: Int, topicMessageStreams: Map[String,List[KafkaMessageStream]]): List[Message] = {
var messages: List[Message] = Nil
for ((topic, messageStreams) <- topicMessageStreams) {
for (messageStream <- messageStreams) {
val iterator = messageStream.iterator
for (i <- 0 until nMessagesPerThread) {
assertTrue(iterator.hasNext)
val message = iterator.next
messages ::= message
logger.debug("received message: " + Utils.toString(message.payload, "UTF-8"))
}
}
}
messages.sortWith((s,t) => s.checksum < t.checksum)
}
} | quipo/kafka | core/src/test/scala/unit/kafka/consumer/ZookeeperConsumerConnectorTest.scala | Scala | apache-2.0 | 6,312 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
import scala.collection.mutable
import scala.language.reflectiveCalls
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.examples.mllib.AbstractParams
import org.apache.spark.ml.{Pipeline, PipelineStage, Transformer}
import org.apache.spark.ml.classification.{DecisionTreeClassificationModel, DecisionTreeClassifier}
import org.apache.spark.ml.feature.{VectorIndexer, StringIndexer}
import org.apache.spark.ml.regression.{DecisionTreeRegressionModel, DecisionTreeRegressor}
import org.apache.spark.ml.util.MetadataUtils
import org.apache.spark.mllib.evaluation.{RegressionMetrics, MulticlassMetrics}
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.{SQLContext, DataFrame}
/**
* An example runner for decision trees. Run with
* {{{
* ./bin/run-example ml.DecisionTreeExample [options]
* }}}
* Note that Decision Trees can take a large amount of memory. If the run-example command above
* fails, try running via spark-submit and specifying the amount of memory as at least 1g.
* For local mode, run
* {{{
* ./bin/spark-submit --class org.apache.spark.examples.ml.DecisionTreeExample --driver-memory 1g
* [examples JAR path] [options]
* }}}
* If you use it as a template to create your own app, please use `spark-submit` to submit your app.
*/
object DecisionTreeExample {
case class Params(
input: String = null,
testInput: String = "",
dataFormat: String = "libsvm",
algo: String = "Classification",
maxDepth: Int = 5,
maxBins: Int = 32,
minInstancesPerNode: Int = 1,
minInfoGain: Double = 0.0,
fracTest: Double = 0.2,
cacheNodeIds: Boolean = false,
checkpointDir: Option[String] = None,
checkpointInterval: Int = 10) extends AbstractParams[Params]
def main(args: Array[String]) {
val defaultParams = Params()
val parser = new OptionParser[Params]("DecisionTreeExample") {
head("DecisionTreeExample: an example decision tree app.")
opt[String]("algo")
.text(s"algorithm (classification, regression), default: ${defaultParams.algo}")
.action((x, c) => c.copy(algo = x))
opt[Int]("maxDepth")
.text(s"max depth of the tree, default: ${defaultParams.maxDepth}")
.action((x, c) => c.copy(maxDepth = x))
opt[Int]("maxBins")
.text(s"max number of bins, default: ${defaultParams.maxBins}")
.action((x, c) => c.copy(maxBins = x))
opt[Int]("minInstancesPerNode")
        .text(s"min number of instances each child node must have after a split," +
s" default: ${defaultParams.minInstancesPerNode}")
.action((x, c) => c.copy(minInstancesPerNode = x))
opt[Double]("minInfoGain")
.text(s"min info gain required to create a split, default: ${defaultParams.minInfoGain}")
.action((x, c) => c.copy(minInfoGain = x))
opt[Double]("fracTest")
.text(s"fraction of data to hold out for testing. If given option testInput, " +
s"this option is ignored. default: ${defaultParams.fracTest}")
.action((x, c) => c.copy(fracTest = x))
opt[Boolean]("cacheNodeIds")
.text(s"whether to use node Id cache during training, " +
s"default: ${defaultParams.cacheNodeIds}")
.action((x, c) => c.copy(cacheNodeIds = x))
opt[String]("checkpointDir")
.text(s"checkpoint directory where intermediate node Id caches will be stored, " +
s"default: ${defaultParams.checkpointDir match {
case Some(strVal) => strVal
case None => "None"
}}")
.action((x, c) => c.copy(checkpointDir = Some(x)))
opt[Int]("checkpointInterval")
.text(s"how often to checkpoint the node Id cache, " +
s"default: ${defaultParams.checkpointInterval}")
.action((x, c) => c.copy(checkpointInterval = x))
opt[String]("testInput")
.text(s"input path to test dataset. If given, option fracTest is ignored." +
s" default: ${defaultParams.testInput}")
.action((x, c) => c.copy(testInput = x))
opt[String]("dataFormat")
.text("data format: libsvm (default), dense (deprecated in Spark v1.1)")
.action((x, c) => c.copy(dataFormat = x))
arg[String]("<input>")
.text("input path to labeled examples")
.required()
.action((x, c) => c.copy(input = x))
checkConfig { params =>
if (params.fracTest < 0 || params.fracTest >= 1) {
failure(s"fracTest ${params.fracTest} value incorrect; should be in [0,1).")
} else {
success
}
}
}
parser.parse(args, defaultParams).map { params =>
run(params)
}.getOrElse {
sys.exit(1)
}
}
/** Load a dataset from the given path, using the given format */
private[ml] def loadData(
sqlContext: SQLContext,
path: String,
format: String,
expectedNumFeatures: Option[Int] = None): DataFrame = {
import sqlContext.implicits._
format match {
case "dense" => MLUtils.loadLabeledPoints(sqlContext.sparkContext, path).toDF()
case "libsvm" => expectedNumFeatures match {
case Some(numFeatures) => sqlContext.read.option("numFeatures", numFeatures.toString)
.format("libsvm").load(path)
case None => sqlContext.read.format("libsvm").load(path)
}
case _ => throw new IllegalArgumentException(s"Bad data format: $format")
}
}
/**
* Load training and test data from files.
* @param input Path to input dataset.
* @param dataFormat "libsvm" or "dense"
* @param testInput Path to test dataset.
* @param algo Classification or Regression
* @param fracTest Fraction of input data to hold out for testing. Ignored if testInput given.
* @return (training dataset, test dataset)
*/
private[ml] def loadDatasets(
sc: SparkContext,
input: String,
dataFormat: String,
testInput: String,
algo: String,
fracTest: Double): (DataFrame, DataFrame) = {
val sqlContext = new SQLContext(sc)
// Load training data
val origExamples: DataFrame = loadData(sqlContext, input, dataFormat)
// Load or create test set
val dataframes: Array[DataFrame] = if (testInput != "") {
// Load testInput.
val numFeatures = origExamples.first().getAs[Vector](1).size
val origTestExamples: DataFrame =
loadData(sqlContext, testInput, dataFormat, Some(numFeatures))
Array(origExamples, origTestExamples)
} else {
// Split input into training, test.
origExamples.randomSplit(Array(1.0 - fracTest, fracTest), seed = 12345)
}
val training = dataframes(0).cache()
val test = dataframes(1).cache()
val numTraining = training.count()
val numTest = test.count()
val numFeatures = training.select("features").first().getAs[Vector](0).size
println("Loaded data:")
println(s" numTraining = $numTraining, numTest = $numTest")
println(s" numFeatures = $numFeatures")
(training, test)
}
def run(params: Params) {
val conf = new SparkConf().setAppName(s"DecisionTreeExample with $params")
val sc = new SparkContext(conf)
params.checkpointDir.foreach(sc.setCheckpointDir)
val algo = params.algo.toLowerCase
println(s"DecisionTreeExample with parameters:\\n$params")
// Load training and test data and cache it.
val (training: DataFrame, test: DataFrame) =
loadDatasets(sc, params.input, params.dataFormat, params.testInput, algo, params.fracTest)
// Set up Pipeline
val stages = new mutable.ArrayBuffer[PipelineStage]()
// (1) For classification, re-index classes.
val labelColName = if (algo == "classification") "indexedLabel" else "label"
if (algo == "classification") {
val labelIndexer = new StringIndexer()
.setInputCol("label")
.setOutputCol(labelColName)
stages += labelIndexer
}
// (2) Identify categorical features using VectorIndexer.
// Features with more than maxCategories values will be treated as continuous.
val featuresIndexer = new VectorIndexer()
.setInputCol("features")
.setOutputCol("indexedFeatures")
.setMaxCategories(10)
stages += featuresIndexer
// (3) Learn Decision Tree
val dt = algo match {
case "classification" =>
new DecisionTreeClassifier()
.setFeaturesCol("indexedFeatures")
.setLabelCol(labelColName)
.setMaxDepth(params.maxDepth)
.setMaxBins(params.maxBins)
.setMinInstancesPerNode(params.minInstancesPerNode)
.setMinInfoGain(params.minInfoGain)
.setCacheNodeIds(params.cacheNodeIds)
.setCheckpointInterval(params.checkpointInterval)
case "regression" =>
new DecisionTreeRegressor()
.setFeaturesCol("indexedFeatures")
.setLabelCol(labelColName)
.setMaxDepth(params.maxDepth)
.setMaxBins(params.maxBins)
.setMinInstancesPerNode(params.minInstancesPerNode)
.setMinInfoGain(params.minInfoGain)
.setCacheNodeIds(params.cacheNodeIds)
.setCheckpointInterval(params.checkpointInterval)
      case _ => throw new IllegalArgumentException(s"Algo ${params.algo} not supported.")
}
stages += dt
val pipeline = new Pipeline().setStages(stages.toArray)
// Fit the Pipeline
val startTime = System.nanoTime()
val pipelineModel = pipeline.fit(training)
val elapsedTime = (System.nanoTime() - startTime) / 1e9
println(s"Training time: $elapsedTime seconds")
// Get the trained Decision Tree from the fitted PipelineModel
algo match {
case "classification" =>
val treeModel = pipelineModel.stages.last.asInstanceOf[DecisionTreeClassificationModel]
if (treeModel.numNodes < 20) {
println(treeModel.toDebugString) // Print full model.
} else {
println(treeModel) // Print model summary.
}
case "regression" =>
val treeModel = pipelineModel.stages.last.asInstanceOf[DecisionTreeRegressionModel]
if (treeModel.numNodes < 20) {
println(treeModel.toDebugString) // Print full model.
} else {
println(treeModel) // Print model summary.
}
      case _ => throw new IllegalArgumentException(s"Algo ${params.algo} not supported.")
}
// Evaluate model on training, test data
algo match {
case "classification" =>
println("Training data results:")
evaluateClassificationModel(pipelineModel, training, labelColName)
println("Test data results:")
evaluateClassificationModel(pipelineModel, test, labelColName)
case "regression" =>
println("Training data results:")
evaluateRegressionModel(pipelineModel, training, labelColName)
println("Test data results:")
evaluateRegressionModel(pipelineModel, test, labelColName)
case _ =>
        throw new IllegalArgumentException(s"Algo ${params.algo} not supported.")
}
sc.stop()
}
/**
* Evaluate the given ClassificationModel on data. Print the results.
* @param model Must fit ClassificationModel abstraction
* @param data DataFrame with "prediction" and labelColName columns
* @param labelColName Name of the labelCol parameter for the model
*
* TODO: Change model type to ClassificationModel once that API is public. SPARK-5995
*/
private[ml] def evaluateClassificationModel(
model: Transformer,
data: DataFrame,
labelColName: String): Unit = {
val fullPredictions = model.transform(data).cache()
val predictions = fullPredictions.select("prediction").map(_.getDouble(0))
val labels = fullPredictions.select(labelColName).map(_.getDouble(0))
// Print number of classes for reference
val numClasses = MetadataUtils.getNumClasses(fullPredictions.schema(labelColName)) match {
case Some(n) => n
case None => throw new RuntimeException(
"Unknown failure when indexing labels for classification.")
}
val accuracy = new MulticlassMetrics(predictions.zip(labels)).precision
println(s" Accuracy ($numClasses classes): $accuracy")
}
/**
* Evaluate the given RegressionModel on data. Print the results.
* @param model Must fit RegressionModel abstraction
* @param data DataFrame with "prediction" and labelColName columns
* @param labelColName Name of the labelCol parameter for the model
*
* TODO: Change model type to RegressionModel once that API is public. SPARK-5995
*/
private[ml] def evaluateRegressionModel(
model: Transformer,
data: DataFrame,
labelColName: String): Unit = {
val fullPredictions = model.transform(data).cache()
val predictions = fullPredictions.select("prediction").map(_.getDouble(0))
val labels = fullPredictions.select(labelColName).map(_.getDouble(0))
val RMSE = new RegressionMetrics(predictions.zip(labels)).rootMeanSquaredError
println(s" Root mean squared error (RMSE): $RMSE")
}
}
// scalastyle:on println
| chenc10/Spark-PAF | examples/src/main/scala/org/apache/spark/examples/ml/DecisionTreeExample.scala | Scala | apache-2.0 | 14,050 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
import java.util.Properties
import java.util.concurrent.LinkedBlockingQueue
import org.apache.kafka.common.protocol.Errors
import org.junit.Assert._
import org.easymock.EasyMock
import org.junit.Test
import kafka.api._
import kafka.cluster.BrokerEndPoint
import kafka.common._
import kafka.message._
import kafka.producer.async._
import kafka.serializer._
import kafka.server.KafkaConfig
import kafka.utils.TestUtils._
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import kafka.utils._
class AsyncProducerTest {
  // One of the few cases where we can just set a fixed port, because the producer is mocked out
val props = Seq(createBrokerConfig(1, "127.0.0.1:1", port=65534))
val configs = props.map(KafkaConfig.fromProps)
val brokerList = configs.map(c => org.apache.kafka.common.utils.Utils.formatAddress(c.hostName, c.port)).mkString(",")
@Test
def testProducerQueueSize() {
// a mock event handler that blocks
val mockEventHandler = new EventHandler[String,String] {
def handle(events: Seq[KeyedMessage[String,String]]) {
Thread.sleep(500)
}
def close {}
}
val props = new Properties()
props.put("serializer.class", "kafka.serializer.StringEncoder")
props.put("metadata.broker.list", brokerList)
props.put("producer.type", "async")
props.put("queue.buffering.max.messages", "10")
props.put("batch.num.messages", "1")
props.put("queue.enqueue.timeout.ms", "0")
val config = new ProducerConfig(props)
val produceData = getProduceData(12)
val producer = new Producer[String, String](config, mockEventHandler)
try {
      // send all 12 messages; the queue only holds 10 and enqueue doesn't block, so this should fail with QueueFullException
producer.send(produceData: _*)
fail("Queue should be full")
}
catch {
case e: QueueFullException => //expected
    } finally {
producer.close()
}
}
@Test
def testProduceAfterClosed() {
val produceData = getProduceData(10)
val producer = createProducer[String, String](
brokerList,
encoder = classOf[StringEncoder].getName)
producer.close
try {
producer.send(produceData: _*)
fail("should complain that producer is already closed")
}
catch {
case e: ProducerClosedException => //expected
}
}
@Test
def testBatchSize() {
/**
* Send a total of 10 messages with batch size of 5. Expect 2 calls to the handler, one for each batch.
*/
val producerDataList = getProduceData(10)
val mockHandler = EasyMock.createStrictMock(classOf[DefaultEventHandler[String,String]])
mockHandler.handle(producerDataList.take(5))
EasyMock.expectLastCall
mockHandler.handle(producerDataList.takeRight(5))
EasyMock.expectLastCall
EasyMock.replay(mockHandler)
val queue = new LinkedBlockingQueue[KeyedMessage[String,String]](10)
val producerSendThread =
new ProducerSendThread[String,String]("thread1", queue, mockHandler, Integer.MAX_VALUE, 5, "")
producerSendThread.start()
for (producerData <- producerDataList)
queue.put(producerData)
producerSendThread.shutdown
EasyMock.verify(mockHandler)
}
@Test
def testQueueTimeExpired() {
/**
* Send a total of 2 messages with batch size of 5 and queue time of 200ms.
     * Expect 1 call to the handler after 200ms.
*/
val producerDataList = getProduceData(2)
val mockHandler = EasyMock.createStrictMock(classOf[DefaultEventHandler[String,String]])
mockHandler.handle(producerDataList)
EasyMock.expectLastCall
EasyMock.replay(mockHandler)
val queueExpirationTime = 200
val queue = new LinkedBlockingQueue[KeyedMessage[String,String]](10)
val producerSendThread =
new ProducerSendThread[String,String]("thread1", queue, mockHandler, queueExpirationTime, 5, "")
producerSendThread.start()
for (producerData <- producerDataList)
queue.put(producerData)
Thread.sleep(queueExpirationTime + 100)
EasyMock.verify(mockHandler)
producerSendThread.shutdown
}
@Test
def testPartitionAndCollateEvents() {
val producerDataList = new ArrayBuffer[KeyedMessage[Int,Message]]
// use bogus key and partition key override for some messages
producerDataList.append(new KeyedMessage[Int,Message]("topic1", key = 0, message = new Message("msg1".getBytes)))
producerDataList.append(new KeyedMessage[Int,Message]("topic2", key = -99, partKey = 1, message = new Message("msg2".getBytes)))
producerDataList.append(new KeyedMessage[Int,Message]("topic1", key = 2, message = new Message("msg3".getBytes)))
producerDataList.append(new KeyedMessage[Int,Message]("topic1", key = -101, partKey = 3, message = new Message("msg4".getBytes)))
producerDataList.append(new KeyedMessage[Int,Message]("topic2", key = 4, message = new Message("msg5".getBytes)))
val props = new Properties()
props.put("metadata.broker.list", brokerList)
val broker1 = new BrokerEndPoint(0, "localhost", 9092)
val broker2 = new BrokerEndPoint(1, "localhost", 9093)
// form expected partitions metadata
val partition1Metadata = new PartitionMetadata(0, Some(broker1), List(broker1, broker2))
val partition2Metadata = new PartitionMetadata(1, Some(broker2), List(broker1, broker2))
val topic1Metadata = new TopicMetadata("topic1", List(partition1Metadata, partition2Metadata))
val topic2Metadata = new TopicMetadata("topic2", List(partition1Metadata, partition2Metadata))
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
topicPartitionInfos.put("topic2", topic2Metadata)
val intPartitioner = new Partitioner {
def partition(key: Any, numPartitions: Int): Int = key.asInstanceOf[Int] % numPartitions
}
val config = new ProducerConfig(props)
val producerPool = new ProducerPool(config)
val handler = new DefaultEventHandler[Int,String](config,
partitioner = intPartitioner,
encoder = null.asInstanceOf[Encoder[String]],
keyEncoder = new IntEncoder(),
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
val topic1Broker1Data =
ArrayBuffer[KeyedMessage[Int,Message]](new KeyedMessage[Int,Message]("topic1", 0, new Message("msg1".getBytes)),
new KeyedMessage[Int,Message]("topic1", 2, new Message("msg3".getBytes)))
val topic1Broker2Data = ArrayBuffer[KeyedMessage[Int,Message]](new KeyedMessage[Int,Message]("topic1", -101, 3, new Message("msg4".getBytes)))
val topic2Broker1Data = ArrayBuffer[KeyedMessage[Int,Message]](new KeyedMessage[Int,Message]("topic2", 4, new Message("msg5".getBytes)))
val topic2Broker2Data = ArrayBuffer[KeyedMessage[Int,Message]](new KeyedMessage[Int,Message]("topic2", -99, 1, new Message("msg2".getBytes)))
val expectedResult = Some(Map(
0 -> Map(
TopicAndPartition("topic1", 0) -> topic1Broker1Data,
TopicAndPartition("topic2", 0) -> topic2Broker1Data),
1 -> Map(
TopicAndPartition("topic1", 1) -> topic1Broker2Data,
TopicAndPartition("topic2", 1) -> topic2Broker2Data)
))
val actualResult = handler.partitionAndCollate(producerDataList)
assertEquals(expectedResult, actualResult)
}
@Test
def testSerializeEvents() {
val produceData = TestUtils.getMsgStrings(5).map(m => new KeyedMessage[String,String]("topic1",m))
val props = new Properties()
props.put("metadata.broker.list", brokerList)
val config = new ProducerConfig(props)
// form expected partitions metadata
val topic1Metadata = getTopicMetadata("topic1", 0, 0, "localhost", 9092)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
val producerPool = new ProducerPool(config)
val handler = new DefaultEventHandler[String,String](config,
partitioner = null.asInstanceOf[Partitioner],
encoder = new StringEncoder,
keyEncoder = new StringEncoder,
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
val serializedData = handler.serialize(produceData)
val deserializedData = serializedData.map(d => new KeyedMessage[String,String](d.topic, TestUtils.readString(d.message.payload)))
// Test that the serialize handles seq from a Stream
val streamedSerializedData = handler.serialize(Stream(produceData:_*))
val deserializedStreamData = streamedSerializedData.map(d => new KeyedMessage[String,String](d.topic, TestUtils.readString(d.message.payload)))
TestUtils.checkEquals(produceData.iterator, deserializedData.iterator)
TestUtils.checkEquals(produceData.iterator, deserializedStreamData.iterator)
}
@Test
def testInvalidPartition() {
val producerDataList = new ArrayBuffer[KeyedMessage[String,Message]]
producerDataList.append(new KeyedMessage[String,Message]("topic1", "key1", new Message("msg1".getBytes)))
val props = new Properties()
props.put("metadata.broker.list", brokerList)
val config = new ProducerConfig(props)
// form expected partitions metadata
val topic1Metadata = getTopicMetadata("topic1", 0, 0, "localhost", 9092)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
val producerPool = new ProducerPool(config)
val handler = new DefaultEventHandler[String,String](config,
partitioner = new NegativePartitioner,
encoder = null.asInstanceOf[Encoder[String]],
keyEncoder = null.asInstanceOf[Encoder[String]],
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
try {
handler.partitionAndCollate(producerDataList)
}
catch {
// should not throw any exception
case e: Throwable => fail("Should not throw any exception")
}
}
@Test
def testNoBroker() {
val props = new Properties()
props.put("metadata.broker.list", brokerList)
val config = new ProducerConfig(props)
// create topic metadata with 0 partitions
val topic1Metadata = new TopicMetadata("topic1", Seq.empty)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
val producerPool = new ProducerPool(config)
val producerDataList = new ArrayBuffer[KeyedMessage[String,String]]
producerDataList.append(new KeyedMessage[String,String]("topic1", "msg1"))
val handler = new DefaultEventHandler[String,String](config,
partitioner = null.asInstanceOf[Partitioner],
encoder = new StringEncoder,
keyEncoder = new StringEncoder,
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
try {
handler.handle(producerDataList)
fail("Should fail with FailedToSendMessageException")
}
catch {
case e: FailedToSendMessageException => // we retry on any exception now
}
}
@Test
def testIncompatibleEncoder() {
val props = new Properties()
// no need to retry since the send will always fail
props.put("message.send.max.retries", "0")
val producer= createProducer[String, String](
brokerList = brokerList,
encoder = classOf[DefaultEncoder].getName,
keyEncoder = classOf[DefaultEncoder].getName,
producerProps = props)
try {
producer.send(getProduceData(1): _*)
fail("Should fail with ClassCastException due to incompatible Encoder")
} catch {
case e: ClassCastException =>
    } finally {
producer.close()
}
}
@Test
def testRandomPartitioner() {
val props = new Properties()
props.put("metadata.broker.list", brokerList)
val config = new ProducerConfig(props)
    // create topic metadata with a single partition (partition 0) for each topic
val topic1Metadata = getTopicMetadata("topic1", 0, 0, "localhost", 9092)
val topic2Metadata = getTopicMetadata("topic2", 0, 0, "localhost", 9092)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
topicPartitionInfos.put("topic2", topic2Metadata)
val producerPool = new ProducerPool(config)
val handler = new DefaultEventHandler[String,String](config,
partitioner = null.asInstanceOf[Partitioner],
encoder = null.asInstanceOf[Encoder[String]],
keyEncoder = null.asInstanceOf[Encoder[String]],
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
val producerDataList = new ArrayBuffer[KeyedMessage[String,Message]]
producerDataList.append(new KeyedMessage[String,Message]("topic1", new Message("msg1".getBytes)))
producerDataList.append(new KeyedMessage[String,Message]("topic2", new Message("msg2".getBytes)))
producerDataList.append(new KeyedMessage[String,Message]("topic1", new Message("msg3".getBytes)))
val partitionedDataOpt = handler.partitionAndCollate(producerDataList)
partitionedDataOpt match {
case Some(partitionedData) =>
for ((brokerId, dataPerBroker) <- partitionedData) {
for ( (TopicAndPartition(topic, partitionId), dataPerTopic) <- dataPerBroker)
assertTrue(partitionId == 0)
}
case None =>
fail("Failed to collate requests by topic, partition")
}
}
@Test
def testFailedSendRetryLogic() {
val props = new Properties()
props.put("metadata.broker.list", brokerList)
props.put("request.required.acks", "1")
props.put("serializer.class", classOf[StringEncoder].getName.toString)
props.put("key.serializer.class", classOf[NullEncoder[Int]].getName.toString)
props.put("producer.num.retries", 3.toString)
val config = new ProducerConfig(props)
val topic1 = "topic1"
val topic1Metadata = getTopicMetadata(topic1, Array(0, 1), 0, "localhost", 9092)
val topicPartitionInfos = new collection.mutable.HashMap[String, TopicMetadata]
topicPartitionInfos.put("topic1", topic1Metadata)
val msgs = TestUtils.getMsgStrings(2)
// produce request for topic1 and partitions 0 and 1. Let the first request fail
// entirely. The second request will succeed for partition 1 but fail for partition 0.
// On the third try for partition 0, let it succeed.
val request1 = TestUtils.produceRequestWithAcks(List(topic1), List(0, 1), messagesToSet(msgs), acks = 1, correlationId = 11)
val request2 = TestUtils.produceRequestWithAcks(List(topic1), List(0, 1), messagesToSet(msgs), acks = 1, correlationId = 17)
val response1 = ProducerResponse(0,
Map((TopicAndPartition("topic1", 0), ProducerResponseStatus(Errors.NOT_LEADER_FOR_PARTITION.code, 0L)),
(TopicAndPartition("topic1", 1), ProducerResponseStatus(Errors.NONE.code, 0L))))
val request3 = TestUtils.produceRequest(topic1, 0, messagesToSet(msgs), acks = 1, correlationId = 21)
val response2 = ProducerResponse(0,
Map((TopicAndPartition("topic1", 0), ProducerResponseStatus(Errors.NONE.code, 0L))))
val mockSyncProducer = EasyMock.createMock(classOf[SyncProducer])
// don't care about config mock
EasyMock.expect(mockSyncProducer.config).andReturn(EasyMock.anyObject()).anyTimes()
EasyMock.expect(mockSyncProducer.send(request1)).andThrow(new RuntimeException) // simulate SocketTimeoutException
EasyMock.expect(mockSyncProducer.send(request2)).andReturn(response1)
EasyMock.expect(mockSyncProducer.send(request3)).andReturn(response2)
EasyMock.replay(mockSyncProducer)
val producerPool = EasyMock.createMock(classOf[ProducerPool])
EasyMock.expect(producerPool.getProducer(0)).andReturn(mockSyncProducer).times(4)
EasyMock.expect(producerPool.close())
EasyMock.replay(producerPool)
val handler = new DefaultEventHandler[Int,String](config,
partitioner = new FixedValuePartitioner(),
encoder = new StringEncoder(),
keyEncoder = new NullEncoder[Int](),
producerPool = producerPool,
topicPartitionInfos = topicPartitionInfos)
val data = msgs.map(m => new KeyedMessage[Int,String](topic1, 0, m)) ++ msgs.map(m => new KeyedMessage[Int,String](topic1, 1, m))
handler.handle(data)
handler.close()
EasyMock.verify(mockSyncProducer)
EasyMock.verify(producerPool)
}
@Test
def testJavaProducer() {
val topic = "topic1"
val msgs = TestUtils.getMsgStrings(5)
val scalaProducerData = msgs.map(m => new KeyedMessage[String, String](topic, m))
val javaProducerData: java.util.List[KeyedMessage[String, String]] = {
import scala.collection.JavaConversions._
scalaProducerData
}
val mockScalaProducer = EasyMock.createMock(classOf[kafka.producer.Producer[String, String]])
mockScalaProducer.send(scalaProducerData.head)
EasyMock.expectLastCall()
mockScalaProducer.send(scalaProducerData: _*)
EasyMock.expectLastCall()
EasyMock.replay(mockScalaProducer)
val javaProducer = new kafka.javaapi.producer.Producer[String, String](mockScalaProducer)
javaProducer.send(javaProducerData.get(0))
javaProducer.send(javaProducerData)
EasyMock.verify(mockScalaProducer)
}
@Test
def testInvalidConfiguration() {
val props = new Properties()
props.put("serializer.class", "kafka.serializer.StringEncoder")
props.put("producer.type", "async")
try {
new ProducerConfig(props)
fail("should complain about wrong config")
}
catch {
case e: IllegalArgumentException => //expected
}
}
def getProduceData(nEvents: Int): Seq[KeyedMessage[String,String]] = {
val producerDataList = new ArrayBuffer[KeyedMessage[String,String]]
for (i <- 0 until nEvents)
producerDataList.append(new KeyedMessage[String,String]("topic1", null, "msg" + i))
producerDataList
}
private def getTopicMetadata(topic: String, partition: Int, brokerId: Int, brokerHost: String, brokerPort: Int): TopicMetadata = {
getTopicMetadata(topic, List(partition), brokerId, brokerHost, brokerPort)
}
private def getTopicMetadata(topic: String, partition: Seq[Int], brokerId: Int, brokerHost: String, brokerPort: Int): TopicMetadata = {
val broker1 = new BrokerEndPoint(brokerId, brokerHost, brokerPort)
new TopicMetadata(topic, partition.map(new PartitionMetadata(_, Some(broker1), List(broker1))))
}
def messagesToSet(messages: Seq[String]): ByteBufferMessageSet = {
new ByteBufferMessageSet(NoCompressionCodec, messages.map(m => new Message(m.getBytes)): _*)
}
def messagesToSet(key: Array[Byte], messages: Seq[Array[Byte]]): ByteBufferMessageSet = {
new ByteBufferMessageSet(NoCompressionCodec, messages.map(m => new Message(key = key, bytes = m)): _*)
}
}
class NegativePartitioner(props: VerifiableProperties = null) extends Partitioner {
def partition(data: Any, numPartitions: Int): Int = -1
}
| samaitra/kafka | core/src/test/scala/unit/kafka/producer/AsyncProducerTest.scala | Scala | apache-2.0 | 21,324 |
package editor.model
import util.MapTree
import scala.collection.mutable.ListBuffer
class Model {
val world = new MapTree[String, EditableGameObject]
val actions = new ListBuffer[String]
val properties = new ListBuffer[String]
var rootAdded = false
def addToWorld(obj: EditableGameObject) {
    if (!rootAdded) {
assert(obj.name == "root")
assert(obj.parent == "root")
assert(obj.kind == "header")
world.addRoot((obj.name, obj))
rootAdded = true
} else {
world.addChild(obj.parent, (obj.name, obj))
}
}
} | gregwk/clay-pot | Game_Editor/src/editor/model/Model.scala | Scala | mit | 632 |
package hello.world
object ExampleApp extends App {
val memory = Runtime.getRuntime.maxMemory() / (1024L * 1024L)
println(s"Memory $memory m")
println(s"Args: ${args mkString " | "}")
while (true) {
println(s"[${System.currentTimeMillis()}] Hello, world!")
Thread sleep 5000
}
} | fsat/sbt-native-packager | test-project-simple/src/main/scala/ExampleApp.scala | Scala | bsd-2-clause | 300 |
package com.twitter.finagle.thrift
import org.jboss.netty.channel.ChannelPipelineFactory
import com.twitter.finagle.{CodecFactory, ClientCodecConfig}
import org.apache.thrift.protocol.TProtocolFactory
/**
* ThriftClientBufferedCodec implements a buffered thrift transport
* that supports upgrading in order to provide TraceContexts across
* requests.
*/
object ThriftClientBufferedCodec {
/**
* Create a [[com.twitter.finagle.thrift.ThriftClientBufferedCodecFactory]]
*/
def apply() = new ThriftClientBufferedCodecFactory
def apply(protocolFactory: TProtocolFactory) =
new ThriftClientBufferedCodecFactory(protocolFactory)
}
class ThriftClientBufferedCodecFactory(protocolFactory: TProtocolFactory) extends
CodecFactory[ThriftClientRequest, Array[Byte]]#Client
{
def this() = this(Protocols.binaryFactory())
/**
* Create a [[com.twitter.finagle.thrift.ThriftClientBufferedCodec]]
* with a default TBinaryProtocol.
*/
def apply(config: ClientCodecConfig) = {
new ThriftClientBufferedCodec(protocolFactory, config)
}
}
class ThriftClientBufferedCodec(protocolFactory: TProtocolFactory, config: ClientCodecConfig)
extends ThriftClientFramedCodec(protocolFactory, config)
{
override def pipelineFactory = {
val framedPipelineFactory = super.pipelineFactory
new ChannelPipelineFactory {
def getPipeline() = {
val pipeline = framedPipelineFactory.getPipeline
pipeline.replace(
"thriftFrameCodec", "thriftBufferDecoder",
new ThriftBufferDecoder(protocolFactory))
pipeline
}
}
}
}
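// ---------------------------------------------------------------------------
// Illustrative wiring sketch (not part of the original file). A client would
// typically hand this codec factory to finagle's ClientBuilder, e.g.
//
//   ClientBuilder()
//     .codec(ThriftClientBufferedCodec())
//     .hosts("localhost:9090")
//     .hostConnectionLimit(1)
//     .build()
//
// The exact builder methods depend on the finagle version; the calls above are
// assumptions, not verified API.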
| JustinTulloss/finagle | finagle-thrift/src/main/scala/com/twitter/finagle/thrift/ThriftClientBufferedCodec.scala | Scala | apache-2.0 | 1,598 |
package com.lucidchart.open.xtract
import scala.util.control.NonFatal
import scala.xml.NodeSeq
object XmlReader extends DefaultXmlReaders with XmlReaderExtensions {
import cats.Applicative
/**
* Create a new [[XmlReader]] from a function that converts a NodeSeq to a [[ParseResult]].
* @param f A transformation function for the transformation done by the [[XmlReader]]
*/
def apply[A](f: NodeSeq => ParseResult[A]): XmlReader[A] = new XmlReader[A] {
def read(xml: NodeSeq): ParseResult[A] = f(xml)
}
implicit object algebra extends Applicative[XmlReader] {
def pure[A](a: A): XmlReader[A] = new XmlReader[A] {
def read(xml: NodeSeq): ParseResult[A] = ParseSuccess(a)
}
def ap[A, B](ff: XmlReader[A => B])(fa: XmlReader[A]): XmlReader[B] = new XmlReader[B] {
def read(xml: NodeSeq): ParseResult[B] = ParseResult.algebra.ap(ff.read(xml))(fa.read(xml))
}
override def map[A, B](fa: XmlReader[A])(f: A => B): XmlReader[B] = fa.map(f)
override def product[A, B](fa: XmlReader[A], fb: XmlReader[B]): XmlReader[(A,B)] = new XmlReader[(A,B)] {
def read(xml: NodeSeq): ParseResult[(A,B)] = ParseResult.algebra.product(fa.read(xml), fb.read(xml))
}
}
/**
* Get an implicit [[XmlReader]] for a type
* @tparam A The result type of the desired [[XmlReader]]
* @param r The implicit [[XmlReader]] to use.
*/
def of[A](implicit r: XmlReader[A]): XmlReader[A] = r
}
/**
* An abstraction for a function that takes a NodeSeq and returns
* a [[ParseResult]].
*
* It is used to parse XML to arbitrary scala objects, and supports combinatorial syntax
* to easily compose [[XmlReader]]s into new [[XmlReader]]s.
*/
trait XmlReader[+A] { outer =>
/**
* The core operation of an [[XmlReader]], it converts an xml NodeSeq
* into a ParseResult of the desired type.
* @param xml The xml to read from.
* @return The [[ParseResult]] resulting from reading the xml.
*/
def read(xml: NodeSeq): ParseResult[A]
/**
* Map the [[XmlReader]].
* This converts one [[XmlReader]] into another (usually of a different type).
* @param f The mapping function.
   * @return A new [[XmlReader]] that, when this reader succeeds, succeeds with the result
   *         of applying `f` to its value.
*/
def map[B](f: A => B): XmlReader[B] = XmlReader{ xml => this.read(xml).map(f)}
/**
   * Try to map, and if the mapping function throws an exception,
   * return a failure with the supplied error.
   * @param fe A function that returns the appropriate [[ParseError]] if mapping failed.
   * @param f The mapping function.
   * @tparam B The type to map into.
   * @return A new [[XmlReader]] that fails with the error produced by `fe` if `f` throws.
*/
def tryMap[B](fe: A => ParseError)(f: A => B): XmlReader[B] = XmlReader { xml =>
this.read(xml).flatMap { x =>
try {
ParseSuccess(f(x))
} catch {
case NonFatal(_) => ParseFailure(fe(x))
}
}
}
/**
* Similar to [[map]] but does a flatMap on the [[ParseResult]] rather than
* a map.
*/
def flatMap[B](f: A => XmlReader[B]): XmlReader[B] = XmlReader { xml =>
this.read(xml).flatMap(t => f(t).read(xml))
}
/**
* Filter the result of the [[XmlReader]].
* It filters the resulting [[ParseResult]] after reading.
* @param p The predicate to filter with
*/
def filter(p: A => Boolean): XmlReader[A] = XmlReader { xml => read(xml).filter(p)}
/**
* Similar to [[filter(p:A=>Boolean):*]], but allows you to supply the [[ParseError]]
* to use if the filter test fails.
* @param error The error to use if the filter fails.
* @param p The predicate to filter with
*/
def filter(error: => ParseError)(p: A => Boolean): XmlReader[A] =
XmlReader { xml => read(xml).filter(error)(p) }
/**
* Map a partial function over the [[XmlReader]]. If the partial function isn't
* defined for the input, returns a [[ParseFailure]] containing `otherwise` as its error.
*/
def collect[B](otherwise: => ParseError)(f: PartialFunction[A, B]): XmlReader[B] =
XmlReader { xml => read(xml).collect(otherwise)(f) }
/**
* Map a partial function over the [[XmlReader]]. If the partial function isn't
* defined for the input, returns a [[ParseFailure]].
*/
def collect[B](f: PartialFunction[A, B]): XmlReader[B] =
XmlReader { xml => read(xml).collect(f) }
/**
   * @return New [[XmlReader]] that succeeds if either this or `other` succeeds
   *         on the input; this reader takes precedence. If both fail, the [[ParseFailure]]
* contains the errors from both.
*/
def or[B >: A](other: XmlReader[B]): XmlReader[B] = new XmlReader[B] {
def read(xml: NodeSeq): ParseResult[B] = {
val r = outer.read(xml)
if (r.isSuccessful) {
r
} else {
val r2 = other.read(xml)
if (r2.isSuccessful) {
r2
} else {
ParseFailure(r.errors ++ r2.errors)
}
}
}
}
/**
* Alias for `or`
*/
def |[B >: A](other: XmlReader[B]) = or(other)
/**
* Like `or` but takes a by-name parameter and doesn't combine errors.
*
* @return New [[XmlReader]] that succeeds if either this or `v` succeeds
   *         on the input; this reader takes precedence.
*/
def orElse[B >: A](v: => XmlReader[B]): XmlReader[B] = XmlReader { xml =>
read(xml).orElse(v.read(xml))
}
/**
* Compose this [[XmlReader]] with another.
* @param r An [[XmlReader]] that returns a NodeSeq result.
* @return New [[XmlReader]] that uses this [[XmlReader]] to read the result of r.
*/
def compose[B <: NodeSeq](r: XmlReader[B]): XmlReader[A] = XmlReader { xml =>
r.read(xml).flatMap(read(_))
}
/**
* Similar to [[compose]] but with the operands reversed.
* @param other The [[XmlReader]] to compose this with.
*/
def andThen[B](other: XmlReader[B])(implicit witness: <:<[A, NodeSeq]): XmlReader[B] = other.compose(this.map(witness))
/**
   * Convert to a reader that always succeeds, wrapping the result in an Option (None if it would have failed). Any errors are dropped.
   * @return An [[XmlReader]] that always succeeds with an Option.
*/
def optional: XmlReader[Option[A]] = XmlReader { xml =>
ParseSuccess(read(xml).toOption)
}
/**
   * Use a default value if unable to parse. The resulting reader always succeeds and drops any errors.
   * @param v The default value to fall back to.
   * @return An [[XmlReader]] that always succeeds.
*/
def default[B >: A](v: B): XmlReader[B] = XmlReader { xml =>
ParseSuccess(read(xml).getOrElse(v))
}
/**
* Recover from a failed parse, keeping any errors.
   * @param otherwise The value to fall back to on failure.
   * @tparam B The (widened) result type.
   * @return An [[XmlReader]] that yields `otherwise` on failure while keeping the accumulated errors.
*/
def recover[B >: A](otherwise: B): XmlReader[B] = XmlReader { xml =>
read(xml).recoverPartial(otherwise)
}
}
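// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original library). It shows how the
// combinators above compose; `Person`, the element labels and the helper
// `childText` are hypothetical and rely only on API defined in this file.
object XmlReaderUsageSketch {
  case class Person(name: String, age: Int)

  // Read the text content of a named child element.
  def childText(label: String): XmlReader[String] =
    XmlReader { xml => ParseSuccess((xml \ label).text) }

  // Compose two readers into a reader of Person. A real reader would use
  // tryMap with a proper ParseError instead of a bare toInt.
  val personReader: XmlReader[Person] =
    for {
      name <- childText("name")
      age  <- childText("age").map(_.toInt)
    } yield Person(name, age)

  // e.g. personReader.read(<person><name>Ada</name><age>36</age></person>)
}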
| lucidsoftware/xtract | xtract-core/src/main/scala/com/lucidchart/open/xtract/XmlReader.scala | Scala | apache-2.0 | 6,560 |
package goggles.macros.errors
case class ErrorAt[+Type](error: GogglesError[Type], offset: Int) | kenbot/goggles | macros/src/main/scala/goggles/macros/errors/ErrorAt.scala | Scala | mit | 97 |
/*
* Copyright 2017 Max Meldrum
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package zookeeper
import com.typesafe.scalalogging.LazyLogging
import io.circe.Error
import org.apache.curator.framework.{CuratorFramework, CuratorFrameworkFactory}
import org.apache.curator.framework.api.ACLProvider
import org.apache.curator.retry.ExponentialBackoffRetry
import org.apache.zookeeper.{CreateMode, ZooDefs}
import org.apache.zookeeper.data.ACL
import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
import io.circe.jawn.decode
import io.circe.generic.auto._
import io.circe.syntax._
/** ZooKeeper Client
*
* Uses application.conf and CuratorFramework to build a
* client
*/
trait ZkClient extends ZooKeeperConfig {
val zkRetryPolicy = new ExponentialBackoffRetry(1000, zkMaxReconnections)
val zkCuratorFrameWork = CuratorFrameworkFactory.builder()
.namespace(zkNamespace)
.connectString(zkHost)
.retryPolicy(zkRetryPolicy)
.sessionTimeoutMs(zkConnectionTimeout)
.connectionTimeoutMs(zkSessionTimeout)
.aclProvider(new ACLProvider {
override def getDefaultAcl: java.util.List[ACL] = ZooDefs.Ids.CREATOR_ALL_ACL
override def getAclForPath(path: String): java.util.List[ACL] = ZooDefs.Ids.CREATOR_ALL_ACL
})
.build()
}
object ZkClient extends ZkClient with ZkPaths with LazyLogging {
type ZooKeeperClient = CuratorFramework
type Err = String
type AgentResult = Future[Either[Err, Agent]]
type AgentAlias = String
/** Attempts to connect to the ZooKeeper ensemble
*
* @return True if connected, otherwise false
*/
def connect()(implicit zk: ZooKeeperClient): Boolean = {
zk.start()
    // Brief sleep to give the client a moment to establish the connection
Thread.sleep(500)
isConnected()
}
/** Checks connection to ZooKeeper
*
* @param zk ZooKeeper client
* @return true if connected, false otherwise
*/
def isConnected()(implicit zk: ZooKeeperClient): Boolean =
zk.getZookeeperClient.isConnected
/** Check if znode exists
*
* @param path znode path
* @return True if it exist, otherwise false
*/
def nodeExists(path: String)(implicit zk: ZooKeeperClient): Boolean =
Option(zk.checkExists().forPath(path)).isDefined
/** Creates ZooKeeper znode
*
* @param path target path
* @param zk ZooKeeper client
*/
def createNode(path: String, data: Option[String] = None)(implicit zk: ZooKeeperClient) : Boolean = {
Option(zk.create().creatingParentsIfNeeded().forPath(path, data.getOrElse("").getBytes))
.isDefined
}
/** Deletes ZooKeeper znode
*
* @param path target path
* @param zk ZooKeeper client
*/
def deleteNode(path: String)(implicit zk: ZooKeeperClient): Unit = {
nodeExists(path) match {
case true => zk.delete().deletingChildrenIfNeeded().forPath(path)
case false => logger.info("Tried deleting a non existing path: " + path)
}
}
/** Update znode
*
* @param path target path for znode
* @param data data to set
* @param zk ZooKeeper client
*/
def updateNode(path: String, data: Option[String] = None)(implicit zk: ZooKeeperClient): Unit =
zk.setData().forPath(path, data.getOrElse("").getBytes)
/** Joins SpaceTurtle cluster
*
* @param agent Agent case class which holds all information
* @param zk ZooKeeper client
*/
def joinCluster(agent: Agent)(implicit zk: ZooKeeperClient): Try[Unit] = {
val path = agentSessionPath + "/" + agent.host
    // EPHEMERAL means the znode will be deleted once the session is lost
Try(zk.create().withMode(CreateMode.EPHEMERAL).forPath(path))
}
  /** Registers the agent if it has not already been registered
*
* @param agent Agent case class
* @param zk ZooKeeper client
*/
def registerAgent(agent: Agent)(implicit zk: ZooKeeperClient): Unit = {
val path = agentPersistedPath + "/" + agent.host
if (!nodeExists(path)) {
val data = agent.asJson
.noSpaces
createNode(path, Some(data))
}
}
/** Fetch persisted agents, including full information
*
* @return Future containing List of Agent case classes
*/
def persistedAgentsFull()(implicit zk: ZooKeeperClient, ec: ExecutionContext): Future[List[Agent]] = {
persistedAgents().flatMap { names =>
Future.sequence(names.map(n => getAgent(n).map(_.right.get)))
}
}
/** Fetch active agents
*
* @param zk ZooKeeper client
* @return Future containing list of agent names
*/
def activeAgents()(implicit zk: ZooKeeperClient, ec: ExecutionContext): Future[List[AgentAlias]] =
Future(fetchNodes(agentSessionPath))
/** Fetch persisted agents
*
* @param zk ZooKeeper client
* @return Future containing list of persisted agent names
*/
def persistedAgents()(implicit zk: ZooKeeperClient, ec: ExecutionContext): Future[List[AgentAlias]] =
Future(fetchNodes(agentPersistedPath))
/** Fetch znodes under certain path
*
* @param path znode path /SpaceTurtle/..
* @param zk ZooKeeper client
* @return List of found znodes
*/
private def fetchNodes(path: String)(implicit zk: ZooKeeperClient): List[String] = {
// Ensure we are getting latest commits
zk.sync().forPath(path)
zk.getChildren
.forPath(path)
.asScala
.toList
}
/** Fetch information for specified agent
*
* @param znode target agent
* @param zk ZooKeeper client
* @param ec ExecutionContext for Future
* @return Future with Agent case class
*/
def getAgent(znode: String)(implicit zk: ZooKeeperClient, ec: ExecutionContext): AgentResult = Future {
    val byteData = zk.getData().forPath(agentPersistedPath + "/" + znode)
val zkData = new String(byteData)
val agent: Either[Error, Agent] = decode[Agent](zkData)
agent match {
case Left(err) => Left(err.toString)
case Right(agent) => Right(agent)
}
}
}
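// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original sources): a typical
// connect -> register -> join flow. The object name and `bootstrap` helper are
// hypothetical; construction of the Agent and all error handling are elided.
object ZkClientUsageSketch {
  import scala.concurrent.ExecutionContext.Implicits.global

  def bootstrap(agent: Agent): Unit = {
    // The curator client built by the ZkClient trait doubles as the implicit client.
    implicit val zk: ZkClient.ZooKeeperClient = ZkClient.zkCuratorFrameWork
    if (ZkClient.connect()) {
      ZkClient.registerAgent(agent)   // persist the agent znode if it is missing
      ZkClient.joinCluster(agent)     // ephemeral znode tied to this session
      ZkClient.activeAgents().foreach(agents => println(s"Active agents: $agents"))
    }
  }
}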
| Max-Meldrum/SpaceTurtle | zookeeper/src/main/scala/ZkClient.scala | Scala | apache-2.0 | 6,493 |
package com.github.tminglei.slickpg
package utils
import slick.ast.FieldSymbol
import scala.reflect.ClassTag
import java.sql.{PreparedStatement, ResultSet}
import slick.jdbc.{JdbcTypesComponent, PostgresProfile}
trait PgCommonJdbcTypes extends JdbcTypesComponent { driver: PostgresProfile =>
class GenericJdbcType[T](val sqlTypeName: String,
fnFromString: (String => T),
fnToString: (T => String) = ((r: T) => r.toString),
val sqlType: Int = java.sql.Types.OTHER,
override val hasLiteralForm: Boolean = false)(
implicit override val classTag: ClassTag[T]) extends DriverJdbcType[T] {
override def sqlTypeName(sym: Option[FieldSymbol]): String = sqlTypeName
override def getValue(r: ResultSet, idx: Int): T = {
val value = r.getString(idx)
if (r.wasNull) null.asInstanceOf[T] else fnFromString(value)
}
override def setValue(v: T, p: PreparedStatement, idx: Int): Unit = p.setObject(idx, toStr(v), java.sql.Types.OTHER)
override def updateValue(v: T, r: ResultSet, idx: Int): Unit = r.updateObject(idx, toStr(v), java.sql.Types.OTHER)
override def valueToSQLLiteral(v: T) = if(v == null) "NULL" else s"'${fnToString(v)}'"
///
private def toStr(v: T) = if(v == null) null else fnToString(v)
}
}
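// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original sources): how a custom profile
// might register a GenericJdbcType for a user-defined Postgres type. The
// profile name, the Tag wrapper and the "tag" type name are all hypothetical.
trait ExampleProfileSketch extends PostgresProfile with PgCommonJdbcTypes {
  case class Tag(value: String)

  trait TagImplicits {
    // Map the Postgres type "tag" to/from the Tag wrapper via its string form.
    implicit val tagTypeMapper: GenericJdbcType[Tag] =
      new GenericJdbcType[Tag]("tag", s => Tag(s), _.value)
  }
}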
| TimothyKlim/slick-pg | core/src/main/scala/com/github/tminglei/slickpg/utils/PgCommonJdbcTypes.scala | Scala | bsd-2-clause | 1,376 |
package fpinscala.testing
import fpinscala.laziness.Stream
import fpinscala.parallelism.Par.Par
import fpinscala.state._
import fpinscala.testing.Prop._
/*
The library developed in this chapter goes through several iterations. This file is just the
shell, which you can fill in and modify while working through the chapter.
*/
trait Prop0 {
self =>
def check: Boolean
  def &&(p: Prop0): Prop0 = new Prop0 { def check = self.check && p.check }
}
trait Prop1 {
self =>
def check: Either[(FailedCase, SuccessCount), SuccessCount]
def &&(p: Prop1): Prop1 =
new Prop1 {
override def check = self.check match {
case Right(_) => p.check
case left@Left(e) => left
}
}
}
case class Prop(run: (TestCases, RNG) => Result) {
def &&(p: Prop) = Prop {
(n, rng) =>
run(n, rng) match {
case Passed | Proved => p.run(n, rng)
case x => x
}
}
def ||(p: Prop) = Prop {
(n,rng) => run(n,rng) match {
      // In case of failure, run the other prop
case Falsified(msg, _) => p.tag(msg).run(n,rng)
case x => x
}
}
def tag(msg: String) = Prop { (tc, rng) =>
run(tc, rng) match {
case Falsified(e, c) => Falsified(msg + "\n" + e, c)
case x => x
}
}
}
object Prop {
type FailedCase = String
type SuccessCount = Int
type TestCases = Int
sealed trait Result {
def isFalsified: Boolean
}
case object Passed extends Result {
def isFalsified = false
}
case object Proved extends Result {
def isFalsified = false
}
case class Falsified(failure: FailedCase,
successes: SuccessCount) extends Result {
def isFalsified = true
}
/* Produce an infinite random stream from a `Gen` and a starting `RNG`. */
def randomStream[A](g: Gen[A])(rng: RNG): Stream[A] =
Stream.unfold(rng)(rng => Some(g.sample.run(rng)))
def forAll[A](as: Gen[A])(f: A => Boolean): Prop = Prop {
(n, rng) =>
randomStream(as)(rng).zip(Stream.from(0)).take(n).map {
case (a, i) => try {
if (f(a)) Passed else Falsified(a.toString, i)
} catch {
case e: Exception => Falsified(buildMsg(a, e), i)
}
}.find(_.isFalsified).getOrElse(Passed)
}
def buildMsg[A](s: A, e: Exception): String =
s"test case: $s\n" +
s"generated an exception: ${e.getMessage}\n" +
s"stack trace:\n ${e.getStackTrace.mkString("\n")}"
}
object ListProps {
// Exercise 8.14: Prop for List.sorted
lazy val intListGen: Gen[List[Int]] = ???
lazy val sortedProp: Prop =
Prop.forAll(intListGen) { l: List[Int] =>
???
}
// Exercise 8.14: Prop for List.takeWhile
lazy val takeWhileProp: Prop = {
val f = (_: Int) <= 0
val p1 = Prop.forAll(intListGen) { l: List[Int] =>
l.takeWhile(f).forall(f) == true
}
val p2: Prop = ???
p1 && p2
}
}
object Gen {
def unit[A](a: => A): Gen[A] = Gen(State.unit(a))
def choose(start: Int, stopExclusive: Int): Gen[Int] =
Gen(State(RNG.nonNegativeInt).map(n => start + n % (stopExclusive - start)))
def boolean: Gen[Boolean] =
Gen(State(RNG.boolean))
def double: Gen[Double] =
Gen(State(RNG.double))
// here is an example on how to combine generators in a for-comprehension
def option[A](gen: Gen[A]): Gen[Option[A]] =
for {
b <- Gen.boolean
a <- gen
} yield if (b) Some(a) else None
def listOfN[A](n: Int, g: Gen[A]): Gen[List[A]] =
Gen(State.sequence(List.fill(n)(g.sample)))
def stringN(n: Int): Gen[String] = listOfN(n, choose(0, 127)).map(_.map(_.toChar).mkString)
def union[A](g1: Gen[A], g2: Gen[A]): Gen[A] =
boolean.flatMap(b => if (b) g1 else g2)
def weighted[A](g1: (Gen[A], Double), g2: (Gen[A], Double)): Gen[A] = ???
def listOf[A](g: Gen[A]): SGen[List[A]] = ???
def listOf1[A](g: Gen[A]): SGen[List[A]] = ???
lazy val parInt: Gen[Par[Int]] = ???
}
case class Gen[+A](sample: State[RNG, A]) {
def map[B](f: A => B): Gen[B] =
Gen(sample.map(f))
def flatMap[B](f: A => Gen[B]): Gen[B] =
Gen(sample.flatMap(a => f(a).sample))
def map2[B, C](g: Gen[B])(f: (A, B) => C): Gen[C] =
Gen(sample.map2(g.sample)(f))
def listOfN(size: Int): Gen[List[A]] = Gen.listOfN(size, this)
def listOfN(size: Gen[Int]): Gen[List[A]] =
size flatMap (Gen.listOfN(_, this))
def listOf: SGen[List[A]] = Gen.listOf(this)
def listOf1: SGen[List[A]] = Gen.listOf1(this)
def **[B](g: Gen[B]): Gen[(A, B)] =
(this map2 g) ((_, _))
def unsized: SGen[A] = ???
}
case class SGen[+A](forSize: Int => Gen[A]) {
def apply(n: Int): Gen[A] = ???
def map[B](f: A => B): SGen[B] = ???
def flatMap[B](f: A => SGen[B]): SGen[B] = ???
def **[B](s2: SGen[B]): SGen[(A, B)] = ???
}
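// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the exercise shell): how Gen and Prop are
// typically combined once the ??? holes are filled in. The property and the
// fixed seed are examples only; RNG.Simple is assumed to be the book's simple
// generator from fpinscala.state.
object GenUsageSketch {
  val smallInt: Gen[Int] = Gen.choose(-10, 10)

  // "No element of a generated list is greater than its maximum."
  val maxProp: Prop = Prop.forAll(Gen.listOfN(10, smallInt)) { ns =>
    val max = ns.max
    !ns.exists(_ > max)
  }

  // Run with 100 test cases and a fixed seed.
  def check(): Prop.Result = maxProp.run(100, RNG.Simple(42))
}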
| RawToast/fpinscala | exercises/src/main/scala/fpinscala/testing/Gen.scala | Scala | mit | 4,743 |
package ls
abstract class Props(resource: String) {
import scala.util.control.Exception.allCatch
trait Provider {
def get(k: String): String
}
object Env extends Provider {
def get(k: String) = System.getenv(k)
}
abstract class FallbackProvider(val fallback: Provider) extends Provider
case class JProps(resource: String) extends FallbackProvider(Env) {
lazy val props = {
val p = new java.util.Properties()
getClass().getResourceAsStream(resource) match {
        case null => println("local resource %s not found (that's okay, falling back on env)" format resource)
case r => p.load(r)
}
p
}
def get(k: String) = props.getProperty(k) match {
case null => fallback.get(k)
case value => value
}
}
protected lazy val underlying = JProps(resource)
def get(name: String) = underlying.get(name) match {
case null => sys.error("undefined property %s" format name)
case value => value
}
def getInt(name: String) = allCatch.opt { get(name) toInt } match {
case None => sys.error("undefined int property %s" format name)
case Some(n) => n
}
def apply(name: String) = underlying.get(name) match {
case null => None
case value => Some(value)
}
def int(name: String) = apply(name).map(v => v.toInt)
}
object Props extends Props("/ls.properties")
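// Illustrative only (not part of the original file): how callers typically read
// configuration through Props. The property names below are hypothetical.
object PropsUsageSketch {
  def host: String = Props("host").getOrElse("localhost")
  def port: Int = Props.int("port").getOrElse(80)
}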
| Rhinofly/ls | library/src/main/scala/props.scala | Scala | mit | 1,369 |
package se.lu.nateko.cp.meta.services.labeling
import org.eclipse.rdf4j.model.IRI
import org.eclipse.rdf4j.model.ValueFactory
import se.lu.nateko.cp.meta.api.CustomVocab
import se.lu.nateko.cp.meta.core.crypto.Sha256Sum
class StationsVocab(val factory: ValueFactory) extends CustomVocab{
implicit val bup = makeUriProvider("http://meta.icos-cp.eu/ontologies/stationentry/")
val station = getRelativeRaw("Station")
val atmoStationClass = getRelativeRaw("AS")
val ecoStationClass = getRelativeRaw("ES")
val oceStationClass = getRelativeRaw("OS")
val hasShortName = getRelativeRaw("hasShortName")
val hasLongName = getRelativeRaw("hasLongName")
val hasPi = getRelativeRaw("hasPi")
val hasDeputyPi = getRelativeRaw("hasDeputyPi")
val hasFirstName = getRelativeRaw("hasFirstName")
val hasLastName = getRelativeRaw("hasLastName")
val hasEmail = getRelativeRaw("hasEmail")
val hasAffiliation = getRelativeRaw("hasAffiliation")
val hasPhone = getRelativeRaw("hasPhone")
val hasAssociatedFile = getRelativeRaw("hasAssociatedFile")
val hasApplicationStatus = getRelativeRaw("hasApplicationStatus")
val hasAppStatusComment = getRelativeRaw("hasAppStatusComment")
val hasAppStatusDate = getRelativeRaw("hasAppStatusDate")
val hasProductionCounterpart = getRelativeRaw("hasProductionCounterpart")
val hasStationClass = getRelativeRaw("hasStationClass")
def getProperty(fieldName: String) = getRelativeRaw(fieldName)
val files = new FilesVocab(factory)
}
class FilesVocab(val factory: ValueFactory) extends CustomVocab{
implicit val bup = makeUriProvider("http://meta.icos-cp.eu/files/")
val hasType = getRelativeRaw("hasType")
val hasName = getRelativeRaw("hasName")
def getUri(hashsum: Sha256Sum) = getRelativeRaw(hashsum.id)
def getFileHash(fileUri: IRI): Sha256Sum = Sha256Sum.fromString(fileUri.getLocalName).get
}
| ICOS-Carbon-Portal/meta | src/main/scala/se/lu/nateko/cp/meta/services/labeling/Vocabs.scala | Scala | gpl-3.0 | 1,845 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.react
import scala.scalajs.js
import scala.scalajs.js._
import org.scalajs.dom.html
import js.{ UndefOr, Any, Function => JFn }
import js.annotation.{ JSBracketAccess, JSName }
import js.{ Any => jAny }
import org.scalajs.dom._
import org.scalajs.dom.raw._
// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/react/react.d.ts
@js.native
abstract class Touch extends js.Any {
  var identifier: Double = js.native
  var target: EventTarget = js.native
  var screenX: Double = js.native
  var screenY: Double = js.native
  var clientX: Double = js.native
  var clientY: Double = js.native
  var pageX: Double = js.native
  var pageY: Double = js.native
}
| glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/react/Touch.scala | Scala | apache-2.0 | 1,283 |
/*
* Copyright (c) 2013 original authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eigengo.monitor.agent.akka
import org.specs2.mutable.Specification
class ActorTypeFilterSpec extends Specification {
import akka.actor.ActorPath
import org.eigengo.monitor.agent.akka.ActorFilter._
"Type filter" should {
val actorSystemName = "default"
val singlePath = ActorPath.fromString(s"akka://$actorSystemName/foo/bar/baz")
val singlePathFoo = PathAndClass(singlePath, Some("com.foo.BarActor"))
val singlePathFaa = PathAndClass(singlePath, Some("com.faa.BarActor"))
"Match concrete path" in {
ActorTypeFilter(AnyActorSystem, SameType("com.foo.BarActor")).accept(singlePathFoo) mustEqual true
ActorTypeFilter(NamedActorSystem(actorSystemName), SameType("com.foo.BarActor")).accept(singlePathFoo) mustEqual true
ActorTypeFilter(NamedActorSystem("asdadasdasdas"), SameType("com.foo.BarActor")).accept(singlePathFoo) mustEqual false
ActorTypeFilter(AnyActorSystem, SameType("com.foo.BarActor")).accept(singlePathFaa) mustEqual false
}
}
}
| eigengo/monitor | agent-akka/src/test/scala/org/eigengo/monitor/agent/akka/ActorTypeFilterSpec.scala | Scala | apache-2.0 | 1,620 |
import sbt._
import net.usersource.jettyembed.JettyEmbedWebProject
class vBaDProject(info: ProjectInfo) extends JettyEmbedWebProject(info) with IdeaProject {
val liftVersion = "2.2"
val commonsHttpVersion = "3.1"
val junitVersion = "4.5"
val specsVersion = "1.6.5"
val h2databaseVersion = "1.2.138"
override def libraryDependencies = Set(
"net.liftweb" %% "lift-webkit" % liftVersion % "compile->default",
"net.liftweb" %% "lift-mapper" % liftVersion % "compile->default",
"commons-httpclient" % "commons-httpclient" % commonsHttpVersion % "compile->default",
"junit" % "junit" % junitVersion % "test->default",
"org.scalatest" % "scalatest" % "1.2" % "test->default",
"com.h2database" % "h2" % h2databaseVersion
) ++ super.libraryDependencies
}
| glenford/vBaD | project/build/vBaD.scala | Scala | apache-2.0 | 788 |
/*
* Even Fibonacci numbers
* Problem 2
* Each new term in the Fibonacci sequence is generated by adding the previous two terms. By starting with 1 and 2, the first 10 terms will be:
* 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
* By considering the terms in the Fibonacci sequence whose values do not exceed four million, find the sum of the even-valued terms.
*/
object Euler2 {
val fibo: Stream[Int] = 1 #:: fibo.scanLeft(2)(_ + _)
def main(args: Array[String]) {
println(fibo.filter(_ % 2 == 0).takeWhile(_ < 4000000).sum)
}
}
| NotBobTheBuilder/ProjectEuler | scala/p2.scala | Scala | mit | 555 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.lambda
import java.io.Closeable
import kafka.server.KafkaConfig
import kafka.utils.TestUtils
import kafka.zk.EmbeddedZookeeper
import org.locationtech.geomesa.utils.io.PathUtils
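/** Spins up an in-process ZooKeeper instance and a single Kafka broker for tests;
  * close() shuts both down and deletes the temporary log directory. */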
class EmbeddedKafka extends Closeable {
private val zookeeper = new EmbeddedZookeeper()
val zookeepers = s"127.0.0.1:${zookeeper.port}"
private val logs = TestUtils.tempDir()
private val server = {
val config = TestUtils.createBrokerConfig(1, zookeepers)
config.setProperty("offsets.topic.num.partitions", "1")
config.setProperty("listeners", s"PLAINTEXT://127.0.0.1:${TestUtils.RandomPort}")
config.setProperty("log.dirs", logs.getAbsolutePath)
TestUtils.createServer(new KafkaConfig(config))
}
val brokers = s"127.0.0.1:${server.socketServer.boundPort()}"
override def close(): Unit = {
try { server.shutdown() } catch { case _: Throwable => }
try { zookeeper.shutdown() } catch { case _: Throwable => }
PathUtils.deleteRecursively(logs.toPath)
}
} | ronq/geomesa | geomesa-lambda/geomesa-lambda-datastore/src/test/scala/org/locationtech/geomesa/lambda/EmbeddedKafka.scala | Scala | apache-2.0 | 1,482 |
package org.cloudio.morpheus.tutor.chat.frag.step5
import java.util.Locale
import org.morpheus._
import Morpheus._
import org.cloudio.morpheus.tutor.chat.frag.step1.Contact
import org.cloudio.morpheus.tutor.chat.frag.step4.{MemoryOutputChannel, StandardOutputChannel, ContactPrettyPrinter, ContactRawPrinter}
/**
* Introducing morphing strategies and immutable morphs.
*
* Created by zslajchrt on 04/05/15.
*/
object Session {
def main(args: Array[String]) {
val contactKernel = singleton[Contact
with (ContactRawPrinter or ContactPrettyPrinter)
with (StandardOutputChannel or MemoryOutputChannel)]
var contact = contactKernel.!
contact.firstName = "Pepa"
contact.lastName = "Novák"
contact.male = true
contact.nationality = Locale.CANADA
var altNum: Int = 0
val morphStrategy = promote[contactKernel.Model](altNum)
    println(s"There are ${morphStrategy.altsCount} alternatives")
contact = contact.remorph(morphStrategy)
println(contact.myAlternative)
//contact.printContact()
altNum = 1
contact = contact.remorph
println(contact.myAlternative)
//contact.printContact()
altNum = 2
contact = contact.remorph
println(contact.myAlternative)
//contact.printContact()
altNum = 3
contact = contact.remorph
println(contact.myAlternative)
//contact.printContact()
}
/**
* The switch strategy using a submodel of the morph model.
*/
def main2(args: Array[String]) {
val contactKernel = singleton[Contact with (ContactRawPrinter or ContactPrettyPrinter) with (StandardOutputChannel or MemoryOutputChannel)]
val contact = contactKernel.!
contact.firstName = "Pepa"
contact.lastName = "Novák"
contact.male = true
contact.nationality = Locale.CANADA
var printerCoord: Int = 0
var channelCoord: Int = 0
val morphStrategy1 = promote[ContactRawPrinter or ContactPrettyPrinter](RootStrategy[contactKernel.Model](), printerCoord)
val morphStrategy2 = promote[StandardOutputChannel or MemoryOutputChannel](morphStrategy1, channelCoord)
def morphContact(): Unit = {
val morph = contactKernel.morph(morphStrategy2)
println(morph.myAlternative)
morph.printContact()
}
for (i <- 0 to 1; j <- 0 to 1) {
printerCoord = i
channelCoord = j
morphContact()
}
    // The strategy calculates the alt index as the modulo of i and altsCount
channelCoord = 1
for (i <- 0 to 100) {
printerCoord = i / 2
channelCoord = i % 2
morphContact()
}
}
}
| zslajchrt/morpheus-tutor | src/main/scala/org/cloudio/morpheus/tutor/chat/frag/step5/Session.scala | Scala | apache-2.0 | 2,584 |
package com.sksamuel.elastic4s.samples
import com.sksamuel.elastic4s.RefreshPolicy
import com.sksamuel.elastic4s.http.{ElasticClient, ElasticProperties}
import com.sksamuel.elastic4s.http.Response
import com.sksamuel.elastic4s.http.search.SearchResponse
object HttpClientExampleApp extends App {
// you must import the DSL to use the syntax helpers
import com.sksamuel.elastic4s.http.ElasticDsl._
val client = ElasticClient(ElasticProperties("http://localhost:9200"))
client.execute {
bulk(
indexInto("myindex" / "mytype").fields("country" -> "Mongolia", "capital" -> "Ulaanbaatar"),
indexInto("myindex" / "mytype").fields("country" -> "Namibia", "capital" -> "Windhoek")
).refresh(RefreshPolicy.WaitFor)
}.await
val response: Response[SearchResponse] = client.execute {
search("myindex").matchQuery("capital", "ulaanbaatar")
}.await
// prints out the original json
println(response.result.hits.hits.head.sourceAsString)
client.close()
}
| stringbean/elastic4s | samples/elastic4s-http-client-sbt/src/main/scala/com/sksamuel/elastic4s/samples/HttpClientExampleApp.scala | Scala | apache-2.0 | 991 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2015 Cayde Dixon
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package net.cazzar.mods.jukeboxreloaded.blocks
import com.google.common.base.Predicate
import net.cazzar.mods.jukeboxreloaded.JukeboxReloaded
import net.cazzar.mods.jukeboxreloaded.Util._
import net.cazzar.mods.jukeboxreloaded.blocks.tileentity.TileJukebox
import net.cazzar.mods.jukeboxreloaded.network.gui.GuiHandler
import net.minecraft.block.material.Material
import net.minecraft.block.properties.PropertyDirection
import net.minecraft.block.state.{BlockState, IBlockState}
import net.minecraft.block.{Block, ITileEntityProvider}
import net.minecraft.entity.EntityLivingBase
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.item.ItemStack
import net.minecraft.tileentity.TileEntity
import net.minecraft.util.{BlockPos, EnumFacing}
import net.minecraft.world.World
object BlockJukebox extends {
val FACING = PropertyDirection.create("facing", EnumFacing.Plane.HORIZONTAL.asInstanceOf[Predicate[EnumFacing]])
} with Block(Material.wood) with ITileEntityProvider {
setDefaultState(blockState.getBaseState.withProperty(FACING, EnumFacing.NORTH))
setUnlocalizedName("Jukebox")
override def createNewTileEntity(worldIn: World, meta: Int): TileEntity = new TileJukebox
override def onBlockDestroyedByPlayer(worldIn: World, pos: BlockPos, state: IBlockState): Unit = {
val tile = worldIn.getTile[TileJukebox](pos)
//TODO: Drop items
}
override def onBlockActivated(worldIn: World, pos: BlockPos, state: IBlockState, playerIn: EntityPlayer, side: EnumFacing, hitX: Float, hitY: Float, hitZ: Float): Boolean = {
if (!worldIn.isRemote) {
val tile = pos.getTileEntityChecked[TileJukebox](worldIn)
if (!tile.isDefined) return false
playerIn.openGui(JukeboxReloaded, GuiHandler.JUKEBOX, worldIn, pos.x, pos.y, pos.z)
}
true
}
override def getStateFromMeta(meta: Int): IBlockState = {
var enumfacing = EnumFacing.getFront(meta)
if (enumfacing.getAxis eq EnumFacing.Axis.Y) {
enumfacing = EnumFacing.NORTH
}
this.getDefaultState.withProperty(FACING, enumfacing)
}
override def getMetaFromState(state: IBlockState): Int = {
state.getValue(FACING).asInstanceOf[EnumFacing].getIndex
}
override def onBlockPlacedBy(worldIn: World, pos: BlockPos, state: IBlockState, placer: EntityLivingBase, stack: ItemStack) {
worldIn.setBlockState(pos, state.withProperty(FACING, placer.getHorizontalFacing.getOpposite), 2)
}
protected override def createBlockState: BlockState = new BlockState(this, FACING)
}
| cazzar/JukeboxReloaded | src/main/scala/net/cazzar/mods/jukeboxreloaded/blocks/BlockJukebox.scala | Scala | mit | 3,655 |
package org.atnos.eff.addon.monix
import cats._
import cats.implicits._
import monix.eval._
import monix.cats._
import monix.execution._
import org.atnos.eff._
import org.atnos.eff.syntax.all._
import scala.concurrent.duration.FiniteDuration
import scala.util._
trait TaskTypes {
type _task[R] = |=[Task, R]
type _Task[R] = <=[Task, R]
}
trait TaskCreation extends TaskTypes {
final def fromTask[R :_task, A](task: Task[A], timeout: Option[FiniteDuration] = None): Eff[R, A] =
timeout.fold(task)(t => task.timeout(t)).send[R]
final def taskFailed[R :_task, A](t: Throwable): Eff[R, A] =
fromTask(Task.fromTry[A](Failure(t)))
final def taskSuspend[R :_task, A](task: =>Task[Eff[R, A]], timeout: Option[FiniteDuration] = None): Eff[R, A] =
fromTask(Task.suspend(task), timeout).flatten
final def taskDelay[R :_task, A](call: => A, timeout: Option[FiniteDuration] = None): Eff[R, A] =
fromTask(Task.delay(call), timeout)
final def taskForkScheduler[R :_task, A](call: Task[A], scheduler: Scheduler, timeout: Option[FiniteDuration] = None): Eff[R, A] =
fromTask(Task.fork(call, scheduler), timeout)
final def taskFork[R :_task, A](call: Task[A], timeout: Option[FiniteDuration] = None): Eff[R, A] =
fromTask(Task.fork(call), timeout)
final def asyncBoundary[R :_task]: Eff[R, Unit] =
fromTask(forkedUnit)
final def asyncBoundary[R :_task](s: Scheduler): Eff[R, Unit] =
fromTask(forkedUnit.executeOn(s))
private val forkedUnit: Task[Unit] =
Task.fork(Task.unit)
final def taskAsync[R :_task, A](callbackConsumer: ((Throwable Either A) => Unit) => Unit,
timeout: Option[FiniteDuration] = None): Eff[R, A] = {
val async = Task.async[A] { (_, cb) =>
callbackConsumer(tea => cb(tea.fold(Failure(_), Success(_))))
Cancelable.empty
}
fromTask(async, timeout)
}
}
object TaskCreation extends TaskCreation
trait TaskInterpretation extends TaskTypes {
private val monixTaskMonad: MonadError[Task, Throwable] =
monix.cats.monixToCatsMonadError(Task.typeClassInstances.monadError)
private val monixTaskApplicative : Applicative[Task] =
monixToCatsApplicative(Task.nondeterminism.applicative)
def runAsync[R, A](e: Eff[R, A])(implicit m: Member.Aux[Task, R, NoFx]): Task[A] =
Eff.detachA(e)(monixTaskMonad, monixTaskApplicative, m)
def runSequential[R, A](e: Eff[R, A])(implicit m: Member.Aux[Task, R, NoFx]): Task[A] =
Eff.detach(e)(monixTaskMonad, m)
import interpret.of
def taskAttempt[R, A](e: Eff[R, A])(implicit task: Task /= R): Eff[R, Throwable Either A] =
interpret.interceptNatM[R, Task, Throwable Either ?, A](e,
new (Task ~> (Task of (Throwable Either ?))#l) {
def apply[X](fa: Task[X]): Task[Throwable Either X] =
fa.attempt
})
def forkTasks[R, A](e: Eff[R, A])(implicit task: Task /= R): Eff[R, A] =
interpret.interceptNat[R, Task, A](e)(
new (Task ~> Task) {
def apply[X](fa: Task[X]): Task[X] =
Task.fork(fa)
})
/** memoize the task result using a cache */
def memoize[A](key: AnyRef, cache: Cache, task: Task[A]): Task[A] =
Task.suspend {
cache.get[A](key).fold(task.map { r => cache.put(key, r); r })(Task.now)
}
/**
* Memoize task effects using a cache
*
* if this method is called with the same key the previous value will be returned
*/
def taskMemo[R, A](key: AnyRef, cache: Cache, e: Eff[R, A])(implicit task: Task /= R): Eff[R, A] =
taskAttempt(Eff.memoizeEffect(e, cache, key)).flatMap {
case Left(t) => Eff.send(taskSequenceCached.reset(cache, key)) >> TaskEffect.taskFailed(t)
case Right(a) => Eff.pure(a)
}
/**
* Memoize task values using a memoization effect
*
* if this method is called with the same key the previous value will be returned
*/
def taskMemoized[R, A](key: AnyRef, e: Eff[R, A])(implicit task: Task /= R, m: Memoized |= R): Eff[R, A] =
MemoEffect.getCache[R].flatMap(cache => taskMemo(key, cache, e))
def runTaskMemo[R, U, A](cache: Cache)(effect: Eff[R, A])(implicit m: Member.Aux[Memoized, R, U], task: Task |= U): Eff[U, A] = {
interpret.translate(effect)(new Translate[Memoized, U] {
def apply[X](mx: Memoized[X]): Eff[U, X] =
mx match {
case Store(key, value) => TaskCreation.taskDelay(cache.memo(key, value()))
case GetCache() => TaskCreation.taskDelay(cache)
}
})
}
implicit val taskSequenceCached: SequenceCached[Task] = new SequenceCached[Task] {
def get[X](cache: Cache, key: AnyRef): Task[Option[X]] =
Task.fork(Task.delay(cache.get(key)))
def apply[X](cache: Cache, key: AnyRef, sequenceKey: Int, tx: =>Task[X]): Task[X] =
cache.memo((key, sequenceKey), tx.memoize)
def reset(cache: Cache, key: AnyRef): Task[Unit] =
Task.delay {
cache.reset(key)
var i = 0
while (cache.get((key, i)).isDefined) {
cache.reset((key, i))
i += 1
}
}
}
}
object TaskInterpretation extends TaskInterpretation
trait TaskEffect extends TaskInterpretation with TaskCreation
object TaskEffect extends TaskEffect
| etorreborre/eff | monix/shared/src/main/scala/org/atnos/eff/addon/monix/TaskEffect.scala | Scala | mit | 5,215 |
// Solution-2.scala
// Solution to Exercise 2 in "For Loops"
import com.atomicscala.AtomicTest._
val r2 = Range(0, 10).inclusive
r2 is (0 to 10)
r2 is (0 until 11)
/* OUTPUT_SHOULD_BE
Range(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
Range(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
*/
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/17_ForLoops/Solution-2.scala | Scala | apache-2.0 | 269 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.httpclient
import java.lang.management.ManagementFactory
import java.util.concurrent.{TimeUnit, TimeoutException}
import javax.management.ObjectName
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpEntity.Chunked
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{Date, Server}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.util.ByteString
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.squbs.resolver.ResolverRegistry
import org.squbs.streams.circuitbreaker.impl.AtomicCircuitBreakerState
import org.squbs.streams.circuitbreaker.{CircuitBreakerOpenException, CircuitBreakerSettings}
import org.squbs.testkit.Timeouts.awaitMax
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.{Await, Future, Promise}
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}
object ClientFlowCircuitBreakerSpec {
val config = ConfigFactory.parseString(
"""
|squbs.circuit-breaker {
| reset-timeout = 2 minutes
|}
|
|clientWithConfig {
| type = squbs.httpclient
|
| circuit-breaker {
| max-failures = 2
| call-timeout = 10 milliseconds
| reset-timeout = 100 seconds
| }
|}
|
|clientWithConfigWithParam {
| type = squbs.httpclient
|
| circuit-breaker {
| max-failures = 2
| call-timeout = 10 milliseconds
| reset-timeout = 100 seconds
| }
|}
|
|disableCircuitBreaker {
| type = squbs.httpclient
|}
|
|multipleMaterializations {
| type = squbs.httpclient
|
| circuit-breaker {
| reset-timeout = 2 minutes
| }
|}
|
|drain {
| type = squbs.httpclient
|
| akka.http.host-connection-pool.max-connections = 10
| circuit-breaker {
| max-failures = 10000
| call-timeout = 10 milliseconds
| }
|}
|
|do-not-drain {
| type = squbs.httpclient
| akka.http {
| client.idle-timeout = 10 seconds
| host-connection-pool {
| max-connections = 10
| response-entity-subscription-timeout = 1 minute
| }
| }
|}
""".stripMargin)
implicit val system = ActorSystem("ClientFlowCircuitBreakerSpec", config)
implicit val materializer = ActorMaterializer()
val defaultMaxFailures = system.settings.config.getInt("squbs.circuit-breaker.max-failures")
val defaultMaxConnections = system.settings.config.getInt("akka.http.host-connection-pool.max-connections")
val numOfRequests = (defaultMaxFailures + defaultMaxConnections) * 2 // Some random large number
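  // Rough expectation: calls keep passing through until maxFailures failures are recorded, plus the
  // requests already admitted to the connection pool, hence maxConnections + maxFailures - 1 pass-throughs.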
val numOfPassThroughBeforeCircuitBreakerIsOpen = defaultMaxConnections + defaultMaxFailures - 1
val numOfFailFast = numOfRequests - numOfPassThroughBeforeCircuitBreakerIsOpen
ResolverRegistry(system).register[HttpEndpoint]("LocalhostEndpointResolver") {
    (_, _) => Some(HttpEndpoint(s"http://localhost:$port/"))
}
import akka.http.scaladsl.server.Directives._
import system.dispatcher
implicit val scheduler = system.scheduler
val InternalServerErrorResponse =
HttpResponse(StatusCodes.InternalServerError)
.withHeaders(Server("testServer") :: Date(DateTime(2017, 1, 1)) :: Nil)
val route =
path("internalServerError") {
complete(InternalServerErrorResponse)
} ~
path("delay") {
val promise = Promise[String]()
import scala.concurrent.duration._
val delay = 500.milliseconds
scheduler.scheduleOnce(delay)(promise.success("delayed"))
onComplete(promise.future) {
case _ =>
complete {
HttpResponse(entity =
              Chunked(ContentTypes.`text/plain(UTF-8)`, Source.single(ByteString("Response after delay!")))
)
}
}
}
val serverBinding = Await.result(Http().bindAndHandle(route, "localhost", 0), awaitMax)
val port = serverBinding.localAddress.getPort
}
class ClientFlowCircuitBreakerSpec extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {
import ClientFlowCircuitBreakerSpec._
override def afterAll: Unit = {
serverBinding.unbind() map {_ => system.terminate()}
}
it should "fail fast using default failure decider" in {
val circuitBreakerState =
AtomicCircuitBreakerState("internalServerError", system.settings.config.getConfig("squbs.circuit-breaker"))
val circuitBreakerSettings = CircuitBreakerSettings[HttpRequest, HttpResponse, Int](circuitBreakerState)
val responseSeq =
Source(1 to numOfRequests)
.map(HttpRequest(uri = "/internalServerError") -> _)
.via(ClientFlow[Int](s"http://localhost:$port/", circuitBreakerSettings = Some(circuitBreakerSettings)))
.map(_._1) // In case the ordering changes
.runWith(Sink.seq)
val expected =
List.fill(numOfPassThroughBeforeCircuitBreakerIsOpen)(Success(InternalServerErrorResponse)) ++
List.fill(numOfFailFast)(Failure(CircuitBreakerOpenException()))
responseSeq map { _ should contain theSameElementsAs expected }
}
it should "disable circuit breaker" in {
val clientFlow = ClientFlow[Int]("disableCircuitBreaker")
val responseSeq =
Source(1 to numOfRequests)
.map(HttpRequest(uri = "/internalServerError") -> _)
.via(clientFlow)
.map(_._1)
.runWith(Sink.seq)
val expected = List.fill(numOfRequests)(Success(InternalServerErrorResponse))
responseSeq map { _ should contain theSameElementsAs expected}
}
it should "fallback" in {
val circuitBreakerSettings =
CircuitBreakerSettings[HttpRequest, HttpResponse, Int](
AtomicCircuitBreakerState("fallbackClient", ConfigFactory.empty))
.withFallback((_: HttpRequest) => Try(HttpResponse(entity = "Fallback Response")))
val clientFlow = ClientFlow[Int]("fallbackClient", circuitBreakerSettings = Some(circuitBreakerSettings))
val responseSeq =
Source(1 to numOfRequests)
.map(HttpRequest(uri = "/internalServerError") -> _)
.via(clientFlow)
.map(_._1) // In case the ordering changes
.runWith(Sink.seq)
val expected =
List.fill(numOfPassThroughBeforeCircuitBreakerIsOpen)(Success(InternalServerErrorResponse)) ++
List.fill(numOfFailFast)(Success(HttpResponse(entity = "Fallback Response")))
responseSeq map { _ should contain theSameElementsAs expected }
}
it should "use the provided failure decider" in {
val circuitBreakerSettings =
CircuitBreakerSettings[HttpRequest, HttpResponse, Int](
AtomicCircuitBreakerState("customFailureDeciderClient", ConfigFactory.empty))
.withFailureDecider((_: Try[HttpResponse]) => false)
val clientFlow = ClientFlow[Int]("customFailureDeciderClient", circuitBreakerSettings = Some(circuitBreakerSettings))
val responseSeq =
Source(1 to numOfRequests)
.map(HttpRequest(uri = "/internalServerError") -> _)
.via(clientFlow)
.map(_._1)
.runWith(Sink.seq)
// Circuit Breaker should never be OPEN because we do not consider anything as failure
val expected = List.fill(numOfRequests)(Success(InternalServerErrorResponse))
responseSeq map { _ should contain theSameElementsAs expected }
}
it should "share the circuit breaker state across materializations" in {
val graph =
Source(1 to numOfRequests / 2)
.map(HttpRequest(uri = "/internalServerError") -> _)
.via(ClientFlow[Int]("multipleMaterializations"))
.map(_._1) // In case the ordering changes
.toMat(Sink.seq)(Keep.right)
val responseSeqFuture1 = graph.run()
val responseSeqFuture2 = graph.run()
val combinedResponses =
for {
responseSeq1 <- responseSeqFuture1
responseSeq2 <- responseSeqFuture2
} yield responseSeq1 ++ responseSeq2
combinedResponses map { responses =>
      // Because the default max-open-requests = 32, requests will wait in the connection pool's queue.
// If max-open-requests were equal to max-connections, we would not multiply by 2.
val maxNumOfPassThroughBeforeCircuitBreakerIsOpen = 2 * defaultMaxConnections + defaultMaxFailures - 1
val actualNumPassThrough = responses.filter(_ == Success(InternalServerErrorResponse)).size
val actualNumFailFast = numOfRequests - actualNumPassThrough
actualNumPassThrough should be >= numOfPassThroughBeforeCircuitBreakerIsOpen
actualNumPassThrough should be <= maxNumOfPassThroughBeforeCircuitBreakerIsOpen
actualNumFailFast should be >= numOfRequests - maxNumOfPassThroughBeforeCircuitBreakerIsOpen
actualNumFailFast should be <= numOfRequests - numOfPassThroughBeforeCircuitBreakerIsOpen
}
}
it should "share the circuit breaker state across multiple flows" in {
val circuitBreakerSettings =
CircuitBreakerSettings[HttpRequest, HttpResponse, Int](
AtomicCircuitBreakerState("multipleFlows", ConfigFactory.empty))
val responseSeqFuture1 =
Source(1 to numOfRequests / 2)
.map(HttpRequest(uri = "/internalServerError") -> _)
.via(ClientFlow[Int]("multipleFlows", circuitBreakerSettings = Some(circuitBreakerSettings)))
.map(_._1) // In case the ordering changes
.runWith(Sink.seq)
val responseSeqFuture2 =
Source(1 to numOfRequests / 2)
.map(HttpRequest(uri = "/internalServerError") -> _)
.via(ClientFlow[Int]("multipleFlows", circuitBreakerSettings = Some(circuitBreakerSettings)))
.map(_._1) // In case the ordering changes
.runWith(Sink.seq)
val combinedResponses =
for {
responseSeq1 <- responseSeqFuture1
responseSeq2 <- responseSeqFuture2
} yield responseSeq1 ++ responseSeq2
combinedResponses map { responses =>
      // Because the default max-open-requests = 32, requests will wait in the connection pool's queue.
// If max-open-requests were equal to max-connections, we would not multiply by 2.
val maxNumOfPassThroughBeforeCircuitBreakerIsOpen = 2 * defaultMaxConnections + defaultMaxFailures - 1
val actualNumPassThrough = responses.filter(_ == Success(InternalServerErrorResponse)).size
val actualNumFailFast = numOfRequests - actualNumPassThrough
actualNumPassThrough should be >= numOfPassThroughBeforeCircuitBreakerIsOpen
actualNumPassThrough should be <= maxNumOfPassThroughBeforeCircuitBreakerIsOpen
actualNumFailFast should be >= numOfRequests - maxNumOfPassThroughBeforeCircuitBreakerIsOpen
actualNumFailFast should be <= numOfRequests - numOfPassThroughBeforeCircuitBreakerIsOpen
}
}
it should "show circuit breaker configuration on JMX" in {
ClientFlow("clientWithConfig")
assertJmxValue("clientWithConfig-httpclient", "Name", "clientWithConfig-httpclient")
assertJmxValue(
"clientWithConfig-httpclient",
"ImplementationClass",
"org.squbs.streams.circuitbreaker.impl.AtomicCircuitBreakerState")
assertJmxValue("clientWithConfig-httpclient", "MaxFailures", 2)
assertJmxValue("clientWithConfig-httpclient", "CallTimeout", "10 milliseconds")
assertJmxValue("clientWithConfig-httpclient", "ResetTimeout", "100 seconds")
assertJmxValue("clientWithConfig-httpclient", "MaxResetTimeout", "36500 days")
assertJmxValue("clientWithConfig-httpclient", "ExponentialBackoffFactor", 1.0)
}
it should "give priority to passed in parameter" in {
import scala.concurrent.duration._
val circuitBreakerState =
AtomicCircuitBreakerState(
"clientWithConfigWithParam-httpclient",
11,
12 seconds,
13 minutes,
14 days,
16.0)
val cbs = CircuitBreakerSettings[HttpRequest, HttpResponse, Int](circuitBreakerState)
ClientFlow("clientWithConfigWithParam", circuitBreakerSettings = Some(cbs))
assertJmxValue("clientWithConfigWithParam-httpclient", "Name", "clientWithConfigWithParam-httpclient")
assertJmxValue(
"clientWithConfigWithParam-httpclient",
"ImplementationClass",
"org.squbs.streams.circuitbreaker.impl.AtomicCircuitBreakerState")
assertJmxValue("clientWithConfigWithParam-httpclient", "MaxFailures", 11)
assertJmxValue("clientWithConfigWithParam-httpclient", "CallTimeout", "12 seconds")
assertJmxValue("clientWithConfigWithParam-httpclient", "ResetTimeout", "13 minutes")
assertJmxValue("clientWithConfigWithParam-httpclient", "MaxResetTimeout", "14 days")
assertJmxValue("clientWithConfigWithParam-httpclient", "ExponentialBackoffFactor", 16.0)
}
it should "drain the http responses that arrive after the timeout" in {
val start = System.nanoTime()
val responseSeqFuture =
Source(1 to 100)
.map(HttpRequest(uri = "/delay") -> _)
.via(ClientFlow[Int]("drain"))
.map(_._1)
.runWith(Sink.seq)
val idleTimeoutConfig = system.settings.config.getString("do-not-drain.akka.http.client.idle-timeout")
val idleTimeout = Duration(idleTimeoutConfig).asInstanceOf[FiniteDuration]
val promise = Promise[Seq[Try[HttpResponse]]]()
import system.dispatcher
system.scheduler.scheduleOnce(idleTimeout) {
// Adding a timeout to make it easier to troubleshoot if draining functionality is somehow broken. Without this
// promise failure, the test case would just hang here when the connection pool is starved. Failing the
// test after a timeout and providing a helpful message should make it easier to debug the problem if that
// ever happens.
promise.failure(
new TimeoutException("Test case timed out! This happens when late arrived http responses are not drained!"))
}
Future.firstCompletedOf(promise.future :: responseSeqFuture :: Nil) map { seq =>
val elapsedTime = FiniteDuration(System.nanoTime - start, TimeUnit.NANOSECONDS)
val idleTimeout = Duration(system.settings.config.getString("akka.http.client.idle-timeout"))
// With a connection pool of size 10, 100 requests each taking 500 ms should be done in about 5+ seconds
// If draining was not happening, it would keep each connection busy till idle-timeout.
elapsedTime should be < idleTimeout
seq.size shouldBe 100
seq.collect {
case Failure(ex) if ex.isInstanceOf[TimeoutException] => ex
}.size shouldBe 100
}
}
it should "saturate the connection pool when no drainer is specified" in {
import scala.concurrent.duration._
    val circuitBreakerSettings =
CircuitBreakerSettings[HttpRequest, HttpResponse, Int](
AtomicCircuitBreakerState(
"do-not-drain",
maxFailures = 1000,
callTimeout = 10 milliseconds,
resetTimeout = 100 seconds))
val responseSeqFuture =
Source(1 to 20)
.map(HttpRequest(uri = "/delay") -> _)
        .via(ClientFlow[Int]("do-not-drain", circuitBreakerSettings = Some(circuitBreakerSettings)))
.map(_._1)
.runWith(Sink.seq)
val idleTimeoutConfig = system.settings.config.getString("do-not-drain.akka.http.client.idle-timeout")
val idleTimeout = Duration(idleTimeoutConfig).asInstanceOf[FiniteDuration]
val promise = Promise[String]()
import system.dispatcher
system.scheduler.scheduleOnce(idleTimeout)(promise.success("idle-timeout reached!"))
promise.future map { _ =>
// With a connection pool of size 10, 20 requests each taking 500 ms should be done in about 1+ seconds, if late
      // arriving responses are drained (as each request times out in 10 ms). If draining is not happening, it would
// keep each connection busy till idle-timeout. So, the stream would take at least 2 x idle-timeout to finish.
responseSeqFuture.isCompleted shouldBe false
}
}
def assertJmxValue(name: String, key: String, expectedValue: Any) = {
val oName = ObjectName.getInstance(
s"org.squbs.configuration:type=squbs.circuitbreaker,name=${ObjectName.quote(name)}")
val actualValue = ManagementFactory.getPlatformMBeanServer.getAttribute(oName, key)
actualValue shouldEqual expectedValue
}
}
| az-qbradley/squbs | squbs-httpclient/src/test/scala/org/squbs/httpclient/ClientFlowCircuitBreakerSpec.scala | Scala | apache-2.0 | 16,976 |
package org.jetbrains.plugins.scala.lang.transformation
package functions
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.extensions.{&&, ReferenceTarget}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaCode._
import org.jetbrains.plugins.scala.lang.psi.types.ScParameterizedType
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.ScMethodType
import org.jetbrains.plugins.scala.project.ProjectContext
/**
* @author Pavel Fatin
*/
class MakeEtaExpansionExplicit extends AbstractTransformer {
def transformation(implicit project: ProjectContext): PartialFunction[PsiElement, Unit] = {
case (e: ScReferenceExpression) && ReferenceTarget(_: ScFunction) &&
NonValueType(_: ScMethodType) && ExpectedType(_: ScParameterizedType)
if !e.getParent.isInstanceOf[ScUnderscoreSection] =>
e.replace(code"$e _")
case (e @ ScMethodCall(ReferenceTarget(_: ScFunction), _)) &&
NonValueType(_: ScMethodType) && ExpectedType(_: ScParameterizedType)
if !e.getParent.isInstanceOf[ScUnderscoreSection] =>
e.replace(code"$e _")
}
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/transformation/functions/MakeEtaExpansionExplicit.scala | Scala | apache-2.0 | 1,227 |
package spatial.codegen.scalagen
import argon.core._
import spatial.aliases._
trait ScalaGenMemories extends ScalaGenBits {
var globalMems: Boolean = false
def emitMem(lhs: Exp[_], x: String) = if (globalMems) emit(s"if ($lhs == null) $x") else emit("val " + x)
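  // Flattens a multi-dimensional address into a linear index using row-major strides
  // (each index is scaled by the product of the trailing dimensions), plus an optional offset.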
def flattenAddress(dims: Seq[Exp[Index]], indices: Seq[Exp[Index]], ofs: Option[Exp[Index]]): String = {
val strides = List.tabulate(dims.length){i => (dims.drop(i+1).map(quote) :+ "1").mkString("*") }
indices.zip(strides).map{case (i,s) => src"$i*$s" }.mkString(" + ") + ofs.map{o => src" + $o"}.getOrElse("")
}
def flattenAddress(dims: Seq[Exp[Index]], indices: Seq[Exp[Index]]): String = {
val strides = List.tabulate(dims.length){i => (dims.drop(i+1).map(quote) :+ "1").mkString("*") }
indices.zip(strides).map{case (i,s) => src"$i*$s"}.mkString(" + ")
}
private def oob(tp: Type[_], mem: Exp[_], lhs: Exp[_], inds: Seq[Exp[_]], pre: String, post: String, isRead: Boolean)(lines: => Unit) = {
val name = u"$mem"
val addr = if (inds.isEmpty && pre == "" && post == "") "err.getMessage"
else "\"" + pre + "\" + " + "s\"\"\"${" + inds.map(quote).map(_ + ".toString").mkString(" + \", \" + ") + "}\"\"\" + \"" + post + "\""
val op = if (isRead) "read" else "write"
open(src"try {")
lines
close("}")
open(src"catch {case err: java.lang.ArrayIndexOutOfBoundsException => ")
emit(s"""System.out.println("[warn] ${lhs.ctx} Memory $name: Out of bounds $op at address " + $addr)""")
if (isRead) emit(src"${invalid(tp)}")
close("}")
}
def oobApply(tp: Type[_], mem: Exp[_], lhs: Exp[_], inds: Seq[Exp[_]], pre: String = "", post: String = "")(lines: => Unit) = {
oob(tp, mem, lhs, inds, pre, post, isRead = true)(lines)
}
def oobUpdate(tp: Type[_], mem: Exp[_], lhs: Exp[_], inds: Seq[Exp[_]], pre: String = "", post: String = "")(lines: => Unit) = {
oob(tp, mem, lhs, inds, pre, post, isRead = false)(lines)
}
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/codegen/scalagen/ScalaGenMemories.scala | Scala | mit | 1,971 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.scalatest.BeforeAndAfter
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{AnalysisException, DataFrame, QueryTest, Row, SaveMode, SparkSession, SQLContext}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.trees.TreeNodeTag
import org.apache.spark.sql.connector.catalog.{Identifier, InMemoryTable, SupportsRead, SupportsWrite, Table, TableCapability}
import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform, Transform}
import org.apache.spark.sql.connector.read.{Scan, ScanBuilder, V1Scan}
import org.apache.spark.sql.connector.write.{LogicalWriteInfo, LogicalWriteInfoImpl, SupportsOverwrite, SupportsTruncate, V1Write, WriteBuilder}
import org.apache.spark.sql.execution.datasources.DataSourceUtils
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.internal.SQLConf.V2_SESSION_CATALOG_IMPLEMENTATION
import org.apache.spark.sql.internal.connector.SimpleTableProvider
import org.apache.spark.sql.sources._
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
class V1WriteFallbackSuite extends QueryTest with SharedSparkSession with BeforeAndAfter {
import testImplicits._
private val v2Format = classOf[InMemoryV1Provider].getName
override def beforeAll(): Unit = {
super.beforeAll()
InMemoryV1Provider.clear()
}
override def afterEach(): Unit = {
super.afterEach()
InMemoryV1Provider.clear()
}
test("append fallback") {
val df = Seq((1, "x"), (2, "y"), (3, "z")).toDF("a", "b")
df.write.mode("append").option("name", "t1").format(v2Format).save()
checkAnswer(InMemoryV1Provider.getTableData(spark, "t1"), df)
assert(InMemoryV1Provider.tables("t1").schema === df.schema.asNullable)
assert(InMemoryV1Provider.tables("t1").partitioning.isEmpty)
df.write.mode("append").option("name", "t1").format(v2Format).save()
checkAnswer(InMemoryV1Provider.getTableData(spark, "t1"), df.union(df))
}
test("overwrite by truncate fallback") {
val df = Seq((1, "x"), (2, "y"), (3, "z")).toDF("a", "b")
df.write.mode("append").option("name", "t1").format(v2Format).save()
val df2 = Seq((10, "k"), (20, "l"), (30, "m")).toDF("a", "b")
df2.write.mode("overwrite").option("name", "t1").format(v2Format).save()
checkAnswer(InMemoryV1Provider.getTableData(spark, "t1"), df2)
}
SaveMode.values().foreach { mode =>
test(s"save: new table creations with partitioning for table - mode: $mode") {
val format = classOf[InMemoryV1Provider].getName
val df = Seq((1, "x"), (2, "y"), (3, "z")).toDF("a", "b")
df.write.mode(mode).option("name", "t1").format(format).partitionBy("a").save()
checkAnswer(InMemoryV1Provider.getTableData(spark, "t1"), df)
assert(InMemoryV1Provider.tables("t1").schema === df.schema.asNullable)
assert(InMemoryV1Provider.tables("t1").partitioning.sameElements(
Array(IdentityTransform(FieldReference(Seq("a"))))))
}
}
test("save: default mode is ErrorIfExists") {
val format = classOf[InMemoryV1Provider].getName
val df = Seq((1, "x"), (2, "y"), (3, "z")).toDF("a", "b")
df.write.option("name", "t1").format(format).partitionBy("a").save()
// default is ErrorIfExists, and since a table already exists we throw an exception
val e = intercept[AnalysisException] {
df.write.option("name", "t1").format(format).partitionBy("a").save()
}
assert(e.getMessage.contains("already exists"))
}
test("save: Ignore mode") {
val format = classOf[InMemoryV1Provider].getName
val df = Seq((1, "x"), (2, "y"), (3, "z")).toDF("a", "b")
df.write.option("name", "t1").format(format).partitionBy("a").save()
// no-op
df.write.option("name", "t1").format(format).mode("ignore").partitionBy("a").save()
checkAnswer(InMemoryV1Provider.getTableData(spark, "t1"), df)
}
test("save: tables can perform schema and partitioning checks if they already exist") {
val format = classOf[InMemoryV1Provider].getName
val df = Seq((1, "x"), (2, "y"), (3, "z")).toDF("a", "b")
df.write.option("name", "t1").format(format).partitionBy("a").save()
val e2 = intercept[IllegalArgumentException] {
df.write.mode("append").option("name", "t1").format(format).partitionBy("b").save()
}
assert(e2.getMessage.contains("partitioning"))
val e3 = intercept[IllegalArgumentException] {
Seq((1, "x")).toDF("c", "d").write.mode("append").option("name", "t1").format(format)
.save()
}
assert(e3.getMessage.contains("schema"))
}
test("fallback writes should only analyze plan once") {
SparkSession.clearActiveSession()
SparkSession.clearDefaultSession()
try {
val session = SparkSession.builder()
.master("local[1]")
.withExtensions(_.injectPostHocResolutionRule(_ => OnlyOnceRule))
.config(V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[V1FallbackTableCatalog].getName)
.getOrCreate()
val df = session.createDataFrame(Seq((1, "x"), (2, "y"), (3, "z")))
df.write.mode("append").option("name", "t1").format(v2Format).saveAsTable("test")
} finally {
SparkSession.setActiveSession(spark)
SparkSession.setDefaultSession(spark)
}
}
test("SPARK-33492: append fallback should refresh cache") {
SparkSession.clearActiveSession()
SparkSession.clearDefaultSession()
try {
val session = SparkSession.builder()
.master("local[1]")
.config(V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[V1FallbackTableCatalog].getName)
.getOrCreate()
val df = session.createDataFrame(Seq((1, "x")))
df.write.mode("append").option("name", "t1").format(v2Format).saveAsTable("test")
session.catalog.cacheTable("test")
checkAnswer(session.read.table("test"), Row(1, "x") :: Nil)
val df2 = session.createDataFrame(Seq((2, "y")))
df2.writeTo("test").append()
checkAnswer(session.read.table("test"), Row(1, "x") :: Row(2, "y") :: Nil)
} finally {
SparkSession.setActiveSession(spark)
SparkSession.setDefaultSession(spark)
}
}
test("SPARK-33492: overwrite fallback should refresh cache") {
SparkSession.clearActiveSession()
SparkSession.clearDefaultSession()
try {
val session = SparkSession.builder()
.master("local[1]")
.config(V2_SESSION_CATALOG_IMPLEMENTATION.key, classOf[V1FallbackTableCatalog].getName)
.getOrCreate()
val df = session.createDataFrame(Seq((1, "x")))
df.write.mode("append").option("name", "t1").format(v2Format).saveAsTable("test")
session.catalog.cacheTable("test")
checkAnswer(session.read.table("test"), Row(1, "x") :: Nil)
val df2 = session.createDataFrame(Seq((2, "y")))
df2.writeTo("test").overwrite(lit(true))
checkAnswer(session.read.table("test"), Row(2, "y") :: Nil)
} finally {
SparkSession.setActiveSession(spark)
SparkSession.setDefaultSession(spark)
}
}
}
class V1WriteFallbackSessionCatalogSuite
extends InsertIntoTests(supportsDynamicOverwrite = false, includeSQLOnlyTests = true)
with SessionCatalogTest[InMemoryTableWithV1Fallback, V1FallbackTableCatalog] {
override protected val v2Format = classOf[InMemoryV1Provider].getName
override protected val catalogClassName: String = classOf[V1FallbackTableCatalog].getName
override protected val catalogAndNamespace: String = ""
override protected def verifyTable(tableName: String, expected: DataFrame): Unit = {
checkAnswer(InMemoryV1Provider.getTableData(spark, s"default.$tableName"), expected)
}
protected def doInsert(tableName: String, insert: DataFrame, mode: SaveMode): Unit = {
val tmpView = "tmp_view"
withTempView(tmpView) {
insert.createOrReplaceTempView(tmpView)
val overwrite = if (mode == SaveMode.Overwrite) "OVERWRITE" else "INTO"
sql(s"INSERT $overwrite TABLE $tableName SELECT * FROM $tmpView")
}
}
}
class V1FallbackTableCatalog extends TestV2SessionCatalogBase[InMemoryTableWithV1Fallback] {
override def newTable(
name: String,
schema: StructType,
partitions: Array[Transform],
properties: java.util.Map[String, String]): InMemoryTableWithV1Fallback = {
val t = new InMemoryTableWithV1Fallback(name, schema, partitions, properties)
InMemoryV1Provider.tables.put(name, t)
tables.put(Identifier.of(Array("default"), name), t)
t
}
}
private object InMemoryV1Provider {
val tables: mutable.Map[String, InMemoryTableWithV1Fallback] = mutable.Map.empty
def getTableData(spark: SparkSession, name: String): DataFrame = {
val t = tables.getOrElse(name, throw new IllegalArgumentException(s"Table $name doesn't exist"))
spark.createDataFrame(t.getData.asJava, t.schema)
}
def clear(): Unit = {
tables.clear()
}
}
class InMemoryV1Provider
extends SimpleTableProvider
with DataSourceRegister
with CreatableRelationProvider {
override def getTable(options: CaseInsensitiveStringMap): Table = {
InMemoryV1Provider.tables.getOrElse(options.get("name"), {
new InMemoryTableWithV1Fallback(
"InMemoryTableWithV1Fallback",
new StructType(),
Array.empty,
options.asCaseSensitiveMap()
)
})
}
override def shortName(): String = "in-memory"
override def createRelation(
sqlContext: SQLContext,
mode: SaveMode,
parameters: Map[String, String],
data: DataFrame): BaseRelation = {
val _sqlContext = sqlContext
val partitioning = parameters.get(DataSourceUtils.PARTITIONING_COLUMNS_KEY).map { value =>
DataSourceUtils.decodePartitioningColumns(value).map { partitioningColumn =>
IdentityTransform(FieldReference(partitioningColumn))
}
}.getOrElse(Nil)
val tableName = parameters("name")
val tableOpt = InMemoryV1Provider.tables.get(tableName)
val table = tableOpt.getOrElse(new InMemoryTableWithV1Fallback(
"InMemoryTableWithV1Fallback",
data.schema.asNullable,
partitioning.toArray,
Map.empty[String, String].asJava
))
if (tableOpt.isEmpty) {
InMemoryV1Provider.tables.put(tableName, table)
} else {
if (data.schema.asNullable != table.schema) {
throw new IllegalArgumentException("Wrong schema provided")
}
if (!partitioning.sameElements(table.partitioning)) {
throw new IllegalArgumentException("Wrong partitioning provided")
}
}
def getRelation: BaseRelation = new BaseRelation {
override def sqlContext: SQLContext = _sqlContext
override def schema: StructType = table.schema
}
if (mode == SaveMode.ErrorIfExists && tableOpt.isDefined) {
throw new AnalysisException("Table already exists")
} else if (mode == SaveMode.Ignore && tableOpt.isDefined) {
// do nothing
return getRelation
}
val writer = table.newWriteBuilder(
LogicalWriteInfoImpl(
"", StructType(Seq.empty), new CaseInsensitiveStringMap(parameters.asJava)))
if (mode == SaveMode.Overwrite) {
writer.asInstanceOf[SupportsTruncate].truncate()
}
val write = writer.build()
write.asInstanceOf[V1Write].toInsertableRelation.insert(data, overwrite = false)
getRelation
}
}
class InMemoryTableWithV1Fallback(
override val name: String,
override val schema: StructType,
override val partitioning: Array[Transform],
override val properties: java.util.Map[String, String])
extends Table
with SupportsWrite with SupportsRead {
partitioning.foreach { t =>
if (!t.isInstanceOf[IdentityTransform]) {
throw new IllegalArgumentException(s"Transform $t must be IdentityTransform")
}
}
override def capabilities: java.util.Set[TableCapability] = java.util.EnumSet.of(
TableCapability.BATCH_READ,
TableCapability.V1_BATCH_WRITE,
TableCapability.OVERWRITE_BY_FILTER,
TableCapability.TRUNCATE)
@volatile private var dataMap: mutable.Map[Seq[Any], Seq[Row]] = mutable.Map.empty
private val partFieldNames = partitioning.flatMap(_.references).toSeq.flatMap(_.fieldNames)
private val partIndexes = partFieldNames.map(schema.fieldIndex(_))
def getData: Seq[Row] = dataMap.values.flatten.toSeq
override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
new FallbackWriteBuilder(info.options)
}
private class FallbackWriteBuilder(options: CaseInsensitiveStringMap)
extends WriteBuilder
with SupportsTruncate
with SupportsOverwrite {
private var mode = "append"
override def truncate(): WriteBuilder = {
dataMap.clear()
mode = "truncate"
this
}
override def overwrite(filters: Array[Filter]): WriteBuilder = {
val keys = InMemoryTable.filtersToKeys(dataMap.keys, partFieldNames, filters)
dataMap --= keys
mode = "overwrite"
this
}
private def getPartitionValues(row: Row): Seq[Any] = {
partIndexes.map(row.get)
}
override def build(): V1Write = new V1Write {
override def toInsertableRelation: InsertableRelation = {
(data: DataFrame, overwrite: Boolean) => {
assert(!overwrite, "V1 write fallbacks cannot be called with overwrite=true")
val rows = data.collect()
rows.groupBy(getPartitionValues).foreach { case (partition, elements) =>
if (dataMap.contains(partition) && mode == "append") {
dataMap.put(partition, dataMap(partition) ++ elements)
} else if (dataMap.contains(partition)) {
throw new IllegalStateException("Partition was not removed properly")
} else {
dataMap.put(partition, elements)
}
}
}
}
}
}
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder =
new V1ReadFallbackScanBuilder(schema)
private class V1ReadFallbackScanBuilder(schema: StructType) extends ScanBuilder {
override def build(): Scan = new V1ReadFallbackScan(schema)
}
private class V1ReadFallbackScan(schema: StructType) extends V1Scan {
override def readSchema(): StructType = schema
override def toV1TableScan[T <: BaseRelation with TableScan](context: SQLContext): T =
new V1TableScan(context, schema).asInstanceOf[T]
}
private class V1TableScan(
context: SQLContext,
requiredSchema: StructType) extends BaseRelation with TableScan {
override def sqlContext: SQLContext = context
override def schema: StructType = requiredSchema
override def buildScan(): RDD[Row] = {
val data = InMemoryV1Provider.getTableData(context.sparkSession, name).collect()
context.sparkContext.makeRDD(data)
}
}
}
/** A rule that fails if a query plan is analyzed twice. */
object OnlyOnceRule extends Rule[LogicalPlan] {
private val tag = TreeNodeTag[String]("test")
private val counts = new mutable.HashMap[LogicalPlan, Int]()
override def apply(plan: LogicalPlan): LogicalPlan = {
if (plan.getTagValue(tag).isEmpty) {
plan.setTagValue(tag, "abc")
plan
} else {
val cnt = counts.getOrElseUpdate(plan, 0) + 1
// This rule will be run as injectPostHocResolutionRule, and is supposed to be run only twice.
// Once during planning and once during checkBatchIdempotence
assert(cnt <= 1, "This rule shouldn't have been called again")
counts.put(plan, cnt)
plan
}
}
}
| ueshin/apache-spark | sql/core/src/test/scala/org/apache/spark/sql/connector/V1WriteFallbackSuite.scala | Scala | apache-2.0 | 16,515 |
/* Code Pulse: a real-time code coverage tool, for more information, see <http://code-pulse.com/>
*
* Copyright (C) 2014-2017 Code Dx, Inc. <https://codedx.com/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.codedx.codepulse.hq.trace
import com.codedx.codepulse.hq.data.TraceSegmentEvent
import reactive.EventSource
import reactive.EventStream
trait HasTraceSegmentBuilder {
private var _sm: TraceSegmentManager = _
private val es = new EventSource[TraceSegmentEvent]
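  // The setter below wraps the incoming manager so every segment access is reported to `es`,
  // which backs the public `segmentEvents` stream.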
protected def segmentManager_=(sm: TraceSegmentManager) = {
_sm = sm.wrap { sa =>
new SegmentAccessNotifier(sa, { es fire _ })
}
}
protected def segmentManager = _sm
/** Tell the segmentManager to open a segment with the given name
* with the current time as the timestamp.
* @return The ID of the segment that gets opened
*/
def openSegment(name: String): Int = {
segmentManager.openSegment(name, System.currentTimeMillis)
}
/** Tell the segmentManager to close the latest segment, using
* the current time as the timestamp.
*/
def closeSegment() = {
segmentManager.close(System.currentTimeMillis)
}
/** Tell the segmentManager to rename the given segment.
*
* @param id The ID of the segment to rename
* @param newName The new name for the segment
*/
def renameSegment(id: Int, newName: String) = {
segmentManager.renameSegment(id, newName)
}
/** Exposes generated segment events through an EventStream */
def segmentEvents: EventStream[TraceSegmentEvent] = es
} | secdec/codepulse | hq/src/main/scala/com/secdec/bytefrog/hq/trace/HasTraceSegmentBuilder.scala | Scala | apache-2.0 | 2,028 |
package code
package model
import lib.RogueMetaRecord
import org.bson.types.ObjectId
import org.joda.time.DateTime
import net.liftweb._
import common._
import http.{StringField => _, BooleanField => _, _}
import mongodb.record.field._
import record.field._
import util.FieldContainer
import net.liftmodules.mongoauth._
import net.liftmodules.mongoauth.field._
import net.liftmodules.mongoauth.model._
class User private () extends ProtoAuthUser[User] with ObjectIdPk[User] {
def meta = User
def userIdAsString: String = id.toString
object locale extends LocaleField(this) {
override def displayName = "Locale"
override def defaultValue = "en_US"
}
object timezone extends TimeZoneField(this) {
override def displayName = "Time Zone"
override def defaultValue = "America/Chicago"
}
object name extends StringField(this, 64) {
override def displayName = "Name"
override def validations =
valMaxLen(64, "Name must be 64 characters or less") _ ::
super.validations
}
object location extends StringField(this, 64) {
override def displayName = "Location"
override def validations =
valMaxLen(64, "Location must be 64 characters or less") _ ::
super.validations
}
object bio extends TextareaField(this, 160) {
override def displayName = "Bio"
override def validations =
valMaxLen(160, "Bio must be 160 characters or less") _ ::
super.validations
}
/*
   * FieldContainers for various LiftScreens.
*/
def accountScreenFields = new FieldContainer {
def allFields = List(username, email, locale, timezone)
}
def profileScreenFields = new FieldContainer {
def allFields = List(name, location, bio)
}
def registerScreenFields = new FieldContainer {
def allFields = List(username, email)
}
def whenCreated: DateTime = new DateTime(id.get.getDate)
}
object User extends User with ProtoAuthUserMeta[User] with RogueMetaRecord[User] with Loggable {
import mongodb.BsonDSL._
override def collectionName = "user.users"
createIndex((email.name -> 1), true)
createIndex((username.name -> 1), true)
def findByEmail(in: String): Box[User] = find(email.name, in)
def findByUsername(in: String): Box[User] = find(username.name, in)
def findByStringId(id: String): Box[User] =
if (ObjectId.isValid(id)) find(new ObjectId(id))
else Empty
override def onLogIn: List[User => Unit] = List(user => User.loginCredentials.remove())
override def onLogOut: List[Box[User] => Unit] = List(
x => logger.debug("User.onLogOut called."),
boxedUser => boxedUser.foreach { u =>
ExtSession.deleteExtCookie()
}
)
/*
* MongoAuth vars
*/
private lazy val siteName = MongoAuth.siteName.vend
private lazy val sysUsername = MongoAuth.systemUsername.vend
private lazy val indexUrl = MongoAuth.indexUrl.vend
private lazy val registerUrl = MongoAuth.registerUrl.vend
private lazy val loginTokenAfterUrl = MongoAuth.loginTokenAfterUrl.vend
/*
* LoginToken
*/
override def handleLoginToken: Box[LiftResponse] = {
val resp = S.param("token").flatMap(LoginToken.findByStringId) match {
case Full(at) if (at.expires.isExpired) => {
at.delete_!
RedirectWithState(indexUrl, RedirectState(() => { S.error("Login token has expired") }))
}
case Full(at) => find(at.userId.get).map(user => {
if (user.validate.length == 0) {
user.verified(true)
user.update
logUserIn(user)
at.delete_!
RedirectResponse(loginTokenAfterUrl)
}
else {
at.delete_!
regUser(user)
RedirectWithState(registerUrl, RedirectState(() => { S.notice("Please complete the registration form") }))
}
}).openOr(RedirectWithState(indexUrl, RedirectState(() => { S.error("User not found") })))
case _ => RedirectWithState(indexUrl, RedirectState(() => { S.warning("Login token not provided") }))
}
Full(resp)
}
// send an email to the user with a link for logging in
def sendLoginToken(user: User): Unit = {
import net.liftweb.util.Mailer._
LoginToken.createForUserIdBox(user.id.get).foreach { token =>
val msgTxt =
"""
|Someone requested a link to change your password on the %s website.
|
|If you did not request this, you can safely ignore it. It will expire 48 hours from the time this message was sent.
|
|Follow the link below or copy and paste it into your internet browser.
|
|%s
|
|Thanks,
|%s
""".format(siteName, token.url, sysUsername).stripMargin
sendMail(
From(MongoAuth.systemFancyEmail),
Subject("%s Password Help".format(siteName)),
To(user.fancyEmail),
PlainMailBodyType(msgTxt)
)
}
}
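  // Illustrative call-site sketch for a "password help" snippet (the names here are
  // assumptions, not part of this file):
  //
  //   User.findByEmail(submittedEmail) match {
  //     case Full(user) => User.sendLoginToken(user); S.notice("An email with password help has been sent")
  //     case _ => S.error("No account found for that email address")
  //   }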
/*
* ExtSession
*/
def createExtSession(uid: ObjectId): Box[Unit] = ExtSession.createExtSessionBox(uid)
/*
* Test for active ExtSession.
*/
def testForExtSession: Box[Req] => Unit = {
ignoredReq => {
if (currentUserId.isEmpty) {
ExtSession.handleExtSession match {
case Full(es) => find(es.userId.get).foreach { user => logUserIn(user, false) }
case Failure(msg, _, _) =>
logger.warn("Error logging user in with ExtSession: %s".format(msg))
case Empty =>
}
}
}
}
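  // Minimal Boot.scala wiring sketch (assumed, not part of this file): register the
  // ExtSession check so "remember me" sessions are restored on the first stateful request.
  //
  //   LiftRules.earlyInStateful.append(User.testForExtSession)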
// used during login process
object loginCredentials extends SessionVar[LoginCredentials](LoginCredentials(""))
object regUser extends SessionVar[User](createRecord.email(loginCredentials.is.email))
}
case class LoginCredentials(email: String, isRememberMe: Boolean = false)
| eltimn/lift-poly-example | src/main/scala/code/model/User.scala | Scala | apache-2.0 | 5,740 |