| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M |
// TODO(high): proper incremental xsbt.version.properties generation
// TODO(low): proper generated API sources caching: doesn't detect output directory change
import sbt._
import Keys._
import Project.Initialize
import Util._
import Common._
import Licensed._
import Scope.ThisScope
import LaunchProguard.{proguard, Proguard}
object Sbt extends Build
{
override lazy val settings = super.settings ++ buildSettings ++ Status.settings
def buildSettings = Seq(
organization := "org.scala-tools.sbt",
version := "0.10.2-SNAPSHOT",
publishArtifact in packageDoc := false,
scalaVersion := "2.8.1",
publishMavenStyle := false,
componentID := None
)
lazy val myProvided = config("provided") intransitive;
override def projects = super.projects.map(p => p.copy(configurations = (p.configurations.filter(_ != Provided)) :+ myProvided))
lazy val root: Project = Project("xsbt", file("."), aggregate = nonRoots ) settings( rootSettings : _*) configs( Sxr.sxrConf, Proguard )
lazy val nonRoots = projects.filter(_ != root).map(p => LocalProject(p.id))
/*** Subproject declarations ***/
// defines the Java interfaces through which the launcher and the launched application communicate
lazy val launchInterfaceSub = project(launchPath / "interface", "Launcher Interface") settings(javaOnly : _*)
// the launcher. Retrieves, loads, and runs applications based on a configuration file.
lazy val launchSub = testedBaseProject(launchPath, "Launcher") dependsOn(ioSub % "test->test", interfaceSub % "test", launchInterfaceSub) settings(launchSettings : _*)
// used to test the retrieving and loading of an application: sample app is packaged and published to the local repository
lazy val testSamples = noPublish( baseProject(launchPath / "test-sample", "Launch Test") ) dependsOn(interfaceSub, launchInterfaceSub) settings(scalaCompiler, crossPaths := false)
// defines Java structures used across Scala versions, such as the API structures and relationships extracted by
// the analysis compiler phases and passed back to sbt. The API structures are defined in a simple
// format from which Java sources are generated by the datatype generator subproject
lazy val interfaceSub = project(file("interface"), "Interface") settings(interfaceSettings : _*)
// defines operations on the API of a source, including determining whether it has changed and converting it to a string
lazy val apiSub = baseProject(compilePath / "api", "API") dependsOn(interfaceSub)
/***** Utilities *****/
lazy val controlSub = baseProject(utilPath / "control", "Control")
lazy val collectionSub = testedBaseProject(utilPath / "collection", "Collections")
// The API for forking, combining, and doing I/O with system processes
lazy val processSub = baseProject(utilPath / "process", "Process") dependsOn(ioSub % "test->test")
// Path, IO (formerly FileUtilities), NameFilter and other I/O utility classes
lazy val ioSub = testedBaseProject(utilPath / "io", "IO") dependsOn(controlSub)
// Utilities related to reflection, managing Scala versions, and custom class loaders
lazy val classpathSub = baseProject(utilPath / "classpath", "Classpath") dependsOn(launchInterfaceSub, ioSub) settings(scalaCompiler)
// Command line-related utilities.
lazy val completeSub = testedBaseProject(utilPath / "complete", "Completion") dependsOn(collectionSub, controlSub, ioSub) settings(jline)
// logging
lazy val logSub = testedBaseProject(utilPath / "log", "Logging") dependsOn(interfaceSub, processSub) settings(libraryDependencies += jlineDep % "optional")
// class file reader and analyzer
lazy val classfileSub = testedBaseProject(utilPath / "classfile", "Classfile") dependsOn(ioSub, interfaceSub, logSub)
// generates immutable or mutable Java data types according to a simple input format
lazy val datatypeSub = baseProject(utilPath /"datatype", "Datatype Generator") dependsOn(ioSub)
/***** Intermediate-level Modules *****/
// Apache Ivy integration
lazy val ivySub = baseProject(file("ivy"), "Ivy") dependsOn(interfaceSub, launchInterfaceSub, logSub % "compile;test->test", ioSub % "compile;test->test", launchSub % "test->test") settings(ivy, jsch, httpclient)
// Runner for uniform test interface
lazy val testingSub = baseProject(file("testing"), "Testing") dependsOn(ioSub, classpathSub, logSub) settings(libraryDependencies += "org.scala-tools.testing" % "test-interface" % "0.5")
// Basic task engine
lazy val taskSub = testedBaseProject(tasksPath, "Tasks") dependsOn(controlSub, collectionSub)
// Standard task system. This provides map, flatMap, join, and more on top of the basic task model.
lazy val stdTaskSub = testedBaseProject(tasksPath / "standard", "Task System") dependsOn(taskSub % "compile;test->test", collectionSub, logSub, ioSub, processSub)
// Persisted caching based on SBinary
lazy val cacheSub = baseProject(cachePath, "Cache") dependsOn(ioSub, collectionSub) settings(sbinary)
// Builds on cache to provide caching for filesystem-related operations
lazy val trackingSub = baseProject(cachePath / "tracking", "Tracking") dependsOn(cacheSub, ioSub)
// Embedded Scala code runner
lazy val runSub = baseProject(file("run"), "Run") dependsOn(ioSub, logSub, classpathSub, processSub)
// Compiler-side interface to the compiler; it is compiled against the compiler in use, either ahead of time or on the fly.
// Includes API and Analyzer phases that extract source API and relationships.
lazy val compileInterfaceSub = baseProject(compilePath / "interface", "Compiler Interface") dependsOn(interfaceSub, ioSub % "test->test", logSub % "test->test", launchSub % "test->test") settings( compileInterfaceSettings : _*)
lazy val precompiled29 = precompiled("2.9.0-1")
// lazy val precompiled27 = precompiled("2.7.7")
// Implements the core functionality of detecting and propagating changes incrementally.
// Defines the data structures for representing file fingerprints and relationships and the overall source analysis
lazy val compileIncrementalSub = testedBaseProject(compilePath / "inc", "Incremental Compiler") dependsOn(collectionSub, apiSub, ioSub, logSub)
// Persists the incremental data structures using SBinary
lazy val compilePersistSub = baseProject(compilePath / "persist", "Persist") dependsOn(compileIncrementalSub, apiSub) settings(sbinary)
// sbt-side interface to compiler. Calls compiler-side interface reflectively
lazy val compilerSub = testedBaseProject(compilePath, "Compile") dependsOn(launchInterfaceSub, interfaceSub % "compile;test->test", ivySub, ioSub, classpathSub,
logSub % "test->test", launchSub % "test->test", apiSub % "test->test") settings( compilerSettings : _*)
// Searches the source API data structures, currently looks for subclasses and annotations
lazy val discoverySub = testedBaseProject(compilePath / "discover", "Discovery") dependsOn(compileIncrementalSub, apiSub, compilerSub % "test->test")
lazy val scriptedBaseSub = baseProject(scriptedPath / "base", "Scripted Framework") dependsOn(ioSub, processSub)
lazy val scriptedSbtSub = baseProject(scriptedPath / "sbt", "Scripted sbt") dependsOn(ioSub, logSub, processSub, scriptedBaseSub, launchInterfaceSub % "provided")
lazy val scriptedPluginSub = baseProject(scriptedPath / "plugin", "Scripted Plugin") dependsOn(sbtSub, classpathSub)
// Implementation and support code for defining actions.
lazy val actionsSub = baseProject(mainPath / "actions", "Actions") dependsOn(
classfileSub, classpathSub, compileIncrementalSub, compilePersistSub, compilerSub, completeSub, discoverySub,
interfaceSub, ioSub, ivySub, logSub, processSub, runSub, stdTaskSub, taskSub, trackingSub, testingSub)
// The main integration project for sbt. It brings all of the subsystems together, configures them, and provides for overriding conventions.
lazy val mainSub = testedBaseProject(mainPath, "Main") dependsOn(actionsSub, interfaceSub, ioSub, ivySub, launchInterfaceSub, logSub, processSub, runSub)
// Strictly for bringing implicits and aliases from subsystems into the top-level sbt namespace through a single package object
// technically, we need a dependency on all of mainSub's dependencies, but we don't do that since this is strictly an integration project
// with the sole purpose of providing certain identifiers without qualification (with a package object)
lazy val sbtSub = baseProject(sbtPath, "Simple Build Tool") dependsOn(mainSub, compileInterfaceSub, precompiled29, scriptedSbtSub % "test->test") settings(sbtSettings : _*)
/* Nested subproject paths */
def sbtPath = file("sbt")
def cachePath = file("cache")
def tasksPath = file("tasks")
def launchPath = file("launch")
def utilPath = file("util")
def compilePath = file("compile")
def mainPath = file("main")
def scriptedPath = file("scripted")
def sbtSettings = Seq(
normalizedName := "sbt"
)
def scriptedTask: Initialize[InputTask[Unit]] = inputTask { result =>
(proguard in Proguard, fullClasspath in scriptedSbtSub in Test, scalaInstance in scriptedSbtSub, publishAll, version, scalaVersion, scriptedScalaVersion, scriptedSource, result) map {
(launcher, scriptedSbtClasspath, scriptedSbtInstance, _, v, sv, ssv, sourcePath, args) =>
val loader = classpath.ClasspathUtilities.toLoader(scriptedSbtClasspath.files, scriptedSbtInstance.loader)
val m = ModuleUtilities.getObject("sbt.test.ScriptedTests", loader)
val r = m.getClass.getMethod("run", classOf[File], classOf[Boolean], classOf[String], classOf[String], classOf[String], classOf[Array[String]], classOf[File])
try { r.invoke(m, sourcePath, true: java.lang.Boolean, v, sv, ssv, args.toArray[String], launcher) }
catch { case ite: java.lang.reflect.InvocationTargetException => throw ite.getCause }
}
}
lazy val scriptedScalaVersion = SettingKey[String]("scripted-scala-version")
lazy val scripted = InputKey[Unit]("scripted")
lazy val scriptedSource = SettingKey[File]("scripted-source")
lazy val publishAll = TaskKey[Unit]("publish-all")
def deepTasks[T](scoped: ScopedTask[Seq[T]]): Initialize[Task[Seq[T]]] = deep(scoped.task).map { _.flatMap(_.join.map(_.flatten)) }
def deep[T](scoped: ScopedSetting[T]): Initialize[Task[Seq[T]]] =
state map { s =>
val sxrProjects = projects filterNot Set(root, sbtSub, scriptedBaseSub, scriptedSbtSub, scriptedPluginSub) map { p => LocalProject(p.id) }
Defaults.inAllProjects(sxrProjects, scoped, Project.extract(s).structure.data)
}
def launchSettings = inConfig(Compile)(Transform.configSettings) ++ Seq(jline, ivy, crossPaths := false,
compile in Test <<= compile in Test dependsOn(publishLocal in interfaceSub, publishLocal in testSamples, publishLocal in launchInterfaceSub)
// mappings in (Compile, packageBin) <++= (mappings in (launchInterfaceSub, Compile, packageBin) ).identity
)
import Sxr.sxr
def releaseSettings = Release.settings(nonRoots, proguard in Proguard)
def rootSettings = releaseSettings ++ LaunchProguard.settings ++ LaunchProguard.specific(launchSub) ++ Sxr.settings ++ docSetting ++ Seq(
scriptedScalaVersion := "2.8.1",
scripted <<= scriptedTask,
scriptedSource <<= (sourceDirectory in sbtSub) / "sbt-test",
sources in sxr <<= deepTasks(sources in Compile),
Sxr.sourceDirectories <<= deep(sourceDirectories in Compile).map(_.map(_.flatten)),
fullClasspath in sxr <<= (externalDependencyClasspath in Compile in sbtSub).identity,
compileInputs in (Compile,sxr) <<= (sources in sxr, compileInputs in sbtSub in Compile, fullClasspath in sxr) map { (srcs, in, cp) =>
in.copy(config = in.config.copy(sources = srcs, classpath = cp.files))
},
publishAll <<= inAll(nonRoots, publishLocal.task),
TaskKey[Unit]("build-all") <<= (publishAll, proguard in Proguard, sxr, doc) map { (_,_,_,_) => () }
)
def docSetting = inConfig(Compile)(inTask(sxr)(doc in ThisScope.copy(task = Global, config = Global) <<= Defaults.docTask))
def interfaceSettings = javaOnly ++ Seq(
crossPaths := false,
projectComponent,
exportJars := true,
componentID := Some("xsbti"),
watchSources <++= apiDefinitions.identity,
resourceGenerators in Compile <+= (version, resourceManaged, streams) map generateVersionFile,
apiDefinitions <<= baseDirectory map { base => (base / "definition") :: (base / "other") :: (base / "type") :: Nil },
sourceGenerators in Compile <+= (cacheDirectory, apiDefinitions, fullClasspath in Compile in datatypeSub, sourceManaged in Compile, mainClass in datatypeSub in Compile, runner, streams) map generateAPICached
)
def precompiledSettings = Seq(
artifact in packageBin <<= scalaInstance in Compile apply { si =>
val bincID = binID + "_" + si.actualVersion
Artifact(binID) extra("e:component" -> bincID)
},
target <<= (target, scalaVersion) { (base, sv) => base / ("precompiled_" + sv) },
scalacOptions := Nil,
crossPaths := false,
exportedProducts in Compile := Nil,
exportedProducts in Test := Nil,
libraryDependencies <+= scalaVersion( "org.scala-lang" % "scala-compiler" % _ % "provided"),
libraryDependencies += jlineDep artifacts(Artifact("jline", Map("e:component" -> srcID)))
)
//
def compileInterfaceSettings: Seq[Setting[_]] = precompiledSettings ++ Seq(
exportJars := true,
artifact in (Compile, packageSrc) := Artifact(srcID) extra("e:component" -> srcID)
)
def compilerSettings = Seq(
libraryDependencies <+= scalaVersion( "org.scala-lang" % "scala-compiler" % _ % "test"),
unmanagedJars in Test <<= (packageSrc in compileInterfaceSub in Compile).map(x => Seq(x).classpath)
)
def precompiled(scalav: String): Project = baseProject(compilePath / "interface", "Precompiled " + scalav.replace('.', '_')) dependsOn(interfaceSub) settings(scalaVersion := scalav) settings(precompiledSettings : _*)
}
| ornicar/xsbt | project/Sbt.scala | Scala | bsd-3-clause | 13,775 |
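The scriptedTask in the build above loads sbt.test.ScriptedTests through a dedicated classloader and invokes its run method reflectively, so the scripted framework never has to sit on the build's own classpath. A minimal sketch of that reflective-module pattern, with illustrative jar path and method shape (the real signature is the seven-argument one shown above):

import java.io.File
import java.net.URLClassLoader

object ReflectiveModuleDemo {
  // A Scala `object Foo` compiles to class `Foo$` with a static MODULE$ field
  // holding the singleton; ModuleUtilities.getObject does essentially this.
  val loader = new URLClassLoader(Array(new File("scripted.jar").toURI.toURL), null)
  val moduleClass = loader.loadClass("sbt.test.ScriptedTests$")
  val module = moduleClass.getField("MODULE$").get(null)
  // Look up and invoke a method on the reflectively loaded object.
  val run = module.getClass.getMethod("run", classOf[File])
  // run.invoke(module, new File("src/sbt-test"))  // hypothetical one-arg variant
}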
package feh.phtpe
import org.specs2.Specification
import feh.phtpe.short._
class CollectionsOfPhisTypedSpec extends Specification{
def is = s2"""
__map__
${ ( 1 to 10 map (_.of[kg]) map(_ * 2) ).map(_.value) == Range(2, 22, 2) }
__reduce__
${ ( 1 to 10 map(_.of[kg]) ).reduceLeft(_ + _) =@= 55.of[kg] }
__fold__
${ (0.of[kg] /: (1 to 10))(_ + _.of[kg]) =@= 55.of[kg] }
${ (0.of[kg] /: (1 to 10).map(_.of[kg]))(_ + _) =@= 55.of[kg] }
__min/max__ $todo
__sum/product__ $todo
"""
}
| fehu/phtpe | phtpe/src/test/scala/feh/phtpe/CollectionsOfPhisTypedSpec.scala | Scala | mit | 534 |
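The spec above exercises collection operations over unit-tagged numbers. A self-contained sketch of the same pattern, with a hand-rolled value class standing in for phtpe's of[kg] (the real library derives the arithmetic from physical-type machinery):

// Minimal stand-in for a unit-tagged quantity: a value class over Int.
final case class Kg(value: Int) extends AnyVal {
  def +(other: Kg): Kg = Kg(value + other.value)
  def *(n: Int): Kg    = Kg(value * n)
}

object KgDemo extends App {
  val weights = (1 to 10).map(Kg(_))        // analogous to (1 to 10).map(_.of[kg])
  println(weights.map(_ * 2).map(_.value))  // Vector(2, 4, ..., 20), cf. the __map__ case
  println(weights.reduceLeft(_ + _))        // Kg(55), cf. the __reduce__ case
  println(weights.foldLeft(Kg(0))(_ + _))   // Kg(55), cf. the (0.of[kg] /: ...) fold
}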
package com.typesafe.slick.testkit.tests
import com.typesafe.slick.testkit.util.{JdbcTestDB, AsyncTest}
import slick.jdbc.{ResultSetHoldability, ResultSetConcurrency, ResultSetType, JdbcBackend}
class JdbcMiscTest extends AsyncTest[JdbcTestDB] {
import tdb.profile.api._
def testNullability = {
class T1(tag: Tag) extends Table[String](tag, "t1") {
def a = column[String]("a", O.PrimaryKey)
def * = a
}
val t1 = TableQuery[T1]
class T3(tag: Tag) extends Table[Option[String]](tag, "t3") {
def a = column[Option[String]]("a")
def * = a
}
val t3 = TableQuery[T3]
seq(
(t1.schema ++ t3.schema).create,
t1 += "a",
t3 += Some("a"),
t3 += None,
(t1 += null.asInstanceOf[String]).failed
)
}
def testSimpleDBIO = {
val getAutoCommit = SimpleDBIO[Boolean](_.connection.getAutoCommit)
getAutoCommit.map(_ shouldBe true)
}
def testStatementParameters = {
def check(sp: JdbcBackend.StatementParameters) =
GetStatementParameters.map { csp => csp shouldBe sp }
DBIO.seq(
check(JdbcBackend.StatementParameters(ResultSetType.Auto, ResultSetConcurrency.Auto, ResultSetHoldability.Auto, null, 0)),
DBIO.seq(
check(JdbcBackend.StatementParameters(ResultSetType.ScrollInsensitive, ResultSetConcurrency.Auto, ResultSetHoldability.Auto, null, 0)),
check(JdbcBackend.StatementParameters(ResultSetType.ScrollInsensitive, ResultSetConcurrency.Auto, ResultSetHoldability.HoldCursorsOverCommit, null, 100)).
withStatementParameters(rsHoldability = ResultSetHoldability.HoldCursorsOverCommit, fetchSize = 100),
check(JdbcBackend.StatementParameters(ResultSetType.ScrollInsensitive, ResultSetConcurrency.Auto, ResultSetHoldability.Auto, null, 0))
).withStatementParameters(rsType = ResultSetType.ScrollInsensitive),
check(JdbcBackend.StatementParameters(ResultSetType.Auto, ResultSetConcurrency.Auto, ResultSetHoldability.Auto, null, 0))
)
}
def testOverrideStatements = {
class T(tag: Tag) extends Table[Int](tag, u"t") {
def id = column[Int]("a")
def * = id
}
val t = TableQuery[T]
val a1 = t.filter(_.id === 1)
val a2 = t.filter(_.id === 2)
seq(
t.schema.create,
t ++= Seq(1, 2, 3),
a1.result.map(_ shouldBe Seq(1)),
a1.result.overrideStatements(a2.result.statements).map(_ shouldBe Seq(2)),
a1.result.head.map(_ shouldBe 1),
a1.result.head.overrideStatements(a2.result.head.statements).map(_ shouldBe 2)
)
}
}
| AtkinsChang/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/JdbcMiscTest.scala | Scala | bsd-2-clause | 2,561 |
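The withStatementParameters combinator shown in testStatementParameters scopes JDBC statement settings to a subtree of actions. A hedged usage sketch, assuming an H2 profile and a db handle in scope, following the streaming recipe from the Slick documentation:

import slick.jdbc.H2Profile.api._
import slick.jdbc.{ResultSetConcurrency, ResultSetType}

object StreamingSketch {
  // Forward-only, read-only cursor with a small fetch size so the driver can
  // stream rows instead of materializing the whole result set at once.
  val streamedIds =
    sql"select id from t".as[Int]
      .withStatementParameters(
        rsType = ResultSetType.ForwardOnly,
        rsConcurrency = ResultSetConcurrency.ReadOnly,
        fetchSize = 100)
  // db.stream(streamedIds) then yields a Reactive Streams Publisher over the rows.
}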
/*
* Copyright 2013 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.http4s.blaze
import cats.effect._
import com.codahale.metrics.{Timer => _, _}
import com.example.http4s.ExampleService
import org.http4s.HttpApp
import org.http4s.blaze.server.BlazeServerBuilder
import org.http4s.implicits._
import org.http4s.metrics.dropwizard._
import org.http4s.server.HttpMiddleware
import org.http4s.server.Router
import org.http4s.server.Server
import org.http4s.server.middleware.Metrics
object BlazeMetricsExample extends IOApp {
override def run(args: List[String]): IO[ExitCode] =
BlazeMetricsExampleApp.resource[IO].use(_ => IO.never).as(ExitCode.Success)
}
object BlazeMetricsExampleApp {
def httpApp[F[_]: Async]: HttpApp[F] = {
val metricsRegistry: MetricRegistry = new MetricRegistry()
val metrics: HttpMiddleware[F] = Metrics[F](Dropwizard(metricsRegistry, "server"))
Router(
"/http4s" -> metrics(ExampleService[F].routes),
"/http4s/metrics" -> metricsService[F](metricsRegistry),
).orNotFound
}
def resource[F[_]: Async]: Resource[F, Server] = {
val app = httpApp[F]
BlazeServerBuilder[F]
.bindHttp(8080)
.withHttpApp(app)
.resource
}
}
| http4s/http4s | examples/blaze/src/main/scala/com/example/http4s/blaze/BlazeMetricsExample.scala | Scala | apache-2.0 | 1,757 |
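Because httpApp is exposed separately from the server builder, the metrics middleware can be exercised in memory without binding a socket. A hedged sketch, assuming cats-effect 3's default runtime; the concrete route paths inside ExampleService are not shown in this file, so the first request path below is illustrative:

import cats.effect.IO
import cats.effect.unsafe.implicits.global
import org.http4s.{Method, Request, Uri}

object MetricsSmokeTest extends App {
  val app = BlazeMetricsExampleApp.httpApp[IO]
  // Any request under /http4s is timed by the Dropwizard middleware...
  val hit = Request[IO](Method.GET, Uri.unsafeFromString("/http4s/some-route"))
  // ...and the registry itself is rendered as JSON under /http4s/metrics.
  val metrics = Request[IO](Method.GET, Uri.unsafeFromString("/http4s/metrics"))
  println(app.run(hit).unsafeRunSync().status)
  println(app.run(metrics).unsafeRunSync().status)
}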
package kata.calc
import kata.calc.Calculator
import org.scalatest.{FlatSpec, Matchers}
class StringCalculatorTest extends FlatSpec with Matchers {
it should " evaluate integer " in {
Calculator.evaluate("100") shouldBe 100
}
it should " evaluate float number " in {
Calculator.evaluate("100.21") shouldEqual (100.21 +- 0.01)
}
it should "evaluate addition" in {
Calculator.evaluate("10+23") shouldBe 33
}
it should "evaluate subtraction" in {
Calculator.evaluate("43-12") shouldBe 31
}
it should "evaluate multiplication" in {
Calculator.evaluate("43.25×5") shouldBe 216.25
}
it should "evaluate division" in {
Calculator.evaluate("54÷2") shouldBe 27
}
it should " evaluate number of different operations" in {
Calculator.evaluate("453+43242-31.15+34×15-1521÷15") shouldBe 44072.45
}
}
| Alex-Diez/Scala-TDD-Katas | old-katas/string-calc/day-1/src/test/scala/kata/calc/StringCalculatorTest.scala | Scala | mit | 912 |
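The Calculator under test is not included in this file. A minimal sketch that would satisfy these cases is a two-level evaluator in which × and ÷ bind tighter than + and - (a hypothetical implementation, not the kata author's code):

object Calculator {
  def evaluate(input: String): Double = {
    var pos = 0
    def peek: Char = if (pos < input.length) input.charAt(pos) else '\u0000'
    def number(): Double = {
      val start = pos
      while (pos < input.length && (input.charAt(pos).isDigit || input.charAt(pos) == '.')) pos += 1
      input.substring(start, pos).toDouble
    }
    // Higher-precedence level: × and ÷, applied left to right.
    def term(): Double = {
      var acc = number()
      while (peek == '×' || peek == '÷') {
        val op = peek; pos += 1
        acc = if (op == '×') acc * number() else acc / number()
      }
      acc
    }
    // Lower-precedence level: + and -, applied left to right over terms.
    def expr(): Double = {
      var acc = term()
      while (peek == '+' || peek == '-') {
        val op = peek; pos += 1
        acc = if (op == '+') acc + term() else acc - term()
      }
      acc
    }
    expr()
  }
}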
package nl.malienkolders.htm.admin.comet
import net.liftweb._
import http._
import actor._
object ChatServer extends LiftActor with ListenerManager {
private var msgs: InboxMessages = InboxMessages(Vector())
def createUpdate = msgs
override def lowPriority = {
case s: String =>
msgs = InboxMessages(msgs.v ++ Vector(System.currentTimeMillis() -> s))
updateListeners()
}
}
case class InboxMessages(v: Vector[(Long, String)])
| hema-tournament-manager/htm | htm-admin/src/main/scala/nl/malienkolders/htm/admin/comet/ChatServer.scala | Scala | apache-2.0 | 453 |
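The consuming side of this ListenerManager is typically a CometActor mixing in CometListener, which registers itself with ChatServer and re-renders whenever an InboxMessages update arrives. A hedged sketch of that counterpart (the rendering details are illustrative):

import net.liftweb.http.{CometActor, CometListener}

class ChatComet extends CometActor with CometListener {
  private var messages: Vector[(Long, String)] = Vector()

  // Tells CometListener which LiftActor to register with; ChatServer pushes
  // its createUpdate (the current InboxMessages) on registration.
  def registerWith = ChatServer

  override def lowPriority = {
    case InboxMessages(v) =>
      messages = v
      reRender()
  }

  def render = <ul>{messages.map { case (_, msg) => <li>{msg}</li> }}</ul>
}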
package w2v
/**
* Created by cbadenes on 17/09/15.
*/
object TestWikiW2V {
def main(args: Array[String]) : Unit={
ModelBuilder.main(List("2","1g","/Users/cbadenes/Projects/siminwikart-challenge4/text/wikipedia/articles_body10000.csv","model/w2v").toArray)
}
}
| cbadenes/text-mining-algorithms | src/test/scala/w2v/TestWikiW2V.scala | Scala | apache-2.0 | 273 |
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.common.jsonformat
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
import play.api.libs.json.Json
import play.api.libs.json.OFormat
class TypedFormatsTest extends AssertionsForJUnit {
import TypedFormatsTest._
@Test
def typedDefinitionReads(): Unit = {
val plainJs = Json.obj("a" -> 1)
val testJs = Json.obj(
"typeName" -> "T1",
"definition" -> plainJs)
val reads = TypedFormats.typedDefinitionReads("T1", format1)
assertResult(format1.reads(plainJs))(reads.reads(testJs))
}
@Test
def typedDefinitionWrites(): Unit = {
val writes = TypedFormats.typedDefinitionWrites("T1", format1)
val expectedJs = Json.obj(
"typeName" -> "T1",
"definition" -> Json.obj("a" -> 1))
assertResult(expectedJs)(writes.writes(T1(1)))
}
}
object TypedFormatsTest {
case class T1(a: Int)
case class T2(b: Int)
implicit val format1: OFormat[T1] = Json.format[T1]
implicit val format2: OFormat[T2] = Json.format[T2]
}
| coursera/courscala | courscala/src/test/scala/org/coursera/common/jsonformat/TypedFormatsTest.scala | Scala | apache-2.0 | 1,618 |
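Read together, the two tests show that typedDefinitionWrites wraps a payload under typeName/definition and typedDefinitionReads unwraps it when the type name matches. A hedged round-trip sketch using the same fixtures:

import org.coursera.common.jsonformat.TypedFormats
import org.coursera.common.jsonformat.TypedFormatsTest._

object TypedRoundTrip extends App {
  val writes = TypedFormats.typedDefinitionWrites("T1", format1)
  val reads  = TypedFormats.typedDefinitionReads("T1", format1)

  val js = writes.writes(T1(1))                  // {"typeName":"T1","definition":{"a":1}}
  assert(reads.reads(js).asOpt.contains(T1(1)))  // unwrapping recovers the original value
}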
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600j.v3
import org.joda.time.LocalDate
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600j.v3.retriever.CT600JBoxRetriever
case class J25A(value: Option[LocalDate]) extends SchemeDateBox{
override def validate(boxRetriever: CT600JBoxRetriever): Set[CtValidation] =
validateSchemeDate(boxRetriever.j20(), boxRetriever.j20A(), boxRetriever.j25())
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600j/v3/J25A.scala | Scala | apache-2.0 | 992 |
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.agent.rest
import javax.ws.rs._
import net.lshift.diffa.kernel.frontend.{EscalationDef, Configuration}
import net.lshift.diffa.kernel.differencing.DomainDifferenceStore
import net.lshift.diffa.kernel.config.PairRef
import net.lshift.diffa.agent.rest.ResponseUtils._
/**
* ATM this resource proxies directly through to the underlying configuration, because the current scope of
* escalations is quite minimal.
*
* This is likely to change when #274 lands
*/
class EscalationsResource(val config:Configuration,
val diffStore:DomainDifferenceStore,
val space:Long) {
@GET
@Path("/{pairId}")
@Produces(Array("application/json"))
def listEscalations(@PathParam("pairId") pairId: String): Array[EscalationDef] = config.listEscalationForPair(space, pairId).toArray
@DELETE
@Path("/{pairId}")
def unscheduleEscalations(@PathParam("pairId") pairId: String) = {
diffStore.unscheduleEscalations(PairRef(space = space, name = pairId))
resourceDeleted()
}
}
| 0x6e6562/diffa | agent/src/main/scala/net/lshift/diffa/agent/rest/EscalationsResource.scala | Scala | apache-2.0 | 1,665 |
object Test {
def f[T](xs: T*): T = xs.head
def g[T] = f[T] _
def main(args: Array[String]): Unit = {
println(g("hello" +: args))
}
}
| folone/dotty | tests/pending/run/eta-expand-star.scala | Scala | bsd-3-clause | 147 |
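What this pending test pins down: eta-expanding a varargs method with _ produces a function over Seq, so the expanded g accepts "hello" +: args as one collection argument rather than as individual varargs. A standalone sketch of the same expansion in Scala 2:

object EtaDemo extends App {
  def f[T](xs: T*): T = xs.head
  // Under eta-expansion, a T* parameter surfaces as Seq[T]:
  val g: Seq[String] => String = f[String] _
  println(g(Seq("hello", "world")))  // prints "hello"
}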
package spatial.codegen.chiselgen
import argon.core._
import argon.nodes._
import spatial.aliases._
import spatial.metadata._
import spatial.nodes._
import spatial.utils._
trait ChiselGenRegFile extends ChiselGenSRAM {
private var nbufs: List[(Sym[SRAM[_]], Int)] = List()
override protected def name(s: Dyn[_]): String = s match {
case Def(_: RegFileNew[_,_]) => s"""${s}_${s.name.getOrElse("regfile")}"""
case Def(_: LUTNew[_,_]) => s"""${s}_${s.name.getOrElse("lut")}"""
case _ => super.name(s)
}
override protected def remap(tp: Type[_]): String = tp match {
case tp: RegFileType[_] => src"Array[${tp.child}]"
case _ => super.remap(tp)
}
override protected def emitNode(lhs: Sym[_], rhs: Op[_]): Unit = rhs match {
case op@RegFileNew(dims, inits) =>
val initVals = if (inits.isDefined) {
getConstValues(inits.get).toList.map{a => src"${a}d"}.mkString(",")
} else { "None"}
val initString = if (inits.isDefined) src"Some(List(${initVals}))" else "None"
val f = lhs.tp.typeArguments.head match {
case a: FixPtType[_,_,_] => a.fracBits
case _ => 0
}
val width = bitWidth(lhs.tp.typeArguments.head)
duplicatesOf(lhs).zipWithIndex.foreach{ case (mem, i) =>
val writerInfo = writersOf(lhs).toSeq.map{w =>
val port = portsOf(w, lhs, i).head
w.node match {
case Def(_:RegFileStore[_]) => (port, 1, 1)
case Def(_:RegFileShiftIn[_]) => (port, 1, 1)
case Def(_@ParRegFileShiftIn(_,inds,d,data,en)) => (port, inds.length, data.tp.asInstanceOf[VectorType[_]].width) // Get stride
case Def(_@ParRegFileStore(_,_,_,en)) => (port, en.length, 1)
}
}
if (writerInfo.isEmpty) {warn(s"RegFile $lhs has no writers!")}
val parInfo = writerInfo.groupBy(_._1).map{case (k,v) => src"($k -> ${v.map{_._2}.sum})"}
val stride = if (writerInfo.isEmpty) 1 else writerInfo.map(_._3).max
val depth = mem match {
case BankedMemory(dims, d, isAccum) => d
case _ => 1
}
if (depth == 1) {
emitGlobalModule(src"""val ${lhs}_$i = Module(new templates.ShiftRegFile(List(${getConstValues(dims)}), $initString, $stride, ${if (writerInfo.length == 0) 1 else writerInfo.map{_._2}.reduce{_+_}}, false, $width, $f))""")
emitGlobalModule(src"${lhs}_$i.io.dump_en := false.B")
} else {
appPropertyStats += HasNBufRegFile
nbufs = nbufs :+ (lhs.asInstanceOf[Sym[SRAM[_]]], i)
emitGlobalModule(src"""val ${lhs}_$i = Module(new NBufShiftRegFile(List(${getConstValues(dims)}), $initString, $stride, $depth, Map(${parInfo.mkString(",")}), $width, $f))""")
}
resettersOf(lhs).indices.foreach{ ii => emitGlobalWire(src"""val ${lhs}_${i}_manual_reset_$ii = Wire(Bool())""")}
if (resettersOf(lhs).nonEmpty) {
emitGlobalModule(src"""val ${lhs}_${i}_manual_reset = ${resettersOf(lhs).indices.map{ii => src"${lhs}_${i}_manual_reset_$ii"}.mkString(" | ")}""")
emitGlobalModule(src"""${lhs}_$i.io.reset := ${lhs}_${i}_manual_reset | accelReset""")
} else {emitGlobalModule(src"${lhs}_$i.io.reset := accelReset")}
}
case RegFileReset(rf,en) =>
val parent = parentOf(lhs).get
val id = resettersOf(rf).map{_._1}.indexOf(lhs)
duplicatesOf(rf).indices.foreach{i => emit(src"${rf}_${i}_manual_reset_$id := $en & ${DL(swap(parent, DatapathEn), enableRetimeMatch(en, lhs), true)} ")}
case op@RegFileLoad(rf,inds,en) =>
val dispatch = dispatchOf(lhs, rf).toList.head
val port = portsOf(lhs, rf, dispatch).toList.head
val addr = inds.map{i => src"${i}.r"}.mkString(",")
emitGlobalWireMap(src"""${lhs}""",src"""Wire(${newWire(lhs.tp)})""")
emit(src"""${lhs}.r := ${rf}_${dispatch}.readValue(List($addr), $port)""")
case op@RegFileStore(rf,inds,data,en) =>
val width = bitWidth(rf.tp.typeArguments.head)
val parent = writersOf(rf).find{_.node == lhs}.get.ctrlNode
val enable = src"""${swap(parent, DatapathEn)} & ~${swap(parent, Inhibitor)} & ${swap(parent, IIDone)}"""
emit(s"""// Assemble multidimW vector""")
emit(src"""val ${lhs}_wVec = Wire(Vec(1, new multidimRegW(${inds.length}, List(${constDimsOf(rf)}), ${width}))) """)
emit(src"""${lhs}_wVec(0).data := ${data}.r""")
emit(src"""${lhs}_wVec(0).en := ${en} & ${DL(enable, enableRetimeMatch(en, lhs), true)}""")
inds.zipWithIndex.foreach{ case(ind,j) =>
emit(src"""${lhs}_wVec(0).addr($j) := ${ind}.r // Assume always an int""")
}
emit(src"""${lhs}_wVec(0).shiftEn := false.B""")
duplicatesOf(rf).zipWithIndex.foreach{ case (mem, i) =>
emit(src"""${rf}_$i.connectWPort(${lhs}_wVec, List(${portsOf(lhs, rf, i)})) """)
}
case ParRegFileLoad(rf, inds, ens) => //FIXME: Not correct for more than par=1
val dispatch = dispatchOf(lhs, rf).toList.head
val port = portsOf(lhs, rf, dispatch).toList.head
emitGlobalWire(s"""val ${quote(lhs)} = Wire(Vec(${ens.length}, ${newWire(lhs.tp.typeArguments.head)}))""")
ens.zipWithIndex.foreach { case (en, i) =>
val addr = inds(i).map{id => src"${id}.r"}.mkString(",")
emit(src"""val ${lhs}_$i = Wire(${newWire(lhs.tp.typeArguments.head)})""")
emit(src"""${lhs}(${i}).r := ${rf}_${dispatch}.readValue(List(${addr}), $port)""")
}
// emit(s"""${quote(lhs)} := Vec(${(0 until ens.length).map{i => src"${lhs}_$i"}.mkString(",")})""")
case ParRegFileStore(rf, inds, data, ens) => //FIXME: Not correct for more than par=1
val width = bitWidth(rf.tp.typeArguments.head)
val parent = writersOf(rf).find{_.node == lhs}.get.ctrlNode
val enable = src"""${swap(parent, DatapathEn)} & ~${swap(parent, Inhibitor)} && ${swap(parent, IIDone)}"""
emit(s"""// Assemble multidimW vector""")
emitGlobalWireMap(src"""${lhs}_wVec""", src"""Wire(Vec(${ens.length}, new multidimRegW(${inds.head.length}, List(${constDimsOf(rf)}), ${width})))""")
ens.indices.foreach{ k =>
emit(src"""${swap(lhs, WVec)}($k).data := ${data(k)}.r""")
emit(src"""${swap(lhs, WVec)}($k).en := ${ens(k)} & ${DL(enable, enableRetimeMatch(ens.head, lhs), true)}""")
inds(k).zipWithIndex.foreach{ case(ind,j) =>
emit(src"""${swap(lhs, WVec)}($k).addr($j) := ${ind}.r // Assume always an int""")
}
emit(src"""${swap(lhs, WVec)}($k).shiftEn := false.B""")
}
duplicatesOf(rf).zipWithIndex.foreach{ case (mem, i) =>
val p = portsOf(lhs, rf, i).mkString(",")
emit(src"""${rf}_$i.connectWPort(${swap(lhs, WVec)}, List(${p})) """)
}
case RegFileShiftIn(rf,inds,d,data,en) =>
val width = bitWidth(rf.tp.typeArguments.head)
val parent = writersOf(rf).find{_.node == lhs}.get.ctrlNode
val enable = src"""${swap(parent, DatapathEn)} & ~${swap(parent, Inhibitor)}"""
emit(s"""// Assemble multidimW vector""")
emit(src"""val ${lhs}_wVec = Wire(Vec(1, new multidimRegW(${inds.length}, List(${constDimsOf(rf)}), ${width}))) """)
emit(src"""${lhs}_wVec(0).data := ${data}.r""")
emit(src"""${lhs}_wVec(0).shiftEn := ${en} & ${DL(enable, enableRetimeMatch(en, lhs), true)}""")
inds.zipWithIndex.foreach{ case(ind,j) =>
emit(src"""${lhs}_wVec(0).addr($j) := ${ind}.r // Assume always an int""")
}
emit(src"""${lhs}_wVec(0).en := false.B""")
duplicatesOf(rf).zipWithIndex.foreach{ case (mem, i) =>
emit(src"""${rf}_$i.connectShiftPort(${lhs}_wVec, List(${portsOf(lhs, rf, i)})) """)
}
case ParRegFileShiftIn(rf,inds,d,data,en) =>
val width = bitWidth(rf.tp.typeArguments.head)
val parent = writersOf(rf).find{_.node == lhs}.get.ctrlNode
val enable = src"""${swap(parent, DatapathEn)} & ~${swap(parent, Inhibitor)}"""
emit(s"""// Assemble multidimW vectors""")
emit(src"""val ${lhs}_wVec = Wire(Vec(${inds.length}, new multidimRegW(${inds.length}, List(${constDimsOf(rf)}), ${width}))) """)
open(src"""for (${lhs}_i <- 0 until ${data}.length) {""")
emit(src"""${lhs}_wVec(${lhs}_i).data := ${data}(${lhs}_i).r""")
emit(src"""${lhs}_wVec(${lhs}_i).shiftEn := ${en} & ${DL(enable, enableRetimeMatch(en, lhs), true)}""")
inds.zipWithIndex.foreach{ case(ind,j) =>
emit(src"""${lhs}_wVec(${lhs}_i).addr($j) := ${ind}.r // Assume always an int""")
}
emit(src"""${lhs}_wVec(${lhs}_i).en := false.B""")
close(src"}")
duplicatesOf(rf).zipWithIndex.foreach{ case (mem, i) =>
emit(src"""${rf}_$i.connectShiftPort(${lhs}_wVec, List(${portsOf(lhs, rf, i)})) """)
}
case op@LUTNew(dims, init) =>
appPropertyStats += HasLUT
val width = bitWidth(lhs.tp.typeArguments.head)
val f = lhs.tp.typeArguments.head match {
case a: FixPtType[_,_,_] => a.fracBits
case _ => 0
}
val lut_consts = if (width == 1) {
getConstValues(init).toList.map{a => if (a == true) "1.0" else "0.0"}.mkString(",")
} else {
getConstValues(init).toList.map{a => src"${a}d"}.mkString(",")
}
duplicatesOf(lhs).zipWithIndex.foreach{ case (mem, i) =>
val numReaders = readersOf(lhs).filter{read => dispatchOf(read, lhs) contains i}.length
emitGlobalModule(src"""val ${lhs}_$i = Module(new LUT(List($dims), List(${lut_consts}), ${numReaders}, $width, $f))""")
}
// } else {
// nbufs = nbufs :+ (lhs.asInstanceOf[Sym[SRAM[_]]], i)
// emitGlobalModule(s"val ${quote(lhs)}_$i = Module(new templates.NBufShiftRegFile(${dims(0)}, ${dims(1)}, 1, $depth, ${par}/${dims(0)}, $width))")
// emitGlobalModule(s"${quote(lhs)}_$i.io.reset := reset")
// }
case op@LUTLoad(lut,inds,en) =>
val dispatch = dispatchOf(lhs, lut).toList.head
val idquote = src"${lhs}_id"
emitGlobalWireMap(src"""${lhs}""",src"""Wire(${newWire(lhs.tp)})""")
val parent = parentOf(lhs).get
emit(src"""val ${idquote} = ${lut}_${dispatch}.connectRPort(List(${inds.map{a => src"${a}.r"}}), $en & ${DL(swap(parent, DatapathEn), enableRetimeMatch(en, lhs), true)})""")
emit(src"""${lhs}.raw := ${lut}_${dispatch}.io.data_out(${idquote}).raw""")
case op@VarRegNew(init) =>
case VarRegRead(reg) =>
case VarRegWrite(reg,v,en) =>
case Print(x) =>
case Println(x) =>
case PrintIf(_,_) =>
case PrintlnIf(_,_) =>
case _ => super.emitNode(lhs, rhs)
}
override protected def emitFileFooter() {
withStream(getStream("BufferControlCxns")) {
nbufs.foreach{ case (mem, i) =>
val info = bufferControlInfo(mem, i)
info.zipWithIndex.foreach{ case (inf, port) =>
emit(src"""${mem}_${i}.connectStageCtrl(${DL(swap(quote(inf._1), Done), 1, true)}, ${swap(quote(inf._1), BaseEn)}, List(${port})) ${inf._2}""")
}
}
}
super.emitFileFooter()
}
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/codegen/chiselgen/ChiselGenRegFile.scala | Scala | mit | 11,082 |
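ChiselGenRegFile handles its own IR nodes in emitNode and defers everything else to super.emitNode, so the codegen traits stack into a chain of responsibility. A minimal sketch of that stackable-trait pattern, detached from the Spatial IR:

trait Gen {
  def emitNode(node: String): String = s"<unhandled: $node>"
}
trait GenSRAM extends Gen {
  override def emitNode(n: String): String =
    if (n == "SRAMNew") "emit SRAM module" else super.emitNode(n)
}
trait GenRegFile extends GenSRAM {
  override def emitNode(n: String): String =
    if (n == "RegFileNew") "emit shift-reg file" else super.emitNode(n)
}
object Backend extends GenRegFile
// Backend.emitNode("RegFileNew") -> "emit shift-reg file"
// Backend.emitNode("FIFONew")    -> "<unhandled: FIFONew>" (fell through the chain)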
package varys.examples.tracedriven
import java.io._
import java.net._
import varys.util.AkkaUtils
import varys.{Logging, Utils}
import varys.framework.client._
import varys.framework._
import scala.concurrent.duration._
import scala.concurrent.{Future, Await, ExecutionContext}
/**
* Created by wakira on 15-7-17.
*/
case class WorkerOnline()
case class PutComplete()
case class GetComplete()
object Worker extends Logging {
class TestListener extends ClientListener with Logging {
def connected(id: String) {
logInfo("Connected to master, got client ID " + id)
}
def disconnected() {
logInfo("Disconnected from master")
System.exit(0)
}
}
private val traceMasterUrlRegex = "([^:]+):([0-9]+)".r
// ExecutionContext for Futures
implicit val futureExecContext = ExecutionContext.fromExecutor(Utils.newDaemonCachedThreadPool())
var sock: Socket = null
var oos: ObjectOutputStream = null
var ois: ObjectInputStream = null
var jobMission : JobMission = null
private def createSocket(host: String, port: Int): Socket = {
var retriesLeft = TraceUtils.WORKER_NUM_RETRIES
while (retriesLeft > 0) {
try {
val sock = new Socket(host, port)
return sock
} catch {
case e: Exception => {
logWarning("Failed to connect to " + host + ":" + port + " due to " + e.toString)
}
}
Thread.sleep(TraceUtils.WORKER_RETRY_INTERVAL_MS)
retriesLeft -= 1
}
null
}
def main(args: Array[String]) {
if (args.length < 2) {
//println("USAGE: TraceWorker <varysMasterUrl> <traceMasterUrl> <networkInterface>")
println("USAGE: TraceWorker <varysMasterUrl> <traceMasterUrl>")
System.exit(1)
}
val url = args(0)
val tUrl = args(1)
//val nInterface = args(2)
var masterHost: String = null
var masterPort: Int = 0
tUrl match {
case traceMasterUrlRegex(h, p) =>
masterHost = h
masterPort = p.toInt
case _ =>
logError("Invalid traceMasterUrl: " + tUrl)
logInfo("traceMasterUrl should be given as host:port")
System.exit(1)
}
// Connect to trace master, retry silently if required
sock = createSocket(masterHost, masterPort)
if (sock == null) {
System.exit(1)
}
oos = new ObjectOutputStream(sock.getOutputStream)
oos.flush()
ois = new ObjectInputStream(sock.getInputStream)
// Mark start
oos.writeObject(WorkerOnline())
oos.flush()
// Receive JobMission
jobMission = ois.readObject.asInstanceOf[JobMission]
logInfo("Received JobMission")
val listener = new TestListener
val client = new VarysClient("TraceWorker", url, listener)
client.start()
//client.startDNBD(5678, nInterface)
//Thread.sleep(5000)
logInfo("Varys start Putting")
/*
val putFutureList = Future.traverse(jobMission.putList)(x => Future{
client.putFake(x.id, jobMission.coflowId, x.size, 1)
logInfo("Varys put id " + x.id + " with size " + x.size.toString)
})
Await.result(putFutureList, Duration.Inf)
*/
jobMission.putList.foreach(x => {
client.putFake(x.id, jobMission.coflowId, x.size, 1)
logInfo("Varys put id " + x.id + " with size " + x.size.toString)
})
logInfo("Varys Put Completed")
oos.writeObject(PutComplete())
oos.flush()
ois.readObject().asInstanceOf[StartGetting]
logInfo("Received StartGetting")
//Thread.sleep(1000) // FIXME for debug
if (jobMission.getList.nonEmpty) {
logInfo("Varys start Getting")
val getFutureList = Future.traverse(jobMission.getList)(x => Future {
client.getFake(x.id, jobMission.coflowId)
logInfo("asking Varys to get id " + x.id)
})
Await.result(getFutureList, Duration.Inf)
logInfo("Get Complete")
}
oos.writeObject(GetComplete())
oos.flush()
ois.readObject().asInstanceOf[StopWorker]
logInfo("Worker finished")
if (sock != null)
sock.close()
System.exit(0)
}
}
| frankfzw/varys | examples/src/main/scala/varys/examples/tracedriven/Worker.scala | Scala | apache-2.0 | 4,043 |
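createSocket above returns null when its retries are exhausted, which every caller must remember to check (as main does). A hedged alternative with the same retry/backoff shape but an Option result and guaranteed tail recursion:

import java.io.IOException
import java.net.Socket
import scala.annotation.tailrec

object RetryConnect {
  @tailrec
  def connectWithRetry(host: String, port: Int, retries: Int, delayMs: Long): Option[Socket] =
    if (retries <= 0) None
    else {
      // Attempt outside the recursive call so the function stays tail-recursive.
      val attempt = try Some(new Socket(host, port)) catch { case _: IOException => None }
      attempt match {
        case some @ Some(_) => some
        case None =>
          Thread.sleep(delayMs)
          connectWithRetry(host, port, retries - 1, delayMs)
      }
    }
}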
package scala.tasty.internal
package dotc
package core
package tasty
trait TastyPrinters {
self: PicklerAPI =>
import Contexts._, Decorators._
import printing.Texts._
import TastyName._
import StdNames._
import TastyUnpickler._
import TastyBuffer.Addr
import util.Positions.{Position, offsetToInt}
import collection.mutable
class TastyPrinter(bytes: Array[Byte])(implicit ctx: Context) {
val unpickler = new TastyUnpickler(bytes)
import unpickler.{tastyName, unpickle}
def nameToString(name: TastyName): String = name match {
case Simple(name) => name.toString
case Qualified(qual, name) => nameRefToString(qual) + "." + nameRefToString(name)
case Signed(original, params, result) =>
i"${nameRefToString(original)}@${params.map(nameRefToString)}%,%:${nameRefToString(result)}"
case Expanded(prefix, original) => s"$prefix${nme.EXPAND_SEPARATOR}$original"
case ModuleClass(original) => nameRefToString(original) + "/MODULECLASS"
case SuperAccessor(accessed) => nameRefToString(accessed) + "/SUPERACCESSOR"
case DefaultGetter(meth, num) => nameRefToString(meth) + "/DEFAULTGETTER" + num
case Shadowed(original) => nameRefToString(original) + "/SHADOWED"
}
def nameRefToString(ref: NameRef): String = nameToString(tastyName(ref))
def printNames() =
for ((name, idx) <- tastyName.contents.zipWithIndex)
println(f"$idx%4d: " + nameToString(name))
def printContents(): Unit = {
println("Names:")
printNames()
println("Trees:")
unpickle(new TreeSectionUnpickler)
unpickle(new PositionSectionUnpickler)
}
class TreeSectionUnpickler extends SectionUnpickler[Unit]("ASTs") {
import TastyFormat._
def unpickle(reader: TastyReader, tastyName: TastyName.Table): Unit = {
import reader._
var indent = 0
def newLine() = print(f"\n ${index(currentAddr) - index(startAddr)}%5d:" + " " * indent)
def printNat() = print(" " + readNat())
def printName() = {
val idx = readNat()
print(" ") ;print(idx); print("["); print(nameRefToString(NameRef(idx))); print("]")
}
def printTree(): Unit = {
newLine()
val tag = readByte()
print(" ");print(astTagToString(tag))
indent += 2
if (tag >= firstLengthTreeTag) {
val len = readNat()
print(s"($len)")
val end = currentAddr + len
def printTrees() = until(end)(printTree())
tag match {
case RENAMED =>
printName(); printName()
case VALDEF | DEFDEF | TYPEDEF | TYPEPARAM | PARAM | NAMEDARG | BIND =>
printName(); printTrees()
case REFINEDtype =>
printTree(); printName(); printTrees()
case RETURN =>
printNat(); printTrees()
case METHODtype | POLYtype =>
printTree()
until(end) { printName(); printTree() }
case PARAMtype =>
printNat(); printNat()
case _ =>
printTrees()
}
if (currentAddr != end) {
println(s"incomplete read, current = $currentAddr, end = $end")
goto(end)
}
}
else if (tag >= firstNatASTTreeTag) {
tag match {
case IDENT | SELECT | TERMREF | TYPEREF | SELFDEF => printName()
case _ => printNat()
}
printTree()
}
else if (tag >= firstASTTreeTag)
printTree()
else if (tag >= firstNatTreeTag)
tag match {
case TERMREFpkg | TYPEREFpkg | STRINGconst | IMPORTED => printName()
case _ => printNat()
}
indent -= 2
}
println(i"start = ${reader.startAddr}, base = $base, current = $currentAddr, end = $endAddr")
println(s"${endAddr.index - startAddr.index} bytes of AST, base = $currentAddr")
while (!isAtEnd) {
printTree()
newLine()
}
}
}
class PositionSectionUnpickler extends SectionUnpickler[Unit]("Positions") {
def unpickle(reader: TastyReader, tastyName: TastyName.Table): Unit = {
print(s"${reader.endAddr.index - reader.currentAddr.index}")
val (totalRange, positions) = new PositionUnpickler(reader).unpickle()
println(s" position bytes in $totalRange:")
val sorted = positions.toSeq.sortBy(_._1.index)
for ((addr, pos) <- sorted) println(s"${addr.index}: ${offsetToInt(pos.start)} .. ${pos.end}")
}
}
}
}
| VladimirNik/tasty | plugin/src/main/scala/scala/tasty/internal/dotc/core/tasty/TastyPrinter.scala | Scala | bsd-3-clause | 4,494 |
/*
* StreamIn.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.fscape
package stream
import java.util
import akka.stream.Outlet
import scala.annotation.switch
import scala.collection.immutable.{IndexedSeq => Vec}
import scala.language.implicitConversions
object StreamIn {
def singleD(peer: OutD): StreamIn = new SingleD(peer)
def singleI(peer: OutI): StreamIn = new SingleI(peer)
def singleL(peer: OutL): StreamIn = new SingleL(peer)
def multiD(peer: OutD, numSinks: Int): StreamIn = new MultiD(peer, numSinks)
def multiI(peer: OutI, numSinks: Int): StreamIn = new MultiI(peer, numSinks)
def multiL(peer: OutL, numSinks: Int): StreamIn = new MultiL(peer, numSinks)
object unused extends StreamIn {
private def unsupported(method: String): Nothing =
throw new UnsupportedOperationException(s"StreamIn.unused.$method")
def toAny (implicit b: Builder): Outlet[BufLike] = unsupported("toAny")
def toDouble(implicit b: Builder): OutD = unsupported("toDouble")
def toInt (implicit b: Builder): OutI = unsupported("toInt")
def toLong (implicit b: Builder): OutL = unsupported("toLong")
def toElem (implicit b: Builder): Outlet[Buf] = unsupported("toElem")
def isInt : Boolean = false
def isLong : Boolean = false
def isDouble: Boolean = true // arbitrary
// type Elem = Nothing
def tpe: StreamType[A, Buf] = unsupported("tpe")
}
object DoubleType extends StreamType[Double, BufD] {
val ordering: Ordering[Double] = implicitly[Ordering[Double]] // Ordering.Double
final val zero = 0.0
final val minValue = Double.NegativeInfinity
final val maxValue = Double.PositiveInfinity
final def mkStreamOut(out: OutD): StreamOut = out
override def allocBuf()(implicit allocator: Allocator): BufD = allocator.borrowBufD()
def newArray(size: Int): Array[Double] = new Array(size)
def fill(a: Array[Double], off: Int, len: Int, elem: Double): Unit =
util.Arrays.fill(a, off, off + len, elem)
def clear(a: Array[Double], off: Int, len: Int): Unit =
util.Arrays.fill(a, off, off + len, 0.0)
def reverse(a: Array[Double], off: Int, len: Int): Unit = {
var i = off
var j = i + len - 1
while (i < j) {
val tmp = a(i)
a(i) = a(j)
a(j) = tmp
i += 1
j -= 1
}
}
def isInt : Boolean = false
def isLong : Boolean = false
def isDouble: Boolean = true
}
trait DoubleLike extends StreamIn {
final def isInt : Boolean = false
final def isLong : Boolean = false
final def isDouble: Boolean = true
final def toAny (implicit b: Builder): Outlet[BufLike] = toDouble.as[BufLike] // retarded Akka API. Why is Outlet not covariant?
final def toElem(implicit b: Builder): OutD = toDouble
final type A = Double
final type Buf = BufD
final def tpe: StreamType[A, Buf] = DoubleType
}
private final class SingleD(peer: OutD) extends DoubleLike {
private[this] var exhausted = false
def toDouble(implicit b: Builder): OutD = {
require(!exhausted)
exhausted = true
peer
}
def toInt(implicit builder: Builder): OutI = {
require(!exhausted)
exhausted = true
val allocator = builder.allocator
builder.map(peer, "D.toInt") { bufD =>
val bufI = allocator.borrowBufI()
val sz = bufD.size
bufI.size = sz
val a = bufD.buf
val b = bufI.buf
var i = 0
while (i < sz) {
val x = a(i)
b(i) = math.max(Int.MinValue, math.min(Int.MaxValue, math.round(x))).toInt
i += 1
}
bufD.release()
bufI
}
}
def toLong(implicit builder: Builder): OutL = {
require(!exhausted)
exhausted = true
val allocator = builder.allocator
builder.map(peer, "D.toLong") { bufD =>
val bufL = allocator.borrowBufL()
val sz = bufD.size
bufL.size = sz
val a = bufD.buf
val b = bufL.buf
var i = 0
while (i < sz) {
val x = a(i)
b(i) = math.round(x)
i += 1
}
bufD.release()
bufL
}
}
}
object IntType extends StreamType[Int, BufI] {
val ordering: Ordering[Int] = Ordering.Int
final val zero = 0
final val minValue = Int.MinValue
final val maxValue = Int.MaxValue
final def mkStreamOut(out: OutI): StreamOut = out
override def allocBuf()(implicit allocator: Allocator): BufI = allocator.borrowBufI()
def newArray(size: Int): Array[Int] = new Array(size)
def fill(a: Array[Int], off: Int, len: Int, elem: Int): Unit =
util.Arrays.fill(a, off, off + len, elem)
def clear(a: Array[Int], off: Int, len: Int): Unit =
util.Arrays.fill(a, off, off + len, 0)
def reverse(a: Array[Int], off: Int, len: Int): Unit = {
var i = off
var j = i + len - 1
while (i < j) {
val tmp = a(i)
a(i) = a(j)
a(j) = tmp
i += 1
j -= 1
}
}
def isInt : Boolean = true
def isLong : Boolean = false
def isDouble: Boolean = false
}
trait IntLike extends StreamIn {
final def isInt : Boolean = true
final def isLong : Boolean = false
final def isDouble: Boolean = false
final def toAny (implicit b: Builder): Outlet[BufLike] = toInt.as[BufLike]
final def toElem(implicit b: Builder): OutI = toInt
final type A = Int
final type Buf = BufI
final def tpe: StreamType[Int, BufI] = IntType
}
private final class SingleI(peer: OutI) extends IntLike {
private[this] var exhausted = false
def toInt(implicit b: Builder): OutI = {
require(!exhausted)
exhausted = true
peer
}
def toLong(implicit builder: Builder): OutL = {
require(!exhausted)
exhausted = true
val allocator = builder.allocator
builder.map(peer, "I.toLong") { bufI =>
val bufL = allocator.borrowBufL()
val sz = bufI.size
bufL.size = sz
val a = bufI.buf
val b = bufL.buf
var i = 0
while (i < sz) {
val x = a(i)
b(i) = x.toLong
i += 1
}
bufI.release()
bufL
}
}
def toDouble(implicit builder: Builder): OutD = {
require(!exhausted)
exhausted = true
val allocator = builder.allocator
builder.map(peer, "I.toDouble") { bufI =>
val bufD = allocator.borrowBufD()
val sz = bufI.size
bufD.size = sz
val a = bufI.buf
val b = bufD.buf
var i = 0
while (i < sz) {
val x = a(i)
b(i) = x.toDouble
i += 1
}
bufI.release()
bufD
}
}
}
object LongType extends StreamType[Long, BufL] {
val ordering: Ordering[Long] = Ordering.Long
final val zero = 0L
final val minValue = Long.MinValue
final val maxValue = Long.MaxValue
final def mkStreamOut(out: OutL): StreamOut = out
override def allocBuf()(implicit allocator: Allocator): BufL = allocator.borrowBufL()
def newArray(size: Int): Array[Long] = new Array(size)
def fill(a: Array[Long], off: Int, len: Int, elem: Long): Unit =
util.Arrays.fill(a, off, off + len, elem)
def clear(a: Array[Long], off: Int, len: Int): Unit =
util.Arrays.fill(a, off, off + len, 0L)
def reverse(a: Array[Long], off: Int, len: Int): Unit = {
var i = off
var j = i + len - 1
while (i < j) {
val tmp = a(i)
a(i) = a(j)
a(j) = tmp
i += 1
j -= 1
}
}
def isInt : Boolean = false
def isLong : Boolean = true
def isDouble: Boolean = false
}
trait LongLike extends StreamIn {
final def isInt : Boolean = false
final def isLong : Boolean = true
final def isDouble: Boolean = false
final def toAny (implicit b: Builder): Outlet[BufLike] = toLong.as[BufLike]
final def toElem(implicit b: Builder): OutL = toLong
final type A = Long
final type Buf = BufL
final def tpe: StreamType[Long, BufL] = LongType
}
private final class SingleL(peer: OutL) extends LongLike {
private[this] var exhausted = false
def toLong(implicit b: Builder): OutL = {
require(!exhausted)
exhausted = true
peer
}
def toInt(implicit builder: Builder): OutI = {
require(!exhausted)
exhausted = true
val allocator = builder.allocator
builder.map(peer, "L.toInt") { bufL =>
val bufI = allocator.borrowBufI()
val sz = bufL.size
bufI.size = sz
val a = bufL.buf
val b = bufI.buf
var i = 0
while (i < sz) {
val x = a(i)
b(i) = math.max(Int.MinValue, math.min(Int.MaxValue, x)).toInt
i += 1
}
bufL.release()
bufI
}
}
def toDouble(implicit builder: Builder): OutD = {
require(!exhausted)
exhausted = true
val allocator = builder.allocator
builder.map(peer, "L.toDouble") { bufL =>
val bufD = allocator.borrowBufD()
val sz = bufL.size
bufD.size = sz
val a = bufL.buf
val b = bufD.buf
var i = 0
while (i < sz) {
val x = a(i)
b(i) = x.toDouble
i += 1
}
bufL.release()
bufD
}
}
}
private final class MultiD(peer: OutD, numSinks: Int) extends DoubleLike {
private[this] var remain = numSinks
private[this] var broad: Vec[OutD] = _ // create lazily because we need stream.Builder
private def alloc()(implicit b: Builder): OutD = {
require(remain > 0)
if (broad == null) broad = Broadcast(peer, numSinks)
remain -= 1
val head +: tail = broad
broad = tail
head
}
def toDouble(implicit b: Builder): OutD = alloc()
def toInt (implicit b: Builder): OutI = singleD(alloc()).toInt // just reuse this functionality
def toLong (implicit b: Builder): OutL = singleD(alloc()).toLong // just reuse this functionality
}
private final class MultiI(peer: OutI, numSinks: Int) extends IntLike {
private[this] var remain = numSinks
private[this] var broad: Vec[OutI] = _ // create lazily because we need stream.Builder
private def alloc()(implicit b: Builder): OutI = {
require(remain > 0)
if (broad == null) broad = Broadcast(peer, numSinks)
remain -= 1
val head +: tail = broad
broad = tail
head
}
def toDouble(implicit b: Builder): OutD = singleI(alloc()).toDouble // just reuse this functionality
def toInt (implicit b: Builder): OutI = alloc()
def toLong (implicit b: Builder): OutL = singleI(alloc()).toLong // just reuse this functionality
}
private final class MultiL(peer: OutL, numSinks: Int) extends LongLike {
private[this] var remain = numSinks
private[this] var broad: Vec[OutL] = _ // create lazily because we need stream.Builder
private def alloc()(implicit b: Builder): OutL = {
require(remain > 0)
if (broad == null) broad = Broadcast(peer, numSinks)
remain -= 1
val head +: tail = broad
broad = tail
head
}
def toDouble(implicit b: Builder): OutD = singleL(alloc()).toDouble // just reuse this functionality
def toInt (implicit b: Builder): OutI = singleL(alloc()).toInt // just reuse this functionality
def toLong (implicit b: Builder): OutL = alloc()
}
}
trait StreamIn {
type A
type Buf >: Null <: BufElem[A]
def isInt : Boolean
def isLong : Boolean
def isDouble: Boolean
def toInt (implicit b: Builder): OutI
def toLong (implicit b: Builder): OutL
def toDouble(implicit b: Builder): OutD
def toAny (implicit b: Builder): Outlet[BufLike]
def toElem (implicit b: Builder): Outlet[Buf]
implicit def tpe: StreamType[A, Buf]
}
object StreamType {
implicit def int : StreamType[Int , BufI] = StreamIn.IntType
implicit def double: StreamType[Double, BufD] = StreamIn.DoubleType
implicit def long : StreamType[Long , BufL] = StreamIn.LongType
}
trait StreamType[@specialized(Args) A, Buf <: BufElem[A]] {
implicit val ordering: Ordering[A]
def mkStreamOut(out: Outlet[Buf]): StreamOut
def allocBuf()(implicit allocator: Allocator): Buf
def fill(a: Array[A], off: Int, len: Int, elem: A): Unit
def clear(a: Array[A], off: Int, len: Int): Unit
def reverse(a: Array[A], off: Int, len: Int): Unit
def zero: A
def minValue: A
def maxValue: A
// def reverse(a: Array[A], off: Int, len: Int): Unit
def newArray(size: Int): Array[A]
def isInt : Boolean
def isLong : Boolean
def isDouble: Boolean
}
object StreamOut {
implicit def fromDouble (peer: OutD ): StreamOut = new StreamOutD(peer)
implicit def fromDoubleVec(peer: Vec[OutD]): Vec[StreamOut] = peer.map(new StreamOutD(_))
implicit def fromInt (peer: OutI ): StreamOut = new StreamOutI(peer)
implicit def fromIntVec (peer: Vec[OutI]): Vec[StreamOut] = peer.map(new StreamOutI(_))
implicit def fromLong (peer: OutL ): StreamOut = new StreamOutL(peer)
implicit def fromLongVec (peer: Vec[OutL]): Vec[StreamOut] = peer.map(new StreamOutL(_))
private final class StreamOutD(peer: OutD) extends StreamOut {
override def toString = s"StreamOut($peer)"
def toIn(numChildren: Int)(implicit b: stream.Builder): StreamIn = (numChildren: @switch) match {
case 0 =>
SinkIgnore(peer)
StreamIn.unused
case 1 => StreamIn.singleD(peer)
case _ => StreamIn.multiD (peer, numChildren)
}
}
private final class StreamOutI(peer: OutI) extends StreamOut {
override def toString = s"StreamOut($peer)"
def toIn(numChildren: Int)(implicit b: stream.Builder): StreamIn = (numChildren: @switch) match {
case 0 =>
SinkIgnore(peer)
StreamIn.unused
case 1 => StreamIn.singleI(peer)
case _ => StreamIn.multiI (peer, numChildren)
}
}
private final class StreamOutL(peer: OutL) extends StreamOut {
override def toString = s"StreamOut($peer)"
def toIn(numChildren: Int)(implicit b: stream.Builder): StreamIn = (numChildren: @switch) match {
case 0 =>
SinkIgnore(peer)
StreamIn.unused
case 1 => StreamIn.singleL(peer)
case _ => StreamIn.multiL (peer, numChildren)
}
}
}
trait StreamOut {
def toIn(numChildren: Int)(implicit b: stream.Builder): StreamIn
}
| Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/stream/StreamIn.scala | Scala | agpl-3.0 | 14,899 |
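The D-to-I path in SingleD.toInt above rounds first and then clamps to the Int range, rather than letting the narrowing conversion wrap. That clamping logic, lifted out as a standalone sketch:

object ClampSketch extends App {
  // math.round(Double) returns Long, so the max/min comparisons happen in
  // Long space before the final narrowing toInt.
  def doubleToClampedInt(x: Double): Int =
    math.max(Int.MinValue, math.min(Int.MaxValue, math.round(x))).toInt

  assert(doubleToClampedInt(2.4) == 2)              // rounded
  assert(doubleToClampedInt(1e12) == Int.MaxValue)  // clamped, not wrapped
  assert(doubleToClampedInt(-1e12) == Int.MinValue)
}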
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.runtime.universe.{typeTag, TypeTag}
import scala.util.Try
import scala.util.control.NonFatal
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.sql.api.java._
import org.apache.spark.sql.catalyst.ScalaReflection
import org.apache.spark.sql.catalyst.analysis.{Star, UnresolvedFunction}
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical.{HintInfo, ResolvedHint}
import org.apache.spark.sql.execution.SparkSqlParser
import org.apache.spark.sql.expressions.UserDefinedFunction
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* Functions available for DataFrame operations.
*
* @groupname udf_funcs UDF functions
* @groupname agg_funcs Aggregate functions
* @groupname datetime_funcs Date time functions
* @groupname sort_funcs Sorting functions
* @groupname normal_funcs Non-aggregate functions
* @groupname math_funcs Math functions
* @groupname misc_funcs Misc functions
* @groupname window_funcs Window functions
* @groupname string_funcs String functions
* @groupname collection_funcs Collection functions
* @groupname Ungrouped Support functions for DataFrames
* @since 1.3.0
*/
@InterfaceStability.Stable
// scalastyle:off
object functions {
// scalastyle:on
private def withExpr(expr: Expression): Column = Column(expr)
private def withAggregateFunction(
func: AggregateFunction,
isDistinct: Boolean = false): Column = {
Column(func.toAggregateExpression(isDistinct))
}
/**
* Returns a [[Column]] based on the given column name.
*
* @group normal_funcs
* @since 1.3.0
*/
def col(colName: String): Column = Column(colName)
/**
* Returns a [[Column]] based on the given column name. Alias of [[col]].
*
* @group normal_funcs
* @since 1.3.0
*/
def column(colName: String): Column = Column(colName)
/**
* Creates a [[Column]] of literal value.
*
* The passed in object is returned directly if it is already a [[Column]].
* If the object is a Scala Symbol, it is converted into a [[Column]] also.
* Otherwise, a new [[Column]] is created to represent the literal value.
*
* @group normal_funcs
* @since 1.3.0
*/
def lit(literal: Any): Column = typedLit(literal)
/**
* Creates a [[Column]] of literal value.
*
* The passed in object is returned directly if it is already a [[Column]].
* If the object is a Scala Symbol, it is converted into a [[Column]] also.
* Otherwise, a new [[Column]] is created to represent the literal value.
* The difference between this function and [[lit]] is that this function
* can handle parameterized scala types e.g.: List, Seq and Map.
*
* @group normal_funcs
* @since 2.2.0
*/
def typedLit[T : TypeTag](literal: T): Column = literal match {
case c: Column => c
case s: Symbol => new ColumnName(s.name)
case _ => Column(Literal.create(literal))
}
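  // Editorial sketch (not part of the original source): `lit` cannot build
  // literals from parameterized types, but `typedLit` can because it captures a
  // TypeTag. Assuming any input Dataset:
  private def typedLitExample(df: Dataset[_]): DataFrame =
    df.select(
      typedLit(Seq(1, 2, 3)).as("int_array"),      // array<int> literal
      typedLit(Map("a" -> 1, "b" -> 2)).as("map")  // map<string,int> literal
    )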
//////////////////////////////////////////////////////////////////////////////////////////////
// Sort functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Returns a sort expression based on ascending order of the column.
* {{{
* df.sort(asc("dept"), desc("age"))
* }}}
*
* @group sort_funcs
* @since 1.3.0
*/
def asc(columnName: String): Column = Column(columnName).asc
/**
* Returns a sort expression based on ascending order of the column,
* and null values return before non-null values.
* {{{
* df.sort(asc_nulls_first("dept"), desc("age"))
* }}}
*
* @group sort_funcs
* @since 2.1.0
*/
def asc_nulls_first(columnName: String): Column = Column(columnName).asc_nulls_first
/**
* Returns a sort expression based on ascending order of the column,
* and null values appear after non-null values.
* {{{
* df.sort(asc_nulls_last("dept"), desc("age"))
* }}}
*
* @group sort_funcs
* @since 2.1.0
*/
def asc_nulls_last(columnName: String): Column = Column(columnName).asc_nulls_last
/**
* Returns a sort expression based on the descending order of the column.
* {{{
* df.sort(asc("dept"), desc("age"))
* }}}
*
* @group sort_funcs
* @since 1.3.0
*/
def desc(columnName: String): Column = Column(columnName).desc
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear before non-null values.
* {{{
* df.sort(asc("dept"), desc_nulls_first("age"))
* }}}
*
* @group sort_funcs
* @since 2.1.0
*/
def desc_nulls_first(columnName: String): Column = Column(columnName).desc_nulls_first
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear after non-null values.
* {{{
* df.sort(asc("dept"), desc_nulls_last("age"))
* }}}
*
* @group sort_funcs
* @since 2.1.0
*/
def desc_nulls_last(columnName: String): Column = Column(columnName).desc_nulls_last
//////////////////////////////////////////////////////////////////////////////////////////////
// Aggregate functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* @group agg_funcs
* @since 1.3.0
*/
@deprecated("Use approx_count_distinct", "2.1.0")
def approxCountDistinct(e: Column): Column = approx_count_distinct(e)
/**
* @group agg_funcs
* @since 1.3.0
*/
@deprecated("Use approx_count_distinct", "2.1.0")
def approxCountDistinct(columnName: String): Column = approx_count_distinct(columnName)
/**
* @group agg_funcs
* @since 1.3.0
*/
@deprecated("Use approx_count_distinct", "2.1.0")
def approxCountDistinct(e: Column, rsd: Double): Column = approx_count_distinct(e, rsd)
/**
* @group agg_funcs
* @since 1.3.0
*/
@deprecated("Use approx_count_distinct", "2.1.0")
def approxCountDistinct(columnName: String, rsd: Double): Column = {
approx_count_distinct(Column(columnName), rsd)
}
/**
* Aggregate function: returns the approximate number of distinct items in a group.
*
* @group agg_funcs
* @since 2.1.0
*/
def approx_count_distinct(e: Column): Column = withAggregateFunction {
HyperLogLogPlusPlus(e.expr)
}
/**
* Aggregate function: returns the approximate number of distinct items in a group.
*
* @group agg_funcs
* @since 2.1.0
*/
def approx_count_distinct(columnName: String): Column = approx_count_distinct(column(columnName))
/**
* Aggregate function: returns the approximate number of distinct items in a group.
*
* @param rsd maximum estimation error allowed (default = 0.05)
*
* @group agg_funcs
* @since 2.1.0
*/
def approx_count_distinct(e: Column, rsd: Double): Column = withAggregateFunction {
HyperLogLogPlusPlus(e.expr, rsd, 0, 0)
}
/**
* Aggregate function: returns the approximate number of distinct items in a group.
*
* @param rsd maximum estimation error allowed (default = 0.05)
*
* @group agg_funcs
* @since 2.1.0
*/
def approx_count_distinct(columnName: String, rsd: Double): Column = {
approx_count_distinct(Column(columnName), rsd)
}
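  // Editorial sketch (not part of the original source): trading accuracy for
  // speed with HyperLogLog++; a smaller `rsd` means a tighter estimate at the
  // cost of more memory. Assumes a DataFrame with a `user_id` column:
  private def approxCountDistinctExample(df: DataFrame): DataFrame =
    df.agg(
      countDistinct(col("user_id")).as("exact"),
      approx_count_distinct(col("user_id"), rsd = 0.01).as("approx")
    )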
/**
* Aggregate function: returns the average of the values in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def avg(e: Column): Column = withAggregateFunction { Average(e.expr) }
/**
* Aggregate function: returns the average of the values in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def avg(columnName: String): Column = avg(Column(columnName))
/**
* Aggregate function: returns a list of objects with duplicates.
*
* @note The function is non-deterministic because the order of collected results depends
 * on the order of the rows, which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.6.0
*/
def collect_list(e: Column): Column = withAggregateFunction { CollectList(e.expr) }
/**
* Aggregate function: returns a list of objects with duplicates.
*
* @note The function is non-deterministic because the order of collected results depends
 * on the order of the rows, which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.6.0
*/
def collect_list(columnName: String): Column = collect_list(Column(columnName))
/**
* Aggregate function: returns a set of objects with duplicate elements eliminated.
*
* @note The function is non-deterministic because the order of collected results depends
 * on the order of the rows, which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.6.0
*/
def collect_set(e: Column): Column = withAggregateFunction { CollectSet(e.expr) }
/**
* Aggregate function: returns a set of objects with duplicate elements eliminated.
*
* @note The function is non-deterministic because the order of collected results depends
 * on the order of the rows, which may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.6.0
*/
def collect_set(columnName: String): Column = collect_set(Column(columnName))
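  // Editorial sketch (not part of the original source): collect_list keeps
  // duplicates while collect_set drops them; neither guarantees element order.
  // Assumes `dept` and `name` columns:
  private def collectExample(df: DataFrame): DataFrame =
    df.groupBy(col("dept")).agg(
      collect_list(col("name")).as("all_names"),
      collect_set(col("name")).as("distinct_names")
    )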
/**
* Aggregate function: returns the Pearson Correlation Coefficient for two columns.
*
* @group agg_funcs
* @since 1.6.0
*/
def corr(column1: Column, column2: Column): Column = withAggregateFunction {
Corr(column1.expr, column2.expr)
}
/**
* Aggregate function: returns the Pearson Correlation Coefficient for two columns.
*
* @group agg_funcs
* @since 1.6.0
*/
def corr(columnName1: String, columnName2: String): Column = {
corr(Column(columnName1), Column(columnName2))
}
/**
* Aggregate function: returns the number of items in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def count(e: Column): Column = withAggregateFunction {
e.expr match {
// Turn count(*) into count(1)
case s: Star => Count(Literal(1))
case _ => Count(e.expr)
}
}
/**
* Aggregate function: returns the number of items in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def count(columnName: String): TypedColumn[Any, Long] =
count(Column(columnName)).as(ExpressionEncoder[Long]())
/**
* Aggregate function: returns the number of distinct items in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
@scala.annotation.varargs
def countDistinct(expr: Column, exprs: Column*): Column = {
withAggregateFunction(Count.apply((expr +: exprs).map(_.expr)), isDistinct = true)
}
/**
* Aggregate function: returns the number of distinct items in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
@scala.annotation.varargs
def countDistinct(columnName: String, columnNames: String*): Column =
countDistinct(Column(columnName), columnNames.map(Column.apply) : _*)
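  // Editorial sketch (not part of the original source): with several columns,
  // countDistinct counts distinct tuples, e.g. distinct (city, state) pairs:
  private def countDistinctExample(df: DataFrame): DataFrame =
    df.agg(countDistinct(col("city"), col("state")).as("distinct_locations"))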
/**
* Aggregate function: returns the population covariance for two columns.
*
* @group agg_funcs
* @since 2.0.0
*/
def covar_pop(column1: Column, column2: Column): Column = withAggregateFunction {
CovPopulation(column1.expr, column2.expr)
}
/**
* Aggregate function: returns the population covariance for two columns.
*
* @group agg_funcs
* @since 2.0.0
*/
def covar_pop(columnName1: String, columnName2: String): Column = {
covar_pop(Column(columnName1), Column(columnName2))
}
/**
* Aggregate function: returns the sample covariance for two columns.
*
* @group agg_funcs
* @since 2.0.0
*/
def covar_samp(column1: Column, column2: Column): Column = withAggregateFunction {
CovSample(column1.expr, column2.expr)
}
/**
* Aggregate function: returns the sample covariance for two columns.
*
* @group agg_funcs
* @since 2.0.0
*/
def covar_samp(columnName1: String, columnName2: String): Column = {
covar_samp(Column(columnName1), Column(columnName2))
}
/**
* Aggregate function: returns the first value in a group.
*
* The function by default returns the first values it sees. It will return the first non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
 * @note The function is non-deterministic because its result depends on the order of the rows, which
* may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 2.0.0
*/
def first(e: Column, ignoreNulls: Boolean): Column = withAggregateFunction {
new First(e.expr, Literal(ignoreNulls))
}
/**
* Aggregate function: returns the first value of a column in a group.
*
* The function by default returns the first values it sees. It will return the first non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
 * @note The function is non-deterministic because its result depends on the order of the rows, which
* may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 2.0.0
*/
def first(columnName: String, ignoreNulls: Boolean): Column = {
first(Column(columnName), ignoreNulls)
}
/**
* Aggregate function: returns the first value in a group.
*
* The function by default returns the first values it sees. It will return the first non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
 * @note The function is non-deterministic because its result depends on the order of the rows, which
* may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.3.0
*/
def first(e: Column): Column = first(e, ignoreNulls = false)
/**
* Aggregate function: returns the first value of a column in a group.
*
* The function by default returns the first values it sees. It will return the first non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
 * @note The function is non-deterministic because its result depends on the order of the rows, which
* may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.3.0
*/
def first(columnName: String): Column = first(Column(columnName))
/**
* Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
 * or not; returns 1 for aggregated or 0 for not aggregated in the result set.
*
* @group agg_funcs
* @since 2.0.0
*/
def grouping(e: Column): Column = Column(Grouping(e.expr))
/**
* Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
 * or not; returns 1 for aggregated or 0 for not aggregated in the result set.
*
* @group agg_funcs
* @since 2.0.0
*/
def grouping(columnName: String): Column = grouping(Column(columnName))
/**
* Aggregate function: returns the level of grouping, equals to
*
* {{{
 * (grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
* }}}
*
* @note The list of columns should match with grouping columns exactly, or empty (means all the
* grouping columns).
*
* @group agg_funcs
* @since 2.0.0
*/
def grouping_id(cols: Column*): Column = Column(GroupingID(cols.map(_.expr)))
/**
* Aggregate function: returns the level of grouping, equals to
*
* {{{
 * (grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
* }}}
*
* @note The list of columns should match with grouping columns exactly.
*
* @group agg_funcs
* @since 2.0.0
*/
def grouping_id(colName: String, colNames: String*): Column = {
grouping_id((Seq(colName) ++ colNames).map(n => Column(n)) : _*)
}
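  // Editorial sketch (not part of the original source): grouping and
  // grouping_id are meaningful under cube/rollup, where grouping_id encodes
  // which grouping columns are aggregated at each grouping-set level:
  private def groupingExample(df: DataFrame): DataFrame =
    df.cube("dept", "gender").agg(
      grouping_id(col("dept"), col("gender")).as("level"),
      sum(col("salary")).as("total")
    )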
/**
* Aggregate function: returns the kurtosis of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def kurtosis(e: Column): Column = withAggregateFunction { Kurtosis(e.expr) }
/**
* Aggregate function: returns the kurtosis of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def kurtosis(columnName: String): Column = kurtosis(Column(columnName))
/**
* Aggregate function: returns the last value in a group.
*
* The function by default returns the last values it sees. It will return the last non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
 * @note The function is non-deterministic because its result depends on the order of the rows, which
* may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 2.0.0
*/
def last(e: Column, ignoreNulls: Boolean): Column = withAggregateFunction {
new Last(e.expr, Literal(ignoreNulls))
}
/**
* Aggregate function: returns the last value of the column in a group.
*
* The function by default returns the last values it sees. It will return the last non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
 * @note The function is non-deterministic because its result depends on the order of the rows, which
* may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 2.0.0
*/
def last(columnName: String, ignoreNulls: Boolean): Column = {
last(Column(columnName), ignoreNulls)
}
/**
* Aggregate function: returns the last value in a group.
*
* The function by default returns the last values it sees. It will return the last non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
 * @note The function is non-deterministic because its result depends on the order of the rows, which
* may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.3.0
*/
def last(e: Column): Column = last(e, ignoreNulls = false)
/**
* Aggregate function: returns the last value of the column in a group.
*
* The function by default returns the last values it sees. It will return the last non-null
* value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
*
 * @note The function is non-deterministic because its result depends on the order of the rows, which
* may be non-deterministic after a shuffle.
*
* @group agg_funcs
* @since 1.3.0
*/
def last(columnName: String): Column = last(Column(columnName), ignoreNulls = false)
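  // Editorial sketch (not part of the original source): per the notes above,
  // first/last are order-dependent and therefore best-effort after a shuffle;
  // a common (but not guaranteed) pattern is sorting before grouping:
  private def firstLastExample(df: DataFrame): DataFrame =
    df.orderBy(col("event_time")).groupBy(col("user_id")).agg(
      first(col("page"), ignoreNulls = true).as("first_page"),
      last(col("page"), ignoreNulls = true).as("last_page")
    )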
/**
* Aggregate function: returns the maximum value of the expression in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def max(e: Column): Column = withAggregateFunction { Max(e.expr) }
/**
* Aggregate function: returns the maximum value of the column in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def max(columnName: String): Column = max(Column(columnName))
/**
* Aggregate function: returns the average of the values in a group.
* Alias for avg.
*
* @group agg_funcs
* @since 1.4.0
*/
def mean(e: Column): Column = avg(e)
/**
* Aggregate function: returns the average of the values in a group.
* Alias for avg.
*
* @group agg_funcs
* @since 1.4.0
*/
def mean(columnName: String): Column = avg(columnName)
/**
* Aggregate function: returns the minimum value of the expression in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def min(e: Column): Column = withAggregateFunction { Min(e.expr) }
/**
* Aggregate function: returns the minimum value of the column in a group.
*
* @group agg_funcs
* @since 1.3.0
*/
def min(columnName: String): Column = min(Column(columnName))
/**
* Aggregate function: returns the skewness of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def skewness(e: Column): Column = withAggregateFunction { Skewness(e.expr) }
/**
* Aggregate function: returns the skewness of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def skewness(columnName: String): Column = skewness(Column(columnName))
/**
* Aggregate function: alias for `stddev_samp`.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev(e: Column): Column = withAggregateFunction { StddevSamp(e.expr) }
/**
* Aggregate function: alias for `stddev_samp`.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev(columnName: String): Column = stddev(Column(columnName))
/**
* Aggregate function: returns the sample standard deviation of
* the expression in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev_samp(e: Column): Column = withAggregateFunction { StddevSamp(e.expr) }
/**
* Aggregate function: returns the sample standard deviation of
* the expression in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev_samp(columnName: String): Column = stddev_samp(Column(columnName))
/**
* Aggregate function: returns the population standard deviation of
* the expression in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev_pop(e: Column): Column = withAggregateFunction { StddevPop(e.expr) }
/**
* Aggregate function: returns the population standard deviation of
* the expression in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def stddev_pop(columnName: String): Column = stddev_pop(Column(columnName))
/**
* Aggregate function: returns the sum of all values in the expression.
*
* @group agg_funcs
* @since 1.3.0
*/
def sum(e: Column): Column = withAggregateFunction { Sum(e.expr) }
/**
* Aggregate function: returns the sum of all values in the given column.
*
* @group agg_funcs
* @since 1.3.0
*/
def sum(columnName: String): Column = sum(Column(columnName))
/**
* Aggregate function: returns the sum of distinct values in the expression.
*
* @group agg_funcs
* @since 1.3.0
*/
def sumDistinct(e: Column): Column = withAggregateFunction(Sum(e.expr), isDistinct = true)
/**
* Aggregate function: returns the sum of distinct values in the expression.
*
* @group agg_funcs
* @since 1.3.0
*/
def sumDistinct(columnName: String): Column = sumDistinct(Column(columnName))
/**
* Aggregate function: alias for `var_samp`.
*
* @group agg_funcs
* @since 1.6.0
*/
def variance(e: Column): Column = withAggregateFunction { VarianceSamp(e.expr) }
/**
* Aggregate function: alias for `var_samp`.
*
* @group agg_funcs
* @since 1.6.0
*/
def variance(columnName: String): Column = variance(Column(columnName))
/**
* Aggregate function: returns the unbiased variance of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def var_samp(e: Column): Column = withAggregateFunction { VarianceSamp(e.expr) }
/**
* Aggregate function: returns the unbiased variance of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def var_samp(columnName: String): Column = var_samp(Column(columnName))
/**
* Aggregate function: returns the population variance of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def var_pop(e: Column): Column = withAggregateFunction { VariancePop(e.expr) }
/**
* Aggregate function: returns the population variance of the values in a group.
*
* @group agg_funcs
* @since 1.6.0
*/
def var_pop(columnName: String): Column = var_pop(Column(columnName))
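  // Editorial note (sketch, not part of the original source): the _samp
  // variants divide by (n - 1) (unbiased; this is what variance/stddev alias),
  // while the _pop variants divide by n:
  private def varianceExample(df: DataFrame): DataFrame =
    df.agg(var_samp(col("x")).as("s2"), var_pop(col("x")).as("sigma2"))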
//////////////////////////////////////////////////////////////////////////////////////////////
// Window functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Window function: returns the special frame boundary that represents the first row in the
* window partition.
*
* @group window_funcs
* @since 2.3.0
*/
def unboundedPreceding(): Column = Column(UnboundedPreceding)
/**
* Window function: returns the special frame boundary that represents the last row in the
* window partition.
*
* @group window_funcs
* @since 2.3.0
*/
def unboundedFollowing(): Column = Column(UnboundedFollowing)
/**
* Window function: returns the special frame boundary that represents the current row in the
* window partition.
*
* @group window_funcs
* @since 2.3.0
*/
def currentRow(): Column = Column(CurrentRow)
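  // Editorial note (sketch, not part of the original source): these special
  // columns are intended for window frame definitions, e.g. (assuming this
  // Spark version's Column-based rangeBetween overload):
  //   Window.partitionBy(col("dept")).orderBy(col("salary"))
  //     .rangeBetween(unboundedPreceding(), currentRow())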
/**
* Window function: returns the cumulative distribution of values within a window partition,
* i.e. the fraction of rows that are below the current row.
*
* {{{
* N = total number of rows in the partition
* cumeDist(x) = number of values before (and including) x / N
* }}}
*
* @group window_funcs
* @since 1.6.0
*/
def cume_dist(): Column = withExpr { new CumeDist }
/**
* Window function: returns the rank of rows within a window partition, without any gaps.
*
 * The difference between rank and dense_rank is that dense_rank leaves no gaps in the ranking
 * sequence when there are ties. That is, if you were ranking a competition using dense_rank
 * and had three people tie for second place, you would say that all three were in second
 * place and that the next person came in third. Rank would give sequential numbers, so
 * the person who came in third place (after the ties) would register as coming in fifth.
*
* This is equivalent to the DENSE_RANK function in SQL.
*
* @group window_funcs
* @since 1.6.0
*/
def dense_rank(): Column = withExpr { new DenseRank }
/**
* Window function: returns the value that is `offset` rows before the current row, and
 * `null` if there are fewer than `offset` rows before the current row. For example,
* an `offset` of one will return the previous row at any given point in the window partition.
*
* This is equivalent to the LAG function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lag(e: Column, offset: Int): Column = lag(e, offset, null)
/**
* Window function: returns the value that is `offset` rows before the current row, and
 * `null` if there are fewer than `offset` rows before the current row. For example,
* an `offset` of one will return the previous row at any given point in the window partition.
*
* This is equivalent to the LAG function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lag(columnName: String, offset: Int): Column = lag(columnName, offset, null)
/**
* Window function: returns the value that is `offset` rows before the current row, and
 * `defaultValue` if there are fewer than `offset` rows before the current row. For example,
* an `offset` of one will return the previous row at any given point in the window partition.
*
* This is equivalent to the LAG function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lag(columnName: String, offset: Int, defaultValue: Any): Column = {
lag(Column(columnName), offset, defaultValue)
}
/**
* Window function: returns the value that is `offset` rows before the current row, and
 * `defaultValue` if there are fewer than `offset` rows before the current row. For example,
* an `offset` of one will return the previous row at any given point in the window partition.
*
* This is equivalent to the LAG function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lag(e: Column, offset: Int, defaultValue: Any): Column = withExpr {
Lag(e.expr, Literal(offset), Literal(defaultValue))
}
/**
* Window function: returns the value that is `offset` rows after the current row, and
 * `null` if there are fewer than `offset` rows after the current row. For example,
* an `offset` of one will return the next row at any given point in the window partition.
*
* This is equivalent to the LEAD function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lead(columnName: String, offset: Int): Column = { lead(columnName, offset, null) }
/**
* Window function: returns the value that is `offset` rows after the current row, and
 * `null` if there are fewer than `offset` rows after the current row. For example,
* an `offset` of one will return the next row at any given point in the window partition.
*
* This is equivalent to the LEAD function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lead(e: Column, offset: Int): Column = { lead(e, offset, null) }
/**
* Window function: returns the value that is `offset` rows after the current row, and
 * `defaultValue` if there are fewer than `offset` rows after the current row. For example,
* an `offset` of one will return the next row at any given point in the window partition.
*
* This is equivalent to the LEAD function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lead(columnName: String, offset: Int, defaultValue: Any): Column = {
lead(Column(columnName), offset, defaultValue)
}
/**
* Window function: returns the value that is `offset` rows after the current row, and
 * `defaultValue` if there are fewer than `offset` rows after the current row. For example,
* an `offset` of one will return the next row at any given point in the window partition.
*
* This is equivalent to the LEAD function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def lead(e: Column, offset: Int, defaultValue: Any): Column = withExpr {
Lead(e.expr, Literal(offset), Literal(defaultValue))
}
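  // Editorial sketch (not part of the original source): lag/lead read at a
  // fixed offset within the window ordering; rows near the partition edge fall
  // back to the default (here 0.0 for lag, null for lead). Assumes `ticker`,
  // `date` and `price` columns:
  private def lagLeadExample(df: DataFrame): DataFrame = {
    val w = expressions.Window.partitionBy(col("ticker")).orderBy(col("date"))
    df.select(
      col("price"),
      lag(col("price"), 1, 0.0).over(w).as("prev_price"),
      lead(col("price"), 1).over(w).as("next_price")
    )
  }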
/**
* Window function: returns the ntile group id (from 1 to `n` inclusive) in an ordered window
* partition. For example, if `n` is 4, the first quarter of the rows will get value 1, the second
* quarter will get 2, the third quarter will get 3, and the last quarter will get 4.
*
* This is equivalent to the NTILE function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def ntile(n: Int): Column = withExpr { new NTile(Literal(n)) }
/**
* Window function: returns the relative rank (i.e. percentile) of rows within a window partition.
*
* This is computed by:
* {{{
* (rank of row in its partition - 1) / (number of rows in the partition - 1)
* }}}
*
* This is equivalent to the PERCENT_RANK function in SQL.
*
* @group window_funcs
* @since 1.6.0
*/
def percent_rank(): Column = withExpr { new PercentRank }
/**
* Window function: returns the rank of rows within a window partition.
*
 * The difference between rank and dense_rank is that dense_rank leaves no gaps in the ranking
 * sequence when there are ties. That is, if you were ranking a competition using dense_rank
 * and had three people tie for second place, you would say that all three were in second
 * place and that the next person came in third. Rank would give sequential numbers, so
 * the person who came in third place (after the ties) would register as coming in fifth.
*
* This is equivalent to the RANK function in SQL.
*
* @group window_funcs
* @since 1.4.0
*/
def rank(): Column = withExpr { new Rank }
/**
* Window function: returns a sequential number starting at 1 within a window partition.
*
* @group window_funcs
* @since 1.6.0
*/
def row_number(): Column = withExpr { RowNumber() }
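  // Editorial sketch (not part of the original source): how the ranking
  // functions differ on ties, for ordered values (10, 20, 20, 30):
  //   row_number -> 1, 2, 3, 4   rank -> 1, 2, 2, 4   dense_rank -> 1, 2, 2, 3
  private def rankingExample(df: DataFrame): DataFrame = {
    val w = expressions.Window.orderBy(col("score"))
    df.select(row_number().over(w), rank().over(w), dense_rank().over(w))
  }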
//////////////////////////////////////////////////////////////////////////////////////////////
// Non-aggregate functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Creates a new array column. The input columns must all have the same data type.
*
* @group normal_funcs
* @since 1.4.0
*/
@scala.annotation.varargs
def array(cols: Column*): Column = withExpr { CreateArray(cols.map(_.expr)) }
/**
* Creates a new array column. The input columns must all have the same data type.
*
* @group normal_funcs
* @since 1.4.0
*/
@scala.annotation.varargs
def array(colName: String, colNames: String*): Column = {
array((colName +: colNames).map(col) : _*)
}
/**
* Creates a new map column. The input columns must be grouped as key-value pairs, e.g.
* (key1, value1, key2, value2, ...). The key columns must all have the same data type, and can't
* be null. The value columns must all have the same data type.
*
* @group normal_funcs
 * @since 2.0.0
*/
@scala.annotation.varargs
def map(cols: Column*): Column = withExpr { CreateMap(cols.map(_.expr)) }
/**
* Creates a new map column. The array in the first column is used for keys. The array in the
* second column is used for values. All elements in the array for key should not be null.
*
* @group normal_funcs
 * @since 2.4.0
*/
def map_from_arrays(keys: Column, values: Column): Column = withExpr {
MapFromArrays(keys.expr, values.expr)
}
/**
* Marks a DataFrame as small enough for use in broadcast joins.
*
* The following example marks the right DataFrame for broadcast hash join using `joinKey`.
* {{{
* // left and right are DataFrames
* left.join(broadcast(right), "joinKey")
* }}}
*
* @group normal_funcs
* @since 1.5.0
*/
def broadcast[T](df: Dataset[T]): Dataset[T] = {
Dataset[T](df.sparkSession,
ResolvedHint(df.logicalPlan, HintInfo(broadcast = true)))(df.exprEnc)
}
/**
* Returns the first column that is not null, or null if all inputs are null.
*
* For example, `coalesce(a, b, c)` will return a if a is not null,
* or b if a is null and b is not null, or c if both a and b are null but c is not null.
*
* @group normal_funcs
* @since 1.3.0
*/
@scala.annotation.varargs
def coalesce(e: Column*): Column = withExpr { Coalesce(e.map(_.expr)) }
/**
* Creates a string column for the file name of the current Spark task.
*
* @group normal_funcs
* @since 1.6.0
*/
def input_file_name(): Column = withExpr { InputFileName() }
/**
 * Returns true iff the column is NaN.
*
* @group normal_funcs
* @since 1.6.0
*/
def isnan(e: Column): Column = withExpr { IsNaN(e.expr) }
/**
 * Returns true iff the column is null.
*
* @group normal_funcs
* @since 1.6.0
*/
def isnull(e: Column): Column = withExpr { IsNull(e.expr) }
/**
* A column expression that generates monotonically increasing 64-bit integers.
*
* The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
* The current implementation puts the partition ID in the upper 31 bits, and the record number
* within each partition in the lower 33 bits. The assumption is that the data frame has
* less than 1 billion partitions, and each partition has less than 8 billion records.
*
* As an example, consider a `DataFrame` with two partitions, each with 3 records.
* This expression would return the following IDs:
*
* {{{
* 0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
* }}}
*
* @group normal_funcs
* @since 1.4.0
*/
@deprecated("Use monotonically_increasing_id()", "2.0.0")
def monotonicallyIncreasingId(): Column = monotonically_increasing_id()
/**
* A column expression that generates monotonically increasing 64-bit integers.
*
* The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
* The current implementation puts the partition ID in the upper 31 bits, and the record number
* within each partition in the lower 33 bits. The assumption is that the data frame has
* less than 1 billion partitions, and each partition has less than 8 billion records.
*
* As an example, consider a `DataFrame` with two partitions, each with 3 records.
* This expression would return the following IDs:
*
* {{{
* 0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
* }}}
*
* @group normal_funcs
* @since 1.6.0
*/
def monotonically_increasing_id(): Column = withExpr { MonotonicallyIncreasingID() }
/**
* Returns col1 if it is not NaN, or col2 if col1 is NaN.
*
* Both inputs should be floating point columns (DoubleType or FloatType).
*
* @group normal_funcs
* @since 1.5.0
*/
def nanvl(col1: Column, col2: Column): Column = withExpr { NaNvl(col1.expr, col2.expr) }
/**
* Unary minus, i.e. negate the expression.
* {{{
 * // Select the amount column and negate all values.
* // Scala:
* df.select( -df("amount") )
*
* // Java:
* df.select( negate(df.col("amount")) );
* }}}
*
* @group normal_funcs
* @since 1.3.0
*/
def negate(e: Column): Column = -e
/**
* Inversion of boolean expression, i.e. NOT.
* {{{
* // Scala: select rows that are not active (isActive === false)
* df.filter( !df("isActive") )
*
* // Java:
* df.filter( not(df.col("isActive")) );
* }}}
*
* @group normal_funcs
* @since 1.3.0
*/
def not(e: Column): Column = !e
/**
* Generate a random column with independent and identically distributed (i.i.d.) samples
* from U[0.0, 1.0].
*
 * @note The function is non-deterministic in the general case.
*
* @group normal_funcs
* @since 1.4.0
*/
def rand(seed: Long): Column = withExpr { Rand(seed) }
/**
* Generate a random column with independent and identically distributed (i.i.d.) samples
* from U[0.0, 1.0].
*
 * @note The function is non-deterministic in the general case.
*
* @group normal_funcs
* @since 1.4.0
*/
def rand(): Column = rand(Utils.random.nextLong)
/**
* Generate a column with independent and identically distributed (i.i.d.) samples from
* the standard normal distribution.
*
 * @note The function is non-deterministic in the general case.
*
* @group normal_funcs
* @since 1.4.0
*/
def randn(seed: Long): Column = withExpr { Randn(seed) }
/**
* Generate a column with independent and identically distributed (i.i.d.) samples from
* the standard normal distribution.
*
 * @note The function is non-deterministic in the general case.
*
* @group normal_funcs
* @since 1.4.0
*/
def randn(): Column = randn(Utils.random.nextLong)
/**
* Partition ID.
*
* @note This is non-deterministic because it depends on data partitioning and task scheduling.
*
* @group normal_funcs
* @since 1.6.0
*/
def spark_partition_id(): Column = withExpr { SparkPartitionID() }
/**
* Computes the square root of the specified float value.
*
* @group math_funcs
* @since 1.3.0
*/
def sqrt(e: Column): Column = withExpr { Sqrt(e.expr) }
/**
* Computes the square root of the specified float value.
*
* @group math_funcs
* @since 1.5.0
*/
def sqrt(colName: String): Column = sqrt(Column(colName))
/**
* Creates a new struct column.
* If the input column is a column in a `DataFrame`, or a derived column expression
* that is named (i.e. aliased), its name would be retained as the StructField's name,
* otherwise, the newly generated StructField's name would be auto generated as
* `col` with a suffix `index + 1`, i.e. col1, col2, col3, ...
*
* @group normal_funcs
* @since 1.4.0
*/
@scala.annotation.varargs
def struct(cols: Column*): Column = withExpr { CreateStruct(cols.map(_.expr)) }
/**
* Creates a new struct column that composes multiple input columns.
*
* @group normal_funcs
* @since 1.4.0
*/
@scala.annotation.varargs
def struct(colName: String, colNames: String*): Column = {
struct((colName +: colNames).map(col) : _*)
}
/**
* Evaluates a list of conditions and returns one of multiple possible result expressions.
* If otherwise is not defined at the end, null is returned for unmatched conditions.
*
* {{{
* // Example: encoding gender string column into integer.
*
* // Scala:
* people.select(when(people("gender") === "male", 0)
* .when(people("gender") === "female", 1)
* .otherwise(2))
*
* // Java:
* people.select(when(col("gender").equalTo("male"), 0)
* .when(col("gender").equalTo("female"), 1)
* .otherwise(2))
* }}}
*
* @group normal_funcs
* @since 1.4.0
*/
def when(condition: Column, value: Any): Column = withExpr {
CaseWhen(Seq((condition.expr, lit(value).expr)))
}
/**
* Computes bitwise NOT (~) of a number.
*
* @group normal_funcs
* @since 1.4.0
*/
def bitwiseNOT(e: Column): Column = withExpr { BitwiseNot(e.expr) }
/**
* Parses the expression string into the column that it represents, similar to
* [[Dataset#selectExpr]].
* {{{
* // get the number of words of each length
* df.groupBy(expr("length(word)")).count()
* }}}
*
* @group normal_funcs
*/
def expr(expr: String): Column = {
val parser = SparkSession.getActiveSession.map(_.sessionState.sqlParser).getOrElse {
new SparkSqlParser(new SQLConf)
}
Column(parser.parseExpression(expr))
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Math Functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Computes the absolute value of a numeric value.
*
* @group math_funcs
* @since 1.3.0
*/
def abs(e: Column): Column = withExpr { Abs(e.expr) }
/**
* @return inverse cosine of `e` in radians, as if computed by `java.lang.Math.acos`
*
* @group math_funcs
* @since 1.4.0
*/
def acos(e: Column): Column = withExpr { Acos(e.expr) }
/**
* @return inverse cosine of `columnName`, as if computed by `java.lang.Math.acos`
*
* @group math_funcs
* @since 1.4.0
*/
def acos(columnName: String): Column = acos(Column(columnName))
/**
* @return inverse sine of `e` in radians, as if computed by `java.lang.Math.asin`
*
* @group math_funcs
* @since 1.4.0
*/
def asin(e: Column): Column = withExpr { Asin(e.expr) }
/**
* @return inverse sine of `columnName`, as if computed by `java.lang.Math.asin`
*
* @group math_funcs
* @since 1.4.0
*/
def asin(columnName: String): Column = asin(Column(columnName))
/**
* @return inverse tangent of `e`, as if computed by `java.lang.Math.atan`
*
* @group math_funcs
* @since 1.4.0
*/
def atan(e: Column): Column = withExpr { Atan(e.expr) }
/**
* @return inverse tangent of `columnName`, as if computed by `java.lang.Math.atan`
*
* @group math_funcs
* @since 1.4.0
*/
def atan(columnName: String): Column = atan(Column(columnName))
/**
* @param y coordinate on y-axis
* @param x coordinate on x-axis
* @return the <i>theta</i> component of the point
* (<i>r</i>, <i>theta</i>)
* in polar coordinates that corresponds to the point
* (<i>x</i>, <i>y</i>) in Cartesian coordinates,
* as if computed by `java.lang.Math.atan2`
*
* @group math_funcs
* @since 1.4.0
*/
def atan2(y: Column, x: Column): Column = withExpr { Atan2(y.expr, x.expr) }
/**
* @param y coordinate on y-axis
* @param xName coordinate on x-axis
* @return the <i>theta</i> component of the point
* (<i>r</i>, <i>theta</i>)
* in polar coordinates that corresponds to the point
* (<i>x</i>, <i>y</i>) in Cartesian coordinates,
* as if computed by `java.lang.Math.atan2`
*
* @group math_funcs
* @since 1.4.0
*/
def atan2(y: Column, xName: String): Column = atan2(y, Column(xName))
/**
* @param yName coordinate on y-axis
* @param x coordinate on x-axis
* @return the <i>theta</i> component of the point
* (<i>r</i>, <i>theta</i>)
* in polar coordinates that corresponds to the point
* (<i>x</i>, <i>y</i>) in Cartesian coordinates,
* as if computed by `java.lang.Math.atan2`
*
* @group math_funcs
* @since 1.4.0
*/
def atan2(yName: String, x: Column): Column = atan2(Column(yName), x)
/**
* @param yName coordinate on y-axis
* @param xName coordinate on x-axis
* @return the <i>theta</i> component of the point
* (<i>r</i>, <i>theta</i>)
* in polar coordinates that corresponds to the point
* (<i>x</i>, <i>y</i>) in Cartesian coordinates,
* as if computed by `java.lang.Math.atan2`
*
* @group math_funcs
* @since 1.4.0
*/
def atan2(yName: String, xName: String): Column =
atan2(Column(yName), Column(xName))
/**
* @param y coordinate on y-axis
* @param xValue coordinate on x-axis
* @return the <i>theta</i> component of the point
* (<i>r</i>, <i>theta</i>)
* in polar coordinates that corresponds to the point
* (<i>x</i>, <i>y</i>) in Cartesian coordinates,
* as if computed by `java.lang.Math.atan2`
*
* @group math_funcs
* @since 1.4.0
*/
def atan2(y: Column, xValue: Double): Column = atan2(y, lit(xValue))
/**
* @param yName coordinate on y-axis
* @param xValue coordinate on x-axis
* @return the <i>theta</i> component of the point
* (<i>r</i>, <i>theta</i>)
* in polar coordinates that corresponds to the point
* (<i>x</i>, <i>y</i>) in Cartesian coordinates,
* as if computed by `java.lang.Math.atan2`
*
* @group math_funcs
* @since 1.4.0
*/
def atan2(yName: String, xValue: Double): Column = atan2(Column(yName), xValue)
/**
* @param yValue coordinate on y-axis
* @param x coordinate on x-axis
* @return the <i>theta</i> component of the point
* (<i>r</i>, <i>theta</i>)
* in polar coordinates that corresponds to the point
* (<i>x</i>, <i>y</i>) in Cartesian coordinates,
* as if computed by `java.lang.Math.atan2`
*
* @group math_funcs
* @since 1.4.0
*/
def atan2(yValue: Double, x: Column): Column = atan2(lit(yValue), x)
/**
* @param yValue coordinate on y-axis
* @param xName coordinate on x-axis
* @return the <i>theta</i> component of the point
* (<i>r</i>, <i>theta</i>)
* in polar coordinates that corresponds to the point
* (<i>x</i>, <i>y</i>) in Cartesian coordinates,
* as if computed by `java.lang.Math.atan2`
*
* @group math_funcs
* @since 1.4.0
*/
def atan2(yValue: Double, xName: String): Column = atan2(yValue, Column(xName))
/**
* An expression that returns the string representation of the binary value of the given long
* column. For example, bin("12") returns "1100".
*
* @group math_funcs
* @since 1.5.0
*/
def bin(e: Column): Column = withExpr { Bin(e.expr) }
/**
* An expression that returns the string representation of the binary value of the given long
* column. For example, bin("12") returns "1100".
*
* @group math_funcs
* @since 1.5.0
*/
def bin(columnName: String): Column = bin(Column(columnName))
/**
* Computes the cube-root of the given value.
*
* @group math_funcs
* @since 1.4.0
*/
def cbrt(e: Column): Column = withExpr { Cbrt(e.expr) }
/**
* Computes the cube-root of the given column.
*
* @group math_funcs
* @since 1.4.0
*/
def cbrt(columnName: String): Column = cbrt(Column(columnName))
/**
* Computes the ceiling of the given value.
*
* @group math_funcs
* @since 1.4.0
*/
def ceil(e: Column): Column = withExpr { Ceil(e.expr) }
/**
* Computes the ceiling of the given column.
*
* @group math_funcs
* @since 1.4.0
*/
def ceil(columnName: String): Column = ceil(Column(columnName))
/**
* Convert a number in a string column from one base to another.
*
* @group math_funcs
* @since 1.5.0
*/
def conv(num: Column, fromBase: Int, toBase: Int): Column = withExpr {
Conv(num.expr, lit(fromBase).expr, lit(toBase).expr)
}
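  // Editorial sketch (not part of the original source): conv works on
  // string-typed numbers; conv("100", 2, 10) yields "4":
  private def convExample(df: DataFrame): DataFrame =
    df.select(conv(lit("100"), 2, 10).as("binary_to_decimal"))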
/**
* @param e angle in radians
* @return cosine of the angle, as if computed by `java.lang.Math.cos`
*
* @group math_funcs
* @since 1.4.0
*/
def cos(e: Column): Column = withExpr { Cos(e.expr) }
/**
* @param columnName angle in radians
* @return cosine of the angle, as if computed by `java.lang.Math.cos`
*
* @group math_funcs
* @since 1.4.0
*/
def cos(columnName: String): Column = cos(Column(columnName))
/**
* @param e hyperbolic angle
* @return hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh`
*
* @group math_funcs
* @since 1.4.0
*/
def cosh(e: Column): Column = withExpr { Cosh(e.expr) }
/**
* @param columnName hyperbolic angle
* @return hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh`
*
* @group math_funcs
* @since 1.4.0
*/
def cosh(columnName: String): Column = cosh(Column(columnName))
/**
* Computes the exponential of the given value.
*
* @group math_funcs
* @since 1.4.0
*/
def exp(e: Column): Column = withExpr { Exp(e.expr) }
/**
* Computes the exponential of the given column.
*
* @group math_funcs
* @since 1.4.0
*/
def exp(columnName: String): Column = exp(Column(columnName))
/**
* Computes the exponential of the given value minus one.
*
* @group math_funcs
* @since 1.4.0
*/
def expm1(e: Column): Column = withExpr { Expm1(e.expr) }
/**
 * Computes the exponential of the given column minus one.
*
* @group math_funcs
* @since 1.4.0
*/
def expm1(columnName: String): Column = expm1(Column(columnName))
/**
* Computes the factorial of the given value.
*
* @group math_funcs
* @since 1.5.0
*/
def factorial(e: Column): Column = withExpr { Factorial(e.expr) }
/**
* Computes the floor of the given value.
*
* @group math_funcs
* @since 1.4.0
*/
def floor(e: Column): Column = withExpr { Floor(e.expr) }
/**
* Computes the floor of the given column.
*
* @group math_funcs
* @since 1.4.0
*/
def floor(columnName: String): Column = floor(Column(columnName))
/**
* Returns the greatest value of the list of values, skipping null values.
* This function takes at least 2 parameters. It will return null iff all parameters are null.
*
* @group normal_funcs
* @since 1.5.0
*/
@scala.annotation.varargs
def greatest(exprs: Column*): Column = withExpr { Greatest(exprs.map(_.expr)) }
/**
* Returns the greatest value of the list of column names, skipping null values.
* This function takes at least 2 parameters. It will return null iff all parameters are null.
*
* @group normal_funcs
* @since 1.5.0
*/
@scala.annotation.varargs
def greatest(columnName: String, columnNames: String*): Column = {
greatest((columnName +: columnNames).map(Column.apply): _*)
}
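  // Editorial sketch (not part of the original source): greatest/least compare
  // row-wise across columns (skipping nulls), unlike max/min, which aggregate
  // down a single column:
  private def greatestExample(df: DataFrame): DataFrame =
    df.select(greatest(col("q1"), col("q2"), col("q3")).as("best_quarter"))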
/**
* Computes hex value of the given column.
*
* @group math_funcs
* @since 1.5.0
*/
def hex(column: Column): Column = withExpr { Hex(column.expr) }
/**
* Inverse of hex. Interprets each pair of characters as a hexadecimal number
 * and converts it to the byte representation of the number.
*
* @group math_funcs
* @since 1.5.0
*/
def unhex(column: Column): Column = withExpr { Unhex(column.expr) }
/**
* Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
*
* @group math_funcs
* @since 1.4.0
*/
def hypot(l: Column, r: Column): Column = withExpr { Hypot(l.expr, r.expr) }
/**
* Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
*
* @group math_funcs
* @since 1.4.0
*/
def hypot(l: Column, rightName: String): Column = hypot(l, Column(rightName))
/**
* Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
*
* @group math_funcs
* @since 1.4.0
*/
def hypot(leftName: String, r: Column): Column = hypot(Column(leftName), r)
/**
* Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
*
* @group math_funcs
* @since 1.4.0
*/
def hypot(leftName: String, rightName: String): Column =
hypot(Column(leftName), Column(rightName))
/**
* Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
*
* @group math_funcs
* @since 1.4.0
*/
def hypot(l: Column, r: Double): Column = hypot(l, lit(r))
/**
* Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
*
* @group math_funcs
* @since 1.4.0
*/
def hypot(leftName: String, r: Double): Column = hypot(Column(leftName), r)
/**
* Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
*
* @group math_funcs
* @since 1.4.0
*/
def hypot(l: Double, r: Column): Column = hypot(lit(l), r)
/**
* Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.
*
* @group math_funcs
* @since 1.4.0
*/
def hypot(l: Double, rightName: String): Column = hypot(l, Column(rightName))
/**
* Returns the least value of the list of values, skipping null values.
* This function takes at least 2 parameters. It will return null iff all parameters are null.
*
* @group normal_funcs
* @since 1.5.0
*/
@scala.annotation.varargs
def least(exprs: Column*): Column = withExpr { Least(exprs.map(_.expr)) }
/**
* Returns the least value of the list of column names, skipping null values.
* This function takes at least 2 parameters. It will return null iff all parameters are null.
*
* @group normal_funcs
* @since 1.5.0
*/
@scala.annotation.varargs
def least(columnName: String, columnNames: String*): Column = {
least((columnName +: columnNames).map(Column.apply): _*)
}
/**
* Computes the natural logarithm of the given value.
*
* @group math_funcs
* @since 1.4.0
*/
def log(e: Column): Column = withExpr { Log(e.expr) }
/**
* Computes the natural logarithm of the given column.
*
* @group math_funcs
* @since 1.4.0
*/
def log(columnName: String): Column = log(Column(columnName))
/**
* Returns the first argument-base logarithm of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def log(base: Double, a: Column): Column = withExpr { Logarithm(lit(base).expr, a.expr) }
/**
* Returns the first argument-base logarithm of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def log(base: Double, columnName: String): Column = log(base, Column(columnName))
/**
* Computes the logarithm of the given value in base 10.
*
* @group math_funcs
* @since 1.4.0
*/
def log10(e: Column): Column = withExpr { Log10(e.expr) }
/**
* Computes the logarithm of the given value in base 10.
*
* @group math_funcs
* @since 1.4.0
*/
def log10(columnName: String): Column = log10(Column(columnName))
/**
* Computes the natural logarithm of the given value plus one.
*
* @group math_funcs
* @since 1.4.0
*/
def log1p(e: Column): Column = withExpr { Log1p(e.expr) }
/**
* Computes the natural logarithm of the given column plus one.
*
* @group math_funcs
* @since 1.4.0
*/
def log1p(columnName: String): Column = log1p(Column(columnName))
/**
* Computes the logarithm of the given column in base 2.
*
* @group math_funcs
* @since 1.5.0
*/
def log2(expr: Column): Column = withExpr { Log2(expr.expr) }
/**
* Computes the logarithm of the given value in base 2.
*
* @group math_funcs
* @since 1.5.0
*/
def log2(columnName: String): Column = log2(Column(columnName))
/**
* Returns the value of the first argument raised to the power of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def pow(l: Column, r: Column): Column = withExpr { Pow(l.expr, r.expr) }
/**
* Returns the value of the first argument raised to the power of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def pow(l: Column, rightName: String): Column = pow(l, Column(rightName))
/**
* Returns the value of the first argument raised to the power of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def pow(leftName: String, r: Column): Column = pow(Column(leftName), r)
/**
* Returns the value of the first argument raised to the power of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def pow(leftName: String, rightName: String): Column = pow(Column(leftName), Column(rightName))
/**
* Returns the value of the first argument raised to the power of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def pow(l: Column, r: Double): Column = pow(l, lit(r))
/**
* Returns the value of the first argument raised to the power of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def pow(leftName: String, r: Double): Column = pow(Column(leftName), r)
/**
* Returns the value of the first argument raised to the power of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def pow(l: Double, r: Column): Column = pow(lit(l), r)
/**
* Returns the value of the first argument raised to the power of the second argument.
*
* @group math_funcs
* @since 1.4.0
*/
def pow(l: Double, rightName: String): Column = pow(l, Column(rightName))
/**
* Returns the positive value of dividend mod divisor.
*
* @group math_funcs
* @since 1.5.0
*/
def pmod(dividend: Column, divisor: Column): Column = withExpr {
Pmod(dividend.expr, divisor.expr)
}
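  // Editorial sketch (not part of the original source): unlike the % operator,
  // pmod never returns a negative result; pmod(-7, 3) = 2 while -7 % 3 = -1:
  private def pmodExample(df: DataFrame): DataFrame =
    df.select(pmod(lit(-7), lit(3)).as("pmod"), (lit(-7) % lit(3)).as("rem"))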
/**
* Returns the double value that is closest in value to the argument and
* is equal to a mathematical integer.
*
* @group math_funcs
* @since 1.4.0
*/
def rint(e: Column): Column = withExpr { Rint(e.expr) }
/**
* Returns the double value that is closest in value to the argument and
* is equal to a mathematical integer.
*
* @group math_funcs
* @since 1.4.0
*/
def rint(columnName: String): Column = rint(Column(columnName))
/**
* Returns the value of the column `e` rounded to 0 decimal places with HALF_UP round mode.
*
* @group math_funcs
* @since 1.5.0
*/
def round(e: Column): Column = round(e, 0)
/**
* Round the value of `e` to `scale` decimal places with HALF_UP round mode
* if `scale` is greater than or equal to 0 or at integral part when `scale` is less than 0.
*
* @group math_funcs
* @since 1.5.0
*/
def round(e: Column, scale: Int): Column = withExpr { Round(e.expr, Literal(scale)) }
/**
* Returns the value of the column `e` rounded to 0 decimal places with HALF_EVEN round mode.
*
* @group math_funcs
* @since 2.0.0
*/
def bround(e: Column): Column = bround(e, 0)
/**
* Round the value of `e` to `scale` decimal places with HALF_EVEN round mode
* if `scale` is greater than or equal to 0 or at integral part when `scale` is less than 0.
*
* @group math_funcs
* @since 2.0.0
*/
def bround(e: Column, scale: Int): Column = withExpr { BRound(e.expr, Literal(scale)) }
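  // Editorial sketch (not part of the original source): HALF_UP vs HALF_EVEN on
  // a .5 boundary: round(2.5) = 3 but bround(2.5) = 2 (banker's rounding):
  private def roundingExample(df: DataFrame): DataFrame =
    df.select(round(lit(2.5)).as("half_up"), bround(lit(2.5)).as("half_even"))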
/**
* Shift the given value numBits left. If the given value is a long value, this function
 * will return a long value; otherwise it will return an integer value.
*
* @group math_funcs
* @since 1.5.0
*/
def shiftLeft(e: Column, numBits: Int): Column = withExpr { ShiftLeft(e.expr, lit(numBits).expr) }
/**
* (Signed) shift the given value numBits right. If the given value is a long value, it will
 * return a long value; otherwise it will return an integer value.
*
* @group math_funcs
* @since 1.5.0
*/
def shiftRight(e: Column, numBits: Int): Column = withExpr {
ShiftRight(e.expr, lit(numBits).expr)
}
/**
* Unsigned shift the given value numBits right. If the given value is a long value,
 * it will return a long value; otherwise it will return an integer value.
*
* @group math_funcs
* @since 1.5.0
*/
def shiftRightUnsigned(e: Column, numBits: Int): Column = withExpr {
ShiftRightUnsigned(e.expr, lit(numBits).expr)
}
/**
* Computes the signum of the given value.
*
* @group math_funcs
* @since 1.4.0
*/
def signum(e: Column): Column = withExpr { Signum(e.expr) }
/**
* Computes the signum of the given column.
*
* @group math_funcs
* @since 1.4.0
*/
def signum(columnName: String): Column = signum(Column(columnName))
/**
* @param e angle in radians
* @return sine of the angle, as if computed by `java.lang.Math.sin`
*
* @group math_funcs
* @since 1.4.0
*/
def sin(e: Column): Column = withExpr { Sin(e.expr) }
/**
* @param columnName angle in radians
* @return sine of the angle, as if computed by `java.lang.Math.sin`
*
* @group math_funcs
* @since 1.4.0
*/
def sin(columnName: String): Column = sin(Column(columnName))
/**
* @param e hyperbolic angle
* @return hyperbolic sine of the given value, as if computed by `java.lang.Math.sinh`
*
* @group math_funcs
* @since 1.4.0
*/
def sinh(e: Column): Column = withExpr { Sinh(e.expr) }
/**
* @param columnName hyperbolic angle
* @return hyperbolic sine of the given value, as if computed by `java.lang.Math.sinh`
*
* @group math_funcs
* @since 1.4.0
*/
def sinh(columnName: String): Column = sinh(Column(columnName))
/**
* @param e angle in radians
* @return tangent of the given value, as if computed by `java.lang.Math.tan`
*
* @group math_funcs
* @since 1.4.0
*/
def tan(e: Column): Column = withExpr { Tan(e.expr) }
/**
* @param columnName angle in radians
* @return tangent of the given value, as if computed by `java.lang.Math.tan`
*
* @group math_funcs
* @since 1.4.0
*/
def tan(columnName: String): Column = tan(Column(columnName))
/**
* @param e hyperbolic angle
* @return hyperbolic tangent of the given value, as if computed by `java.lang.Math.tanh`
*
* @group math_funcs
* @since 1.4.0
*/
def tanh(e: Column): Column = withExpr { Tanh(e.expr) }
/**
* @param columnName hyperbolic angle
* @return hyperbolic tangent of the given value, as if computed by `java.lang.Math.tanh`
*
* @group math_funcs
* @since 1.4.0
*/
def tanh(columnName: String): Column = tanh(Column(columnName))
/**
* @group math_funcs
* @since 1.4.0
*/
@deprecated("Use degrees", "2.1.0")
def toDegrees(e: Column): Column = degrees(e)
/**
* @group math_funcs
* @since 1.4.0
*/
@deprecated("Use degrees", "2.1.0")
def toDegrees(columnName: String): Column = degrees(Column(columnName))
/**
* Converts an angle measured in radians to an approximately equivalent angle measured in degrees.
*
* @param e angle in radians
* @return angle in degrees, as if computed by `java.lang.Math.toDegrees`
*
* @group math_funcs
* @since 2.1.0
*/
def degrees(e: Column): Column = withExpr { ToDegrees(e.expr) }
/**
* Converts an angle measured in radians to an approximately equivalent angle measured in degrees.
*
* @param columnName angle in radians
* @return angle in degrees, as if computed by `java.lang.Math.toDegrees`
*
* @group math_funcs
* @since 2.1.0
*/
def degrees(columnName: String): Column = degrees(Column(columnName))
/**
* @group math_funcs
* @since 1.4.0
*/
@deprecated("Use radians", "2.1.0")
def toRadians(e: Column): Column = radians(e)
/**
* @group math_funcs
* @since 1.4.0
*/
@deprecated("Use radians", "2.1.0")
def toRadians(columnName: String): Column = radians(Column(columnName))
/**
* Converts an angle measured in degrees to an approximately equivalent angle measured in radians.
*
* @param e angle in degrees
* @return angle in radians, as if computed by `java.lang.Math.toRadians`
*
* @group math_funcs
* @since 2.1.0
*/
def radians(e: Column): Column = withExpr { ToRadians(e.expr) }
/**
* Converts an angle measured in degrees to an approximately equivalent angle measured in radians.
*
* @param columnName angle in degrees
* @return angle in radians, as if computed by `java.lang.Math.toRadians`
*
* @group math_funcs
* @since 2.1.0
*/
def radians(columnName: String): Column = radians(Column(columnName))
//////////////////////////////////////////////////////////////////////////////////////////////
// Misc functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Calculates the MD5 digest of a binary column and returns the value
* as a 32 character hex string.
*
* @group misc_funcs
* @since 1.5.0
*/
def md5(e: Column): Column = withExpr { Md5(e.expr) }
/**
* Calculates the SHA-1 digest of a binary column and returns the value
* as a 40 character hex string.
*
* @group misc_funcs
* @since 1.5.0
*/
def sha1(e: Column): Column = withExpr { Sha1(e.expr) }
/**
* Calculates the SHA-2 family of hash functions of a binary column and
* returns the value as a hex string.
*
 * @param e column to compute SHA-2 on.
 * @param numBits one of 224, 256, 384, or 512 (0 is also permitted and is equivalent to 256).
*
* @group misc_funcs
* @since 1.5.0
*/
def sha2(e: Column, numBits: Int): Column = {
require(Seq(0, 224, 256, 384, 512).contains(numBits),
s"numBits $numBits is not in the permitted values (0, 224, 256, 384, 512)")
withExpr { Sha2(e.expr, lit(numBits).expr) }
}
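// Illustrative usage sketch (hypothetical column name): the require above fails fast at
// call time for an unsupported bit length, before any expression is built.
//   df.select(sha2($"payload", 256))  // 64-character hex string for a binary column
//   sha2($"payload", 128)             // throws IllegalArgumentException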
/**
* Calculates the cyclic redundancy check value (CRC32) of a binary column and
* returns the value as a bigint.
*
* @group misc_funcs
* @since 1.5.0
*/
def crc32(e: Column): Column = withExpr { Crc32(e.expr) }
/**
* Calculates the hash code of given columns, and returns the result as an int column.
*
* @group misc_funcs
* @since 2.0.0
*/
@scala.annotation.varargs
def hash(cols: Column*): Column = withExpr {
new Murmur3Hash(cols.map(_.expr))
}
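// Illustrative usage sketch (hypothetical column names): hashing several columns at once
// yields a single int per row, e.g. for bucketing or sampling.
//   df.select(hash($"user_id", $"session_id").as("bucket_key"))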
//////////////////////////////////////////////////////////////////////////////////////////////
// String functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Computes the numeric value of the first character of the string column, and returns the
* result as an int column.
*
* @group string_funcs
* @since 1.5.0
*/
def ascii(e: Column): Column = withExpr { Ascii(e.expr) }
/**
* Computes the BASE64 encoding of a binary column and returns it as a string column.
* This is the reverse of unbase64.
*
* @group string_funcs
* @since 1.5.0
*/
def base64(e: Column): Column = withExpr { Base64(e.expr) }
/**
* Concatenates multiple input string columns together into a single string column,
* using the given separator.
*
* @group string_funcs
* @since 1.5.0
*/
@scala.annotation.varargs
def concat_ws(sep: String, exprs: Column*): Column = withExpr {
ConcatWs(Literal.create(sep, StringType) +: exprs.map(_.expr))
}
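// Illustrative usage sketch (hypothetical column names): null columns are skipped rather
// than turning the whole result null, which is the main difference from plain concat.
//   df.select(concat_ws("-", $"year", $"month", $"day").as("date_key"))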
/**
 * Decodes the first argument from a binary column into a string using the provided character
 * set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
 * If either argument is null, the result will also be null.
*
* @group string_funcs
* @since 1.5.0
*/
def decode(value: Column, charset: String): Column = withExpr {
Decode(value.expr, lit(charset).expr)
}
/**
 * Encodes the first argument from a string column into a binary using the provided character
 * set (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
 * If either argument is null, the result will also be null.
*
* @group string_funcs
* @since 1.5.0
*/
def encode(value: Column, charset: String): Column = withExpr {
Encode(value.expr, lit(charset).expr)
}
/**
* Formats numeric column x to a format like '#,###,###.##', rounded to d decimal places
* with HALF_EVEN round mode, and returns the result as a string column.
*
* If d is 0, the result has no decimal point or fractional part.
* If d is less than 0, the result will be null.
*
* @group string_funcs
* @since 1.5.0
*/
def format_number(x: Column, d: Int): Column = withExpr {
FormatNumber(x.expr, lit(d).expr)
}
/**
* Formats the arguments in printf-style and returns the result as a string column.
*
* @group string_funcs
* @since 1.5.0
*/
@scala.annotation.varargs
def format_string(format: String, arguments: Column*): Column = withExpr {
FormatString((lit(format) +: arguments).map(_.expr): _*)
}
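// Illustrative usage sketch (hypothetical column names): the format string follows
// java.lang.String.format / printf conventions.
//   df.select(format_string("%s scored %d points", $"name", $"score"))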
/**
* Returns a new string column by converting the first letter of each word to uppercase.
* Words are delimited by whitespace.
*
* For example, "hello world" will become "Hello World".
*
* @group string_funcs
* @since 1.5.0
*/
def initcap(e: Column): Column = withExpr { InitCap(e.expr) }
/**
 * Locates the position of the first occurrence of substr in the given string column.
 * Returns null if either of the arguments is null.
 *
 * @note The position is not zero-based, but 1-based. Returns 0 if substr
 * could not be found in str.
*
* @group string_funcs
* @since 1.5.0
*/
def instr(str: Column, substring: String): Column = withExpr {
StringInstr(str.expr, lit(substring).expr)
}
/**
* Computes the character length of a given string or number of bytes of a binary string.
 * The length of character strings includes the trailing spaces. The length of binary strings
 * includes binary zeros.
*
* @group string_funcs
* @since 1.5.0
*/
def length(e: Column): Column = withExpr { Length(e.expr) }
/**
* Converts a string column to lower case.
*
* @group string_funcs
* @since 1.3.0
*/
def lower(e: Column): Column = withExpr { Lower(e.expr) }
/**
* Computes the Levenshtein distance of the two given string columns.
* @group string_funcs
* @since 1.5.0
*/
def levenshtein(l: Column, r: Column): Column = withExpr { Levenshtein(l.expr, r.expr) }
/**
* Locate the position of the first occurrence of substr.
*
 * @note The position is not zero-based, but 1-based. Returns 0 if substr
 * could not be found in str.
*
* @group string_funcs
* @since 1.5.0
*/
def locate(substr: String, str: Column): Column = withExpr {
new StringLocate(lit(substr).expr, str.expr)
}
/**
* Locate the position of the first occurrence of substr in a string column, after position pos.
*
 * @note The position is not zero-based, but 1-based. Returns 0 if substr
 * could not be found in str.
*
* @group string_funcs
* @since 1.5.0
*/
def locate(substr: String, str: Column, pos: Int): Column = withExpr {
StringLocate(lit(substr).expr, str.expr, lit(pos).expr)
}
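// Illustrative usage sketch (hypothetical column name): both the `pos` argument and the
// result are 1-based, so searching from pos = 1 scans the whole string.
//   df.select(locate("ab", $"text", 2))  // 0 when "ab" only occurs before position 2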
/**
* Left-pad the string column with pad to a length of len. If the string column is longer
* than len, the return value is shortened to len characters.
*
* @group string_funcs
* @since 1.5.0
*/
def lpad(str: Column, len: Int, pad: String): Column = withExpr {
StringLPad(str.expr, lit(len).expr, lit(pad).expr)
}
/**
 * Trims the spaces from the left end of the specified string value.
*
* @group string_funcs
* @since 1.5.0
*/
def ltrim(e: Column): Column = withExpr { StringTrimLeft(e.expr) }
/**
 * Trims the specified character string from the left end of the specified string column.
* @group string_funcs
* @since 2.3.0
*/
def ltrim(e: Column, trimString: String): Column = withExpr {
StringTrimLeft(e.expr, Literal(trimString))
}
/**
* Extract a specific group matched by a Java regex, from the specified string column.
* If the regex did not match, or the specified group did not match, an empty string is returned.
*
* @group string_funcs
* @since 1.5.0
*/
def regexp_extract(e: Column, exp: String, groupIdx: Int): Column = withExpr {
RegExpExtract(e.expr, lit(exp).expr, lit(groupIdx).expr)
}
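// Illustrative usage sketch (hypothetical column name): group 0 is the whole match,
// groups 1..n are the parenthesized sub-patterns.
//   df.select(regexp_extract($"log", "^(\\d+)-(\\d+)", 2))  // "" when the regex does not match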
/**
* Replace all substrings of the specified string value that match regexp with rep.
*
* @group string_funcs
* @since 1.5.0
*/
def regexp_replace(e: Column, pattern: String, replacement: String): Column = withExpr {
RegExpReplace(e.expr, lit(pattern).expr, lit(replacement).expr)
}
/**
* Replace all substrings of the specified string value that match regexp with rep.
*
* @group string_funcs
* @since 2.1.0
*/
def regexp_replace(e: Column, pattern: Column, replacement: Column): Column = withExpr {
RegExpReplace(e.expr, pattern.expr, replacement.expr)
}
/**
* Decodes a BASE64 encoded string column and returns it as a binary column.
* This is the reverse of base64.
*
* @group string_funcs
* @since 1.5.0
*/
def unbase64(e: Column): Column = withExpr { UnBase64(e.expr) }
/**
* Right-pad the string column with pad to a length of len. If the string column is longer
* than len, the return value is shortened to len characters.
*
* @group string_funcs
* @since 1.5.0
*/
def rpad(str: Column, len: Int, pad: String): Column = withExpr {
StringRPad(str.expr, lit(len).expr, lit(pad).expr)
}
/**
* Repeats a string column n times, and returns it as a new string column.
*
* @group string_funcs
* @since 1.5.0
*/
def repeat(str: Column, n: Int): Column = withExpr {
StringRepeat(str.expr, lit(n).expr)
}
/**
 * Trims the spaces from the right end of the specified string value.
*
* @group string_funcs
* @since 1.5.0
*/
def rtrim(e: Column): Column = withExpr { StringTrimRight(e.expr) }
/**
 * Trims the specified character string from the right end of the specified string column.
* @group string_funcs
* @since 2.3.0
*/
def rtrim(e: Column, trimString: String): Column = withExpr {
StringTrimRight(e.expr, Literal(trimString))
}
/**
* Returns the soundex code for the specified expression.
*
* @group string_funcs
* @since 1.5.0
*/
def soundex(e: Column): Column = withExpr { SoundEx(e.expr) }
/**
* Splits str around pattern (pattern is a regular expression).
*
* @note Pattern is a string representation of the regular expression.
*
* @group string_funcs
* @since 1.5.0
*/
def split(str: Column, pattern: String): Column = withExpr {
StringSplit(str.expr, lit(pattern).expr)
}
/**
 * Substring starts at `pos` and is of length `len` when str is String type, or
 * returns the slice of byte array that starts at `pos` in byte and is of length `len`
 * when str is Binary type.
 *
 * @note The position is not zero-based, but 1-based.
*
* @group string_funcs
* @since 1.5.0
*/
def substring(str: Column, pos: Int, len: Int): Column = withExpr {
Substring(str.expr, lit(pos).expr, lit(len).expr)
}
/**
 * Returns the substring from string str before count occurrences of the delimiter delim.
 * If count is positive, everything to the left of the final delimiter (counting from the
 * left) is returned. If count is negative, everything to the right of the final delimiter
 * (counting from the right) is returned. substring_index performs a case-sensitive match
 * when searching for delim.
*
 * @group string_funcs
 * @since 1.5.0
*/
def substring_index(str: Column, delim: String, count: Int): Column = withExpr {
SubstringIndex(str.expr, lit(delim).expr, lit(count).expr)
}
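// Illustrative usage sketch: with a literal input, positive and negative counts read
// from opposite ends of the string.
//   df.select(substring_index(lit("a.b.c"), ".", 2))   // "a.b"
//   df.select(substring_index(lit("a.b.c"), ".", -2))  // "b.c"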
/**
 * Translates any character in the src by a character in replaceString.
 * The characters in replaceString correspond positionally to the characters in matchingString.
 * The translation occurs whenever a character in the string matches a character in
 * the `matchingString`.
*
* @group string_funcs
* @since 1.5.0
*/
def translate(src: Column, matchingString: String, replaceString: String): Column = withExpr {
StringTranslate(src.expr, lit(matchingString).expr, lit(replaceString).expr)
}
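// Illustrative usage sketch (hypothetical column name): each character of matchingString
// maps positionally to the character of replaceString at the same index.
//   df.select(translate($"s", "ab", "12"))  // 'a' -> '1', 'b' -> '2', everything else unchanged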
/**
 * Trims the spaces from both ends of the specified string column.
*
* @group string_funcs
* @since 1.5.0
*/
def trim(e: Column): Column = withExpr { StringTrim(e.expr) }
/**
 * Trims the specified character from both ends of the specified string column.
* @group string_funcs
* @since 2.3.0
*/
def trim(e: Column, trimString: String): Column = withExpr {
StringTrim(e.expr, Literal(trimString))
}
/**
* Converts a string column to upper case.
*
* @group string_funcs
* @since 1.3.0
*/
def upper(e: Column): Column = withExpr { Upper(e.expr) }
//////////////////////////////////////////////////////////////////////////////////////////////
// DateTime functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Returns the date that is numMonths after startDate.
*
* @group datetime_funcs
* @since 1.5.0
*/
def add_months(startDate: Column, numMonths: Int): Column = withExpr {
AddMonths(startDate.expr, Literal(numMonths))
}
/**
* Returns the current date as a date column.
*
* @group datetime_funcs
* @since 1.5.0
*/
def current_date(): Column = withExpr { CurrentDate() }
/**
* Returns the current timestamp as a timestamp column.
*
* @group datetime_funcs
* @since 1.5.0
*/
def current_timestamp(): Column = withExpr { CurrentTimestamp() }
/**
* Converts a date/timestamp/string to a value of string in the format specified by the date
* format given by the second argument.
*
* A pattern `dd.MM.yyyy` would return a string like `18.03.1993`.
* All pattern letters of `java.text.SimpleDateFormat` can be used.
*
* @note Use specialized functions like [[year]] whenever possible as they benefit from a
* specialized implementation.
*
* @group datetime_funcs
* @since 1.5.0
*/
def date_format(dateExpr: Column, format: String): Column = withExpr {
DateFormatClass(dateExpr.expr, Literal(format))
}
/**
* Returns the date that is `days` days after `start`
* @group datetime_funcs
* @since 1.5.0
*/
def date_add(start: Column, days: Int): Column = withExpr { DateAdd(start.expr, Literal(days)) }
/**
* Returns the date that is `days` days before `start`
* @group datetime_funcs
* @since 1.5.0
*/
def date_sub(start: Column, days: Int): Column = withExpr { DateSub(start.expr, Literal(days)) }
/**
* Returns the number of days from `start` to `end`.
* @group datetime_funcs
* @since 1.5.0
*/
def datediff(end: Column, start: Column): Column = withExpr { DateDiff(end.expr, start.expr) }
/**
* Extracts the year as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 1.5.0
*/
def year(e: Column): Column = withExpr { Year(e.expr) }
/**
* Extracts the quarter as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 1.5.0
*/
def quarter(e: Column): Column = withExpr { Quarter(e.expr) }
/**
* Extracts the month as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 1.5.0
*/
def month(e: Column): Column = withExpr { Month(e.expr) }
/**
* Extracts the day of the week as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 2.3.0
*/
def dayofweek(e: Column): Column = withExpr { DayOfWeek(e.expr) }
/**
* Extracts the day of the month as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 1.5.0
*/
def dayofmonth(e: Column): Column = withExpr { DayOfMonth(e.expr) }
/**
* Extracts the day of the year as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 1.5.0
*/
def dayofyear(e: Column): Column = withExpr { DayOfYear(e.expr) }
/**
* Extracts the hours as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 1.5.0
*/
def hour(e: Column): Column = withExpr { Hour(e.expr) }
/**
* Given a date column, returns the last day of the month which the given date belongs to.
* For example, input "2015-07-27" returns "2015-07-31" since July 31 is the last day of the
* month in July 2015.
*
* @group datetime_funcs
* @since 1.5.0
*/
def last_day(e: Column): Column = withExpr { LastDay(e.expr) }
/**
* Extracts the minutes as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 1.5.0
*/
def minute(e: Column): Column = withExpr { Minute(e.expr) }
/**
* Returns number of months between dates `date1` and `date2`.
* If `date1` is later than `date2`, then the result is positive.
* If `date1` and `date2` are on the same day of month, or both are the last day of month,
* time of day will be ignored.
*
* Otherwise, the difference is calculated based on 31 days per month, and rounded to
* 8 digits.
* @group datetime_funcs
* @since 1.5.0
*/
def months_between(date1: Column, date2: Column): Column = withExpr {
new MonthsBetween(date1.expr, date2.expr)
}
/**
* Returns number of months between dates `date1` and `date2`. If `roundOff` is set to true, the
* result is rounded off to 8 digits; it is not rounded otherwise.
* @group datetime_funcs
* @since 2.4.0
*/
def months_between(date1: Column, date2: Column, roundOff: Boolean): Column = withExpr {
MonthsBetween(date1.expr, date2.expr, lit(roundOff).expr)
}
/**
* Given a date column, returns the first date which is later than the value of the date column
* that is on the specified day of the week.
*
* For example, `next_day('2015-07-27', "Sunday")` returns 2015-08-02 because that is the first
* Sunday after 2015-07-27.
*
* Day of the week parameter is case insensitive, and accepts:
* "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
*
* @group datetime_funcs
* @since 1.5.0
*/
def next_day(date: Column, dayOfWeek: String): Column = withExpr {
NextDay(date.expr, lit(dayOfWeek).expr)
}
/**
* Extracts the seconds as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 1.5.0
*/
def second(e: Column): Column = withExpr { Second(e.expr) }
/**
* Extracts the week number as an integer from a given date/timestamp/string.
* @group datetime_funcs
* @since 1.5.0
*/
def weekofyear(e: Column): Column = withExpr { WeekOfYear(e.expr) }
/**
 * Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
 * representing the timestamp of that moment in the current system time zone, using the
 * default format "yyyy-MM-dd HH:mm:ss".
* @group datetime_funcs
* @since 1.5.0
*/
def from_unixtime(ut: Column): Column = withExpr {
FromUnixTime(ut.expr, Literal("yyyy-MM-dd HH:mm:ss"))
}
/**
* Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
* representing the timestamp of that moment in the current system time zone in the given
* format.
* @group datetime_funcs
* @since 1.5.0
*/
def from_unixtime(ut: Column, f: String): Column = withExpr {
FromUnixTime(ut.expr, Literal(f))
}
/**
* Returns the current Unix timestamp (in seconds).
*
* @note All calls of `unix_timestamp` within the same query return the same value
* (i.e. the current timestamp is calculated at the start of query evaluation).
*
* @group datetime_funcs
* @since 1.5.0
*/
def unix_timestamp(): Column = withExpr {
UnixTimestamp(CurrentTimestamp(), Literal("yyyy-MM-dd HH:mm:ss"))
}
/**
 * Converts a time string in format yyyy-MM-dd HH:mm:ss to a Unix timestamp (in seconds),
 * using the default timezone and the default locale.
 * Returns `null` on failure.
*
* @group datetime_funcs
* @since 1.5.0
*/
def unix_timestamp(s: Column): Column = withExpr {
UnixTimestamp(s.expr, Literal("yyyy-MM-dd HH:mm:ss"))
}
/**
 * Converts a time string with the given pattern to a Unix timestamp (in seconds).
 * Returns `null` on failure.
*
* @see <a href="http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html">
* Customizing Formats</a>
* @group datetime_funcs
* @since 1.5.0
*/
def unix_timestamp(s: Column, p: String): Column = withExpr { UnixTimestamp(s.expr, Literal(p)) }
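// Illustrative usage sketch (hypothetical column name): the pattern follows
// java.text.SimpleDateFormat, and a string that does not parse yields null.
//   df.select(unix_timestamp($"ts_string", "yyyy/MM/dd HH:mm"))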
/**
 * Converts a time string to a timestamp by casting rules to `TimestampType`.
* @group datetime_funcs
* @since 2.2.0
*/
def to_timestamp(s: Column): Column = withExpr {
new ParseToTimestamp(s.expr)
}
/**
 * Converts a time string with the given format
 * (see [http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html])
 * to a timestamp, returning null on failure.
* @group datetime_funcs
* @since 2.2.0
*/
def to_timestamp(s: Column, fmt: String): Column = withExpr {
new ParseToTimestamp(s.expr, Literal(fmt))
}
/**
* Converts the column into `DateType` by casting rules to `DateType`.
*
* @group datetime_funcs
* @since 1.5.0
*/
def to_date(e: Column): Column = withExpr { new ParseToDate(e.expr) }
/**
 * Converts the column into a `DateType` with the specified format
 * (see [http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html]),
 * returning null on failure.
*
* @group datetime_funcs
* @since 2.2.0
*/
def to_date(e: Column, fmt: String): Column = withExpr {
new ParseToDate(e.expr, Literal(fmt))
}
/**
* Returns date truncated to the unit specified by the format.
*
* @param format: 'year', 'yyyy', 'yy' for truncate by year,
* or 'month', 'mon', 'mm' for truncate by month
*
* @group datetime_funcs
* @since 1.5.0
*/
def trunc(date: Column, format: String): Column = withExpr {
TruncDate(date.expr, Literal(format))
}
/**
* Returns timestamp truncated to the unit specified by the format.
*
 * @param format: 'year', 'yyyy', 'yy' for truncate by year,
 *                'month', 'mon', 'mm' for truncate by month,
 *                'day', 'dd' for truncate by day,
 *                Other options are: 'second', 'minute', 'hour', 'week', 'quarter'
*
* @group datetime_funcs
* @since 2.3.0
*/
def date_trunc(format: String, timestamp: Column): Column = withExpr {
TruncTimestamp(Literal(format), timestamp.expr)
}
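// Illustrative usage sketch (hypothetical column name): unlike trunc, which returns a date,
// date_trunc keeps TimestampType, so sub-day units are meaningful.
//   df.select(date_trunc("hour", $"event_time"))  // 2017-07-14 02:40:00 -> 2017-07-14 02:00:00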
/**
* Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in UTC, and renders
* that time as a timestamp in the given time zone. For example, 'GMT+1' would yield
* '2017-07-14 03:40:00.0'.
* @group datetime_funcs
* @since 1.5.0
*/
def from_utc_timestamp(ts: Column, tz: String): Column = withExpr {
FromUTCTimestamp(ts.expr, Literal(tz))
}
/**
* Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in UTC, and renders
* that time as a timestamp in the given time zone. For example, 'GMT+1' would yield
* '2017-07-14 03:40:00.0'.
* @group datetime_funcs
* @since 2.4.0
*/
def from_utc_timestamp(ts: Column, tz: Column): Column = withExpr {
FromUTCTimestamp(ts.expr, tz.expr)
}
/**
* Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in the given time
* zone, and renders that time as a timestamp in UTC. For example, 'GMT+1' would yield
* '2017-07-14 01:40:00.0'.
* @group datetime_funcs
* @since 1.5.0
*/
def to_utc_timestamp(ts: Column, tz: String): Column = withExpr {
ToUTCTimestamp(ts.expr, Literal(tz))
}
/**
* Given a timestamp like '2017-07-14 02:40:00.0', interprets it as a time in the given time
* zone, and renders that time as a timestamp in UTC. For example, 'GMT+1' would yield
* '2017-07-14 01:40:00.0'.
* @group datetime_funcs
* @since 2.4.0
*/
def to_utc_timestamp(ts: Column, tz: Column): Column = withExpr {
ToUTCTimestamp(ts.expr, tz.expr)
}
/**
* Bucketize rows into one or more time windows given a timestamp specifying column. Window
* starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
* [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
* the order of months are not supported. The following example takes the average stock price for
* a one minute window every 10 seconds starting 5 seconds after the hour:
*
* {{{
* val df = ... // schema => timestamp: TimestampType, stockId: StringType, price: DoubleType
* df.groupBy(window($"time", "1 minute", "10 seconds", "5 seconds"), $"stockId")
* .agg(mean("price"))
* }}}
*
* The windows will look like:
*
* {{{
* 09:00:05-09:01:05
* 09:00:15-09:01:15
* 09:00:25-09:01:25 ...
* }}}
*
* For a streaming query, you may use the function `current_timestamp` to generate windows on
* processing time.
*
* @param timeColumn The column or the expression to use as the timestamp for windowing by time.
* The time column must be of TimestampType.
* @param windowDuration A string specifying the width of the window, e.g. `10 minutes`,
* `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for
* valid duration identifiers. Note that the duration is a fixed length of
* time, and does not vary over time according to a calendar. For example,
* `1 day` always means 86,400,000 milliseconds, not a calendar day.
* @param slideDuration A string specifying the sliding interval of the window, e.g. `1 minute`.
* A new window will be generated every `slideDuration`. Must be less than
* or equal to the `windowDuration`. Check
* `org.apache.spark.unsafe.types.CalendarInterval` for valid duration
* identifiers. This duration is likewise absolute, and does not vary
* according to a calendar.
* @param startTime The offset with respect to 1970-01-01 00:00:00 UTC with which to start
* window intervals. For example, in order to have hourly tumbling windows that
* start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide
* `startTime` as `15 minutes`.
*
* @group datetime_funcs
* @since 2.0.0
*/
def window(
timeColumn: Column,
windowDuration: String,
slideDuration: String,
startTime: String): Column = {
withExpr {
TimeWindow(timeColumn.expr, windowDuration, slideDuration, startTime)
}.as("window")
}
/**
* Bucketize rows into one or more time windows given a timestamp specifying column. Window
* starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
* [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
* the order of months are not supported. The windows start beginning at 1970-01-01 00:00:00 UTC.
* The following example takes the average stock price for a one minute window every 10 seconds:
*
* {{{
* val df = ... // schema => timestamp: TimestampType, stockId: StringType, price: DoubleType
* df.groupBy(window($"time", "1 minute", "10 seconds"), $"stockId")
* .agg(mean("price"))
* }}}
*
* The windows will look like:
*
* {{{
* 09:00:00-09:01:00
* 09:00:10-09:01:10
* 09:00:20-09:01:20 ...
* }}}
*
* For a streaming query, you may use the function `current_timestamp` to generate windows on
* processing time.
*
* @param timeColumn The column or the expression to use as the timestamp for windowing by time.
* The time column must be of TimestampType.
* @param windowDuration A string specifying the width of the window, e.g. `10 minutes`,
* `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for
* valid duration identifiers. Note that the duration is a fixed length of
* time, and does not vary over time according to a calendar. For example,
* `1 day` always means 86,400,000 milliseconds, not a calendar day.
* @param slideDuration A string specifying the sliding interval of the window, e.g. `1 minute`.
* A new window will be generated every `slideDuration`. Must be less than
* or equal to the `windowDuration`. Check
* `org.apache.spark.unsafe.types.CalendarInterval` for valid duration
* identifiers. This duration is likewise absolute, and does not vary
* according to a calendar.
*
* @group datetime_funcs
* @since 2.0.0
*/
def window(timeColumn: Column, windowDuration: String, slideDuration: String): Column = {
window(timeColumn, windowDuration, slideDuration, "0 second")
}
/**
* Generates tumbling time windows given a timestamp specifying column. Window
* starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
* [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
* the order of months are not supported. The windows start beginning at 1970-01-01 00:00:00 UTC.
* The following example takes the average stock price for a one minute tumbling window:
*
* {{{
* val df = ... // schema => timestamp: TimestampType, stockId: StringType, price: DoubleType
* df.groupBy(window($"time", "1 minute"), $"stockId")
* .agg(mean("price"))
* }}}
*
* The windows will look like:
*
* {{{
* 09:00:00-09:01:00
* 09:01:00-09:02:00
* 09:02:00-09:03:00 ...
* }}}
*
* For a streaming query, you may use the function `current_timestamp` to generate windows on
* processing time.
*
* @param timeColumn The column or the expression to use as the timestamp for windowing by time.
* The time column must be of TimestampType.
* @param windowDuration A string specifying the width of the window, e.g. `10 minutes`,
* `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for
* valid duration identifiers.
*
* @group datetime_funcs
* @since 2.0.0
*/
def window(timeColumn: Column, windowDuration: String): Column = {
window(timeColumn, windowDuration, windowDuration, "0 second")
}
//////////////////////////////////////////////////////////////////////////////////////////////
// Collection functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Returns null if the array is null, true if the array contains `value`, and false otherwise.
* @group collection_funcs
* @since 1.5.0
*/
def array_contains(column: Column, value: Any): Column = withExpr {
ArrayContains(column.expr, lit(value).expr)
}
/**
* Returns `true` if `a1` and `a2` have at least one non-null element in common. If not and both
* the arrays are non-empty and any of them contains a `null`, it returns `null`. It returns
* `false` otherwise.
* @group collection_funcs
* @since 2.4.0
*/
def arrays_overlap(a1: Column, a2: Column): Column = withExpr {
ArraysOverlap(a1.expr, a2.expr)
}
/**
* Returns an array containing all the elements in `x` from index `start` (or starting from the
* end if `start` is negative) with the specified `length`.
* @group collection_funcs
* @since 2.4.0
*/
def slice(x: Column, start: Int, length: Int): Column = withExpr {
Slice(x.expr, Literal(start), Literal(length))
}
/**
* Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
* `nullReplacement`.
* @group collection_funcs
* @since 2.4.0
*/
def array_join(column: Column, delimiter: String, nullReplacement: String): Column = withExpr {
ArrayJoin(column.expr, Literal(delimiter), Some(Literal(nullReplacement)))
}
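// Illustrative usage sketch (hypothetical column name): the three-argument form substitutes
// nullReplacement for null elements instead of dropping them.
//   df.select(array_join($"tags", ", ", "n/a"))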
/**
* Concatenates the elements of `column` using the `delimiter`.
* @group collection_funcs
* @since 2.4.0
*/
def array_join(column: Column, delimiter: String): Column = withExpr {
ArrayJoin(column.expr, Literal(delimiter), None)
}
/**
* Concatenates multiple input columns together into a single column.
* The function works with strings, binary and compatible array columns.
*
* @group collection_funcs
* @since 1.5.0
*/
@scala.annotation.varargs
def concat(exprs: Column*): Column = withExpr { Concat(exprs.map(_.expr)) }
/**
 * Locates the position of the first occurrence of the value in the given array as a long.
 * Returns null if either of the arguments is null.
 *
 * @note The position is not zero-based, but 1-based. Returns 0 if value
 * could not be found in array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_position(column: Column, value: Any): Column = withExpr {
ArrayPosition(column.expr, lit(value).expr)
}
/**
 * Returns the element of the array at index `value` if `column` is an array, or the value
 * for key `value` if `column` is a map.
*
* @group collection_funcs
* @since 2.4.0
*/
def element_at(column: Column, value: Any): Column = withExpr {
ElementAt(column.expr, lit(value).expr)
}
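// Illustrative usage sketch (hypothetical column names): the index/key goes through lit(),
// so plain Scala values work; array indices are 1-based.
//   df.select(element_at($"arr", 1), element_at($"m", "some_key"))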
/**
* Sorts the input array in ascending order. The elements of the input array must be orderable.
* Null elements will be placed at the end of the returned array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_sort(e: Column): Column = withExpr { ArraySort(e.expr) }
/**
 * Removes all elements that equal `element` from the given array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_remove(column: Column, element: Any): Column = withExpr {
ArrayRemove(column.expr, lit(element).expr)
}
/**
* Removes duplicate values from the array.
* @group collection_funcs
* @since 2.4.0
*/
def array_distinct(e: Column): Column = withExpr { ArrayDistinct(e.expr) }
/**
* Returns an array of the elements in the union of the given two arrays, without duplicates.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_union(col1: Column, col2: Column): Column = withExpr {
ArrayUnion(col1.expr, col2.expr)
}
/**
* Creates a new row for each element in the given array or map column.
*
* @group collection_funcs
* @since 1.3.0
*/
def explode(e: Column): Column = withExpr { Explode(e.expr) }
/**
* Creates a new row for each element in the given array or map column.
* Unlike explode, if the array/map is null or empty then null is produced.
*
* @group collection_funcs
* @since 2.2.0
*/
def explode_outer(e: Column): Column = withExpr { GeneratorOuter(Explode(e.expr)) }
/**
* Creates a new row for each element with position in the given array or map column.
*
* @group collection_funcs
* @since 2.1.0
*/
def posexplode(e: Column): Column = withExpr { PosExplode(e.expr) }
/**
* Creates a new row for each element with position in the given array or map column.
* Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
*
* @group collection_funcs
* @since 2.2.0
*/
def posexplode_outer(e: Column): Column = withExpr { GeneratorOuter(PosExplode(e.expr)) }
/**
 * Extracts a json object from a json string based on the specified json path, and returns
 * the json string of the extracted json object. Returns null if the input json string is invalid.
*
* @group collection_funcs
* @since 1.6.0
*/
def get_json_object(e: Column, path: String): Column = withExpr {
GetJsonObject(e.expr, lit(path).expr)
}
/**
* Creates a new row for a json column according to the given field names.
*
* @group collection_funcs
* @since 1.6.0
*/
@scala.annotation.varargs
def json_tuple(json: Column, fields: String*): Column = withExpr {
require(fields.nonEmpty, "at least 1 field name should be given.")
JsonTuple(json.expr +: fields.map(Literal.apply))
}
/**
* (Scala-specific) Parses a column containing a JSON string into a `StructType` with the
* specified schema. Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
* @param options options to control how the json is parsed. Accepts the same options as the
* json data source.
*
* @group collection_funcs
* @since 2.1.0
*/
def from_json(e: Column, schema: StructType, options: Map[String, String]): Column =
from_json(e, schema.asInstanceOf[DataType], options)
/**
* (Scala-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
 * @param options options to control how the json is parsed. Accepts the same options as
 *                the json data source.
*
* @group collection_funcs
* @since 2.2.0
*/
def from_json(e: Column, schema: DataType, options: Map[String, String]): Column = withExpr {
JsonToStructs(schema, options, e.expr)
}
/**
* (Java-specific) Parses a column containing a JSON string into a `StructType` with the
* specified schema. Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
 * @param options options to control how the json is parsed. Accepts the same options as
 *                the json data source.
*
* @group collection_funcs
* @since 2.1.0
*/
def from_json(e: Column, schema: StructType, options: java.util.Map[String, String]): Column =
from_json(e, schema, options.asScala.toMap)
/**
* (Java-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
 * @param options options to control how the json is parsed. Accepts the same options as
 *                the json data source.
*
* @group collection_funcs
* @since 2.2.0
*/
def from_json(e: Column, schema: DataType, options: java.util.Map[String, String]): Column =
from_json(e, schema, options.asScala.toMap)
/**
* Parses a column containing a JSON string into a `StructType` with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
*
* @group collection_funcs
* @since 2.1.0
*/
def from_json(e: Column, schema: StructType): Column =
from_json(e, schema, Map.empty[String, String])
/**
* Parses a column containing a JSON string into a `MapType` with `StringType` as keys type,
* `StructType` or `ArrayType` of `StructType`s with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
*
* @group collection_funcs
* @since 2.2.0
*/
def from_json(e: Column, schema: DataType): Column =
from_json(e, schema, Map.empty[String, String])
/**
* (Java-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
 * @param schema the schema to use when parsing the json string, provided as a string.
 *               In Spark 2.1, the user-provided schema has to be in JSON format. Since
 *               Spark 2.2, the DDL format is also supported for the schema.
*
* @group collection_funcs
* @since 2.1.0
*/
def from_json(e: Column, schema: String, options: java.util.Map[String, String]): Column = {
from_json(e, schema, options.asScala.toMap)
}
/**
* (Scala-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
 * @param schema the schema to use when parsing the json string, provided as a string. It
 *               can be a JSON-format string or a DDL-formatted string.
*
* @group collection_funcs
* @since 2.3.0
*/
def from_json(e: Column, schema: String, options: Map[String, String]): Column = {
val dataType = try {
DataType.fromJson(schema)
} catch {
case NonFatal(_) => DataType.fromDDL(schema)
}
from_json(e, dataType, options)
}
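// Illustrative usage sketch (hypothetical column name): thanks to the fromJson/fromDDL
// fallback above, both schema spellings below are accepted.
//   df.select(from_json($"payload", "a INT, b STRING", Map.empty[String, String]))
//   df.select(from_json($"payload", """{"type":"struct","fields":[]}""", Map.empty[String, String]))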
/**
* (Scala-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
*
* @group collection_funcs
* @since 2.4.0
*/
def from_json(e: Column, schema: Column): Column = {
from_json(e, schema, Map.empty[String, String].asJava)
}
/**
* (Java-specific) Parses a column containing a JSON string into a `MapType` with `StringType`
* as keys type, `StructType` or `ArrayType` of `StructType`s with the specified schema.
* Returns `null`, in the case of an unparseable string.
*
* @param e a string column containing JSON data.
* @param schema the schema to use when parsing the json string
 * @param options options to control how the json is parsed. Accepts the same options as
 *                the json data source.
*
* @group collection_funcs
* @since 2.4.0
*/
def from_json(e: Column, schema: Column, options: java.util.Map[String, String]): Column = {
withExpr(new JsonToStructs(e.expr, schema.expr, options.asScala.toMap))
}
/**
* Parses a column containing a JSON string and infers its schema.
*
* @param e a string column containing JSON data.
*
* @group collection_funcs
* @since 2.4.0
*/
def schema_of_json(e: Column): Column = withExpr(new SchemaOfJson(e.expr))
/**
* (Scala-specific) Converts a column containing a `StructType`, `ArrayType` of `StructType`s,
* a `MapType` or `ArrayType` of `MapType`s into a JSON string with the specified schema.
* Throws an exception, in the case of an unsupported type.
*
* @param e a column containing a struct or array of the structs.
 * @param options options to control how the struct column is converted into a json string.
 *                Accepts the same options as the json data source.
*
* @group collection_funcs
* @since 2.1.0
*/
def to_json(e: Column, options: Map[String, String]): Column = withExpr {
StructsToJson(options, e.expr)
}
/**
* (Java-specific) Converts a column containing a `StructType`, `ArrayType` of `StructType`s,
* a `MapType` or `ArrayType` of `MapType`s into a JSON string with the specified schema.
* Throws an exception, in the case of an unsupported type.
*
* @param e a column containing a struct or array of the structs.
 * @param options options to control how the struct column is converted into a json string.
 *                Accepts the same options as the json data source.
*
* @group collection_funcs
* @since 2.1.0
*/
def to_json(e: Column, options: java.util.Map[String, String]): Column =
to_json(e, options.asScala.toMap)
/**
* Converts a column containing a `StructType`, `ArrayType` of `StructType`s,
* a `MapType` or `ArrayType` of `MapType`s into a JSON string with the specified schema.
* Throws an exception, in the case of an unsupported type.
*
* @param e a column containing a struct or array of the structs.
*
* @group collection_funcs
* @since 2.1.0
*/
def to_json(e: Column): Column =
to_json(e, Map.empty[String, String])
/**
* Returns length of array or map.
*
* @group collection_funcs
* @since 1.5.0
*/
def size(e: Column): Column = withExpr { Size(e.expr) }
/**
* Sorts the input array for the given column in ascending order,
* according to the natural ordering of the array elements.
* Null elements will be placed at the beginning of the returned array.
*
* @group collection_funcs
* @since 1.5.0
*/
def sort_array(e: Column): Column = sort_array(e, asc = true)
/**
* Sorts the input array for the given column in ascending or descending order,
* according to the natural ordering of the array elements.
* Null elements will be placed at the beginning of the returned array in ascending order or
* at the end of the returned array in descending order.
*
* @group collection_funcs
* @since 1.5.0
*/
def sort_array(e: Column, asc: Boolean): Column = withExpr { SortArray(e.expr, lit(asc).expr) }
/**
* Returns the minimum value in the array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_min(e: Column): Column = withExpr { ArrayMin(e.expr) }
/**
* Returns the maximum value in the array.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_max(e: Column): Column = withExpr { ArrayMax(e.expr) }
/**
* Returns a reversed string or an array with reverse order of elements.
* @group collection_funcs
* @since 1.5.0
*/
def reverse(e: Column): Column = withExpr { Reverse(e.expr) }
/**
* Creates a single array from an array of arrays. If a structure of nested arrays is deeper than
* two levels, only one level of nesting is removed.
* @group collection_funcs
* @since 2.4.0
*/
def flatten(e: Column): Column = withExpr { Flatten(e.expr) }
/**
 * Generates a sequence of integers from start to stop, incrementing by step.
*
* @group collection_funcs
* @since 2.4.0
*/
def sequence(start: Column, stop: Column, step: Column): Column = withExpr {
new Sequence(start.expr, stop.expr, step.expr)
}
/**
 * Generates a sequence of integers from start to stop,
 * incrementing by 1 if start is less than or equal to stop, otherwise by -1.
*
* @group collection_funcs
* @since 2.4.0
*/
def sequence(start: Column, stop: Column): Column = withExpr {
new Sequence(start.expr, stop.expr)
}
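// Illustrative usage sketch: the two-argument form picks the step direction automatically.
//   df.select(sequence(lit(1), lit(5)))  // array(1, 2, 3, 4, 5)
//   df.select(sequence(lit(5), lit(1)))  // array(5, 4, 3, 2, 1)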
/**
* Creates an array containing the left argument repeated the number of times given by the
* right argument.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_repeat(left: Column, right: Column): Column = withExpr {
ArrayRepeat(left.expr, right.expr)
}
/**
* Creates an array containing the left argument repeated the number of times given by the
* right argument.
*
* @group collection_funcs
* @since 2.4.0
*/
def array_repeat(e: Column, count: Int): Column = array_repeat(e, lit(count))
/**
* Returns an unordered array containing the keys of the map.
* @group collection_funcs
* @since 2.3.0
*/
def map_keys(e: Column): Column = withExpr { MapKeys(e.expr) }
/**
* Returns an unordered array containing the values of the map.
* @group collection_funcs
* @since 2.3.0
*/
def map_values(e: Column): Column = withExpr { MapValues(e.expr) }
/**
* Returns an unordered array of all entries in the given map.
* @group collection_funcs
* @since 2.4.0
*/
def map_entries(e: Column): Column = withExpr { MapEntries(e.expr) }
/**
* Returns a map created from the given array of entries.
* @group collection_funcs
* @since 2.4.0
*/
def map_from_entries(e: Column): Column = withExpr { MapFromEntries(e.expr) }
/**
* Returns a merged array of structs in which the N-th struct contains all N-th values of input
* arrays.
* @group collection_funcs
* @since 2.4.0
*/
@scala.annotation.varargs
def arrays_zip(e: Column*): Column = withExpr { ArraysZip(e.map(_.expr)) }
/**
* Returns the union of all the given maps.
* @group collection_funcs
* @since 2.4.0
*/
@scala.annotation.varargs
def map_concat(cols: Column*): Column = withExpr { MapConcat(cols.map(_.expr)) }
// scalastyle:off line.size.limit
// scalastyle:off parameter.number
/* Use the following code to generate:
(0 to 10).foreach { x =>
val types = (1 to x).foldRight("RT")((i, s) => {s"A$i, $s"})
val typeTags = (1 to x).map(i => s"A$i: TypeTag").foldLeft("RT: TypeTag")(_ + ", " + _)
val inputTypes = (1 to x).foldRight("Nil")((i, s) => {s"ScalaReflection.schemaFor(typeTag[A$i]).dataType :: $s"})
println(s"""
|/**
| * Defines a Scala closure of $x arguments as user-defined function (UDF).
| * The data types are automatically inferred based on the Scala closure's
| * signature. By default the returned UDF is deterministic. To change it to
| * nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
| *
| * @group udf_funcs
| * @since 1.3.0
| */
|def udf[$typeTags](f: Function$x[$types]): UserDefinedFunction = {
| val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
| val inputTypes = Try($inputTypes).toOption
| val udf = UserDefinedFunction(f, dataType, inputTypes)
| if (nullable) udf else udf.asNonNullable()
|}""".stripMargin)
}
(0 to 10).foreach { i =>
val extTypeArgs = (0 to i).map(_ => "_").mkString(", ")
val anyTypeArgs = (0 to i).map(_ => "Any").mkString(", ")
val anyCast = s".asInstanceOf[UDF$i[$anyTypeArgs]]"
val anyParams = (1 to i).map(_ => "_: Any").mkString(", ")
val funcCall = if (i == 0) "() => func" else "func"
println(s"""
|/**
| * Defines a Java UDF$i instance as user-defined function (UDF).
| * The caller must specify the output data type, and there is no automatic input type coercion.
| * By default the returned UDF is deterministic. To change it to nondeterministic, call the
| * API `UserDefinedFunction.asNondeterministic()`.
| *
| * @group udf_funcs
| * @since 2.3.0
| */
|def udf(f: UDF$i[$extTypeArgs], returnType: DataType): UserDefinedFunction = {
| val func = f$anyCast.call($anyParams)
| UserDefinedFunction($funcCall, returnType, inputTypes = None)
|}""".stripMargin)
}
*/
//////////////////////////////////////////////////////////////////////////////////////////////
// Scala UDF functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
* Defines a Scala closure of 0 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag](f: Function0[RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
/**
 * Defines a Scala closure of 1 argument as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag](f: Function1[A1, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
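// Illustrative usage sketch (hypothetical column name): the return and argument types are
// inferred from the closure's signature, and nullability from the return type.
//   val plusOne = udf((x: Int) => x + 1)
//   df.select(plusOne($"value"))
//   val noise = udf(() => scala.util.Random.nextDouble).asNondeterministic()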
/**
* Defines a Scala closure of 2 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag](f: Function2[A1, A2, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: ScalaReflection.schemaFor(typeTag[A2]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 3 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag](f: Function3[A1, A2, A3, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: ScalaReflection.schemaFor(typeTag[A2]).dataType :: ScalaReflection.schemaFor(typeTag[A3]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 4 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag](f: Function4[A1, A2, A3, A4, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: ScalaReflection.schemaFor(typeTag[A2]).dataType :: ScalaReflection.schemaFor(typeTag[A3]).dataType :: ScalaReflection.schemaFor(typeTag[A4]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 5 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag](f: Function5[A1, A2, A3, A4, A5, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: ScalaReflection.schemaFor(typeTag[A2]).dataType :: ScalaReflection.schemaFor(typeTag[A3]).dataType :: ScalaReflection.schemaFor(typeTag[A4]).dataType :: ScalaReflection.schemaFor(typeTag[A5]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 6 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag](f: Function6[A1, A2, A3, A4, A5, A6, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: ScalaReflection.schemaFor(typeTag[A2]).dataType :: ScalaReflection.schemaFor(typeTag[A3]).dataType :: ScalaReflection.schemaFor(typeTag[A4]).dataType :: ScalaReflection.schemaFor(typeTag[A5]).dataType :: ScalaReflection.schemaFor(typeTag[A6]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 7 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag](f: Function7[A1, A2, A3, A4, A5, A6, A7, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: ScalaReflection.schemaFor(typeTag[A2]).dataType :: ScalaReflection.schemaFor(typeTag[A3]).dataType :: ScalaReflection.schemaFor(typeTag[A4]).dataType :: ScalaReflection.schemaFor(typeTag[A5]).dataType :: ScalaReflection.schemaFor(typeTag[A6]).dataType :: ScalaReflection.schemaFor(typeTag[A7]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 8 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag](f: Function8[A1, A2, A3, A4, A5, A6, A7, A8, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: ScalaReflection.schemaFor(typeTag[A2]).dataType :: ScalaReflection.schemaFor(typeTag[A3]).dataType :: ScalaReflection.schemaFor(typeTag[A4]).dataType :: ScalaReflection.schemaFor(typeTag[A5]).dataType :: ScalaReflection.schemaFor(typeTag[A6]).dataType :: ScalaReflection.schemaFor(typeTag[A7]).dataType :: ScalaReflection.schemaFor(typeTag[A8]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 9 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag](f: Function9[A1, A2, A3, A4, A5, A6, A7, A8, A9, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: ScalaReflection.schemaFor(typeTag[A2]).dataType :: ScalaReflection.schemaFor(typeTag[A3]).dataType :: ScalaReflection.schemaFor(typeTag[A4]).dataType :: ScalaReflection.schemaFor(typeTag[A5]).dataType :: ScalaReflection.schemaFor(typeTag[A6]).dataType :: ScalaReflection.schemaFor(typeTag[A7]).dataType :: ScalaReflection.schemaFor(typeTag[A8]).dataType :: ScalaReflection.schemaFor(typeTag[A9]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
/**
* Defines a Scala closure of 10 arguments as user-defined function (UDF).
* The data types are automatically inferred based on the Scala closure's
* signature. By default the returned UDF is deterministic. To change it to
* nondeterministic, call the API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 1.3.0
*/
def udf[RT: TypeTag, A1: TypeTag, A2: TypeTag, A3: TypeTag, A4: TypeTag, A5: TypeTag, A6: TypeTag, A7: TypeTag, A8: TypeTag, A9: TypeTag, A10: TypeTag](f: Function10[A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, RT]): UserDefinedFunction = {
val ScalaReflection.Schema(dataType, nullable) = ScalaReflection.schemaFor[RT]
val inputTypes = Try(ScalaReflection.schemaFor(typeTag[A1]).dataType :: ScalaReflection.schemaFor(typeTag[A2]).dataType :: ScalaReflection.schemaFor(typeTag[A3]).dataType :: ScalaReflection.schemaFor(typeTag[A4]).dataType :: ScalaReflection.schemaFor(typeTag[A5]).dataType :: ScalaReflection.schemaFor(typeTag[A6]).dataType :: ScalaReflection.schemaFor(typeTag[A7]).dataType :: ScalaReflection.schemaFor(typeTag[A8]).dataType :: ScalaReflection.schemaFor(typeTag[A9]).dataType :: ScalaReflection.schemaFor(typeTag[A10]).dataType :: Nil).toOption
val udf = UserDefinedFunction(f, dataType, inputTypes)
if (nullable) udf else udf.asNonNullable()
}
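  // Illustrative usage of the typed variants above (a sketch, assuming `spark.implicits._`
  // is imported and a DataFrame `df` with an integer column "value" is in scope):
  //
  //   val squared = udf((x: Int) => x * x)
  //   df.select(squared($"value"))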
//////////////////////////////////////////////////////////////////////////////////////////////
// Java UDF functions
//////////////////////////////////////////////////////////////////////////////////////////////
/**
 * Defines a Java UDF0 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF0[_], returnType: DataType): UserDefinedFunction = {
    // Wrap the call in a function so the UDF is evaluated per row rather than once, eagerly,
    // at definition time.
    val func = () => f.asInstanceOf[UDF0[Any]].call()
    UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF1 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF1[_, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF1[Any, Any]].call(_: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF2 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF2[_, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF2[Any, Any, Any]].call(_: Any, _: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF3 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF3[_, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF3[Any, Any, Any, Any]].call(_: Any, _: Any, _: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF4 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF4[_, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF4[Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF5 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF5[_, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF5[Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF6 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF6[_, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF6[Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF7 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF7[_, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF7[Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF8 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF8[_, _, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF8[Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF9 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF9[_, _, _, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF9[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
/**
 * Defines a Java UDF10 instance as a user-defined function (UDF).
* The caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @group udf_funcs
* @since 2.3.0
*/
def udf(f: UDF10[_, _, _, _, _, _, _, _, _, _, _], returnType: DataType): UserDefinedFunction = {
val func = f.asInstanceOf[UDF10[Any, Any, Any, Any, Any, Any, Any, Any, Any, Any, Any]].call(_: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any, _: Any)
UserDefinedFunction(func, returnType, inputTypes = None)
}
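  // Illustrative usage from Java (a sketch; assumes a DataFrame `df` and that `DataTypes`
  // is imported from org.apache.spark.sql.types):
  //
  //   UserDefinedFunction plus = functions.udf(
  //     (UDF2<Integer, Integer, Integer>) (a, b) -> a + b, DataTypes.IntegerType);
  //   df.select(plus.apply(df.col("a"), df.col("b")));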
// scalastyle:on parameter.number
// scalastyle:on line.size.limit
/**
 * Defines a user-defined function (UDF) using a Scala closure. For this variant,
* the caller must specify the output data type, and there is no automatic input type coercion.
* By default the returned UDF is deterministic. To change it to nondeterministic, call the
* API `UserDefinedFunction.asNondeterministic()`.
*
* @param f A closure in Scala
* @param dataType The output data type of the UDF
*
* @group udf_funcs
* @since 2.0.0
*/
def udf(f: AnyRef, dataType: DataType): UserDefinedFunction = {
UserDefinedFunction(f, dataType, None)
}
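  // Illustrative usage of the untyped variant (a sketch, assuming `IntegerType` is imported
  // from org.apache.spark.sql.types); it trades type inference for an explicit result type:
  //
  //   val parse = udf((s: String) => s.toInt, IntegerType)
  //   df.select(parse($"raw"))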
/**
 * Call a user-defined function.
* Example:
* {{{
* import org.apache.spark.sql._
*
* val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
* val spark = df.sparkSession
* spark.udf.register("simpleUDF", (v: Int) => v * v)
* df.select($"id", callUDF("simpleUDF", $"value"))
* }}}
*
* @group udf_funcs
* @since 1.5.0
*/
@scala.annotation.varargs
def callUDF(udfName: String, cols: Column*): Column = withExpr {
UnresolvedFunction(udfName, cols.map(_.expr), isDistinct = false)
}
}
|
tejasapatil/spark
|
sql/core/src/main/scala/org/apache/spark/sql/functions.scala
|
Scala
|
apache-2.0
| 136,266 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.treode.store.catalog
import scala.language.postfixOps
import com.treode.async.{Backoff, Fiber}
import com.treode.async.implicits._
import com.treode.async.misc.RichInt
import com.treode.cluster.{MessageDescriptor, Peer, ReplyTracker}
import com.treode.store.{Atlas, BallotNumber, CatalogId, TimeoutException}
private class Proposer (key: CatalogId, version: Int, kit: CatalogKit) {
import kit.proposers.remove
import kit.{cluster, library, random, scheduler}
import kit.config.{closedLifetime, proposingBackoff}
private val fiber = new Fiber
var state: State = Opening
trait State {
def open (ballot: Long, patch: Patch) = ()
def learn (k: Learner)
def refuse (ballot: Long)
def grant (from: Peer, ballot: Long, proposal: Proposal)
def accept (from: Peer, ballot: Long)
def chosen (value: Patch)
def timeout()
def shutdown() = state = Shutdown
}
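  // Of two optional (ballot, patch) proposals, keep the one with the higher ballot;
  // a Paxos proposer must adopt the highest-ballot value it has seen granted.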
private def max (x: Proposal, y: Proposal) = {
if (x.isDefined && y.isDefined) {
if (x.get._1 > y.get._1) x else y
} else if (x.isDefined) {
x
} else if (y.isDefined) {
y
} else {
None
}}
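  // Paxos value rule: if some acceptor already granted a proposal, re-propose its
  // patch; otherwise we are free to propose our own.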
private def agreement (x: Proposal, patch: Patch) = {
x match {
case Some ((_, patch)) => patch
case None => patch
}}
private def track (atlas: Atlas): ReplyTracker =
atlas.locate (0) .track
object Opening extends State {
override def open (ballot: Long, patch: Patch) =
state = new Open (ballot, patch)
def learn (k: Learner) = throw new IllegalStateException
def refuse (ballot: Long) = ()
def grant (from: Peer, ballot: Long, proposal: Proposal) = ()
def accept (from: Peer, ballot: Long) = ()
def chosen (v: Patch): Unit =
state = new Closed (v)
def timeout() = ()
override def toString = "Proposer.Opening (%s)" format (key.toString)
}
class Open (_ballot: Long, patch: Patch) extends State {
var learners = List.empty [Learner]
var ballot = _ballot
var refused = ballot
var proposed = Option.empty [(BallotNumber, Patch)]
var atlas = library.atlas
var granted = track (atlas)
var accepted = track (atlas)
// Ballot number zero was implicitly accepted.
if (ballot == 0)
Acceptor.propose (atlas.version, key, version, ballot, patch) (granted)
else
Acceptor.ask (atlas.version, key, version, ballot, patch) (granted)
val backoff = proposingBackoff.iterator
fiber.delay (backoff.next) (state.timeout())
def learn (k: Learner) =
learners ::= k
def refuse (ballot: Long) = {
refused = math.max (refused, ballot)
granted = track (atlas)
accepted = track (atlas)
}
def grant (from: Peer, ballot: Long, proposal: Proposal) {
if (ballot == this.ballot) {
granted += from
proposed = max (proposed, proposal)
if (granted.quorum) {
val v = agreement (proposed, patch)
Acceptor.propose (atlas.version, key, version, ballot, v) (accepted)
}}}
def accept (from: Peer, ballot: Long) {
if (ballot == this.ballot) {
accepted += from
if (accepted.quorum) {
val v = agreement (proposed, patch)
Acceptor.choose (key, version, v) (track (atlas))
learners foreach (_.pass (v))
state = new Closed (v)
}}}
def chosen (v: Patch) {
learners foreach (_.pass (v))
state = new Closed (v)
}
def timeout() {
if (backoff.hasNext) {
atlas = library.atlas
granted = track (atlas)
accepted = track (atlas)
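        // Choose a ballot above the highest refusal seen, randomized so competing
        // proposers are unlikely to collide again.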
ballot = refused + random.nextInt (17) + 1
refused = ballot
Acceptor.ask (atlas.version, key, version, ballot, patch) (granted)
fiber.delay (backoff.next) (state.timeout())
} else {
remove (key, version, Proposer.this)
learners foreach (_.fail (new TimeoutException))
}}
override def toString = "Proposer.Open " + (key, ballot, patch)
}
class Closed (patch: Patch) extends State {
fiber.delay (closedLifetime) (remove (key, version, Proposer.this))
def learn (k: Learner) =
k.pass (patch)
def chosen (v: Patch) =
require (v == patch, "Paxos disagreement")
def refuse (ballot: Long) = ()
def grant (from: Peer, ballot: Long, proposal: Proposal) = ()
def accept (from: Peer, ballot: Long) = ()
def timeout() = ()
override def toString = "Proposer.Closed " + (key, patch)
}
object Shutdown extends State {
def learn (k: Learner) = ()
def refuse (ballot: Long) = ()
def grant (from: Peer, ballot: Long, proposal: Proposal) = ()
def accept (from: Peer, ballot: Long) = ()
def chosen (v: Patch) = ()
def timeout() = ()
override def toString = "Proposer.Shutdown (%s)" format (key)
}
def open (ballot: Long, patch: Patch) =
fiber.execute (state.open (ballot, patch))
def learn (k: Learner) =
fiber.execute (state.learn (k))
def refuse (ballot: Long) =
fiber.execute (state.refuse (ballot))
def grant (from: Peer, ballot: Long, proposal: Proposal) =
fiber.execute (state.grant (from, ballot, proposal))
def accept (from: Peer, ballot: Long) =
fiber.execute (state.accept (from, ballot))
def chosen (patch: Patch) =
fiber.execute (state.chosen (patch))
def shutdown() =
fiber.execute (state.shutdown())
override def toString = state.toString
}
private object Proposer {
val refuse = {
import CatalogPicklers._
MessageDescriptor (0xFF8562E9071168EAL, tuple (catId, uint, ulong))
}
val grant = {
import CatalogPicklers._
MessageDescriptor (0xFF3F6FFC9993CD75L, tuple (catId, uint, ulong, proposal))
}
val accept = {
import CatalogPicklers._
MessageDescriptor (0xFF0E7973CC65E95FL, tuple (catId, uint, ulong))
}
val chosen = {
import CatalogPicklers._
MessageDescriptor (0xFF2259321F9D4EF9L, tuple (catId, uint, patch))
}}
|
Treode/store
|
store/src/com/treode/store/catalog/Proposer.scala
|
Scala
|
apache-2.0
| 6,579 |
package chee.metadata
import chee.UserError
import chee.query.Transform
import chee.properties._
import MetadataMacro._
import com.typesafe.scalalogging.LazyLogging
class MetadataMacro(f: Condition => Traversable[String]) extends Transform with LazyLogging {
def this(mf: MetadataFile) = this(mf.findIds _)
def mapCondition(c: Condition): Condition = {
logger.trace(s"Search metadata for $c")
val ids = f(c)
logger.debug(s"Found ${ids.size} metadata results for condition ${c}")
if (ids.isEmpty) Not(TrueCondition)
else In(Ident.checksum, ids.toSeq)
}
def apply(c: Condition): Condition = Condition.mapAll ({
case p@Prop(_, Property(id, _)) if metadataId(id) =>
mapCondition(p)
case IdentProp(comp, id1, id2) if metadataId(id1, id2) =>
UserError(s"Cannot compare ${id1.name} with ${id2.name} (not supported)")
case e@Exists(id) if metadataId(id) =>
mapCondition(e)
case c@In(id, _) if metadataId(id) =>
mapCondition(c)
case n => n
})(c)
}
object MetadataMacro {
val metaIdents = idents.all.toSet
def metadataId(id: Ident*): Boolean =
id.exists(metaIdents)
}
|
eikek/chee
|
src/main/scala/chee/metadata/MetadataMacro.scala
|
Scala
|
gpl-3.0
| 1,147 |
package ai.dragonfly.img
import ai.dragonfly.color.Color
object Img {
def apply(width: Int, height: Int): Img = new ai.dragonfly.img.native.Img(
width, Array.fill[Int](width * height)(Color.CLEAR.argb)
)
}
|
dragonfly-ai/img
|
img/shared/src/main/scala/ai/dragonfly/img/Img.scala
|
Scala
|
apache-2.0
| 216 |
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2006-2015, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
package scala
package util
import scala.reflect.ClassTag
import scala.math.Ordering
/** The `Sorting` object provides convenience wrappers for `java.util.Arrays.sort`.
 * Methods that defer to `java.util.Arrays.sort` say so, or state under what
 * conditions they do.
*
* `Sorting` also implements a general-purpose quicksort and stable (merge) sort
* for those cases where `java.util.Arrays.sort` could only be used at the cost
* of a large memory penalty. If performance rather than memory usage is the
* primary concern, one may wish to find alternate strategies to use
* `java.util.Arrays.sort` directly e.g. by boxing primitives to use
* a custom ordering on them.
*
* `Sorting` provides methods where you can provide a comparison function, or
* can request a sort of items that are [[scala.math.Ordered]] or that
* otherwise have an implicit or explicit [[scala.math.Ordering]].
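 *
 * For example (illustrative):
 * {{{
 *   import scala.util.Sorting
 *   val xs = Array(3, 1, 2)
 *   Sorting.quickSort(xs)   // defers to java.util.Arrays.sort for Array[Int]
 *   val ys = Sorting.stableSort(List("b", "a"), (a: String, b: String) => a < b)
 * }}}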
*
* Note also that high-performance non-default sorts for numeric types
* are not provided. If this is required, it is advisable to investigate
* other libraries that cover this use case.
*
* @author Ross Judson
* @author Adriaan Moors
* @author Rex Kerr
* @version 1.1
*/
object Sorting {
/** Sort an array of Doubles using `java.util.Arrays.sort`. */
def quickSort(a: Array[Double]): Unit = java.util.Arrays.sort(a)
/** Sort an array of Ints using `java.util.Arrays.sort`. */
def quickSort(a: Array[Int]): Unit = java.util.Arrays.sort(a)
/** Sort an array of Floats using `java.util.Arrays.sort`. */
def quickSort(a: Array[Float]): Unit = java.util.Arrays.sort(a)
private final val qsortThreshold = 16
/** Sort array `a` with quicksort, using the Ordering on its elements.
* This algorithm sorts in place, so no additional memory is used aside from
* what might be required to box individual elements during comparison.
*/
def quickSort[K: Ordering](a: Array[K]): Unit = {
// Must have iN >= i0 or math will fail. Also, i0 >= 0.
def inner(a: Array[K], i0: Int, iN: Int, ord: Ordering[K]): Unit = {
if (iN - i0 < qsortThreshold) insertionSort(a, i0, iN, ord)
else {
val iK = (i0 + iN) >>> 1 // Unsigned div by 2
// Find index of median of first, central, and last elements
var pL =
if (ord.compare(a(i0), a(iN - 1)) <= 0)
if (ord.compare(a(i0), a(iK)) < 0)
if (ord.compare(a(iN - 1), a(iK)) < 0) iN - 1 else iK
else i0
else
if (ord.compare(a(i0), a(iK)) < 0) i0
else
if (ord.compare(a(iN - 1), a(iK)) <= 0) iN - 1
else iK
val pivot = a(pL)
// pL is the start of the pivot block; move it into the middle if needed
if (pL != iK) { a(pL) = a(iK); a(iK) = pivot; pL = iK }
// Elements equal to the pivot will be in range pL until pR
var pR = pL + 1
// Items known to be less than pivot are below iA (range i0 until iA)
var iA = i0
// Items known to be greater than pivot are at or above iB (range iB until iN)
var iB = iN
// Scan through everything in the buffer before the pivot(s)
while (pL - iA > 0) {
val current = a(iA)
ord.compare(current, pivot) match {
case 0 =>
// Swap current out with pivot block
a(iA) = a(pL - 1)
a(pL - 1) = current
pL -= 1
case x if x < 0 =>
// Already in place. Just update indices.
iA += 1
case _ if iB > pR =>
// Wrong side. There's room on the other side, so swap
a(iA) = a(iB - 1)
a(iB - 1) = current
iB -= 1
case _ =>
// Wrong side and there is no room. Swap by rotating pivot block.
a(iA) = a(pL - 1)
a(pL - 1) = a(pR - 1)
a(pR - 1) = current
pL -= 1
pR -= 1
iB -= 1
}
}
// Get anything remaining in buffer after the pivot(s)
while (iB - pR > 0) {
val current = a(iB - 1)
ord.compare(current, pivot) match {
case 0 =>
// Swap current out with pivot block
a(iB - 1) = a(pR)
a(pR) = current
pR += 1
case x if x > 0 =>
// Already in place. Just update indices.
iB -= 1
case _ =>
// Wrong side and we already know there is no room. Swap by rotating pivot block.
a(iB - 1) = a(pR)
a(pR) = a(pL)
a(pL) = current
iA += 1
pL += 1
pR += 1
}
}
// Use tail recursion on large half (Sedgewick's method) so we don't blow up the stack if pivots are poorly chosen
if (iA - i0 < iN - iB) {
inner(a, i0, iA, ord) // True recursion
inner(a, iB, iN, ord) // Should be tail recursion
}
else {
inner(a, iB, iN, ord) // True recursion
inner(a, i0, iA, ord) // Should be tail recursion
}
}
}
inner(a, 0, a.length, implicitly[Ordering[K]])
}
private final val mergeThreshold = 32
// Ordering[T] might be slow especially for boxed primitives, so use binary search variant of insertion sort
// Caller must pass iN >= i0 or math will fail. Also, i0 >= 0.
private def insertionSort[@specialized T](a: Array[T], i0: Int, iN: Int, ord: Ordering[T]): Unit = {
val n = iN - i0
if (n < 2) return
if (ord.compare(a(i0), a(i0+1)) > 0) {
val temp = a(i0)
a(i0) = a(i0+1)
a(i0+1) = temp
}
var m = 2
while (m < n) {
// Speed up already-sorted case by checking last element first
val next = a(i0 + m)
if (ord.compare(next, a(i0+m-1)) < 0) {
var iA = i0
var iB = i0 + m - 1
while (iB - iA > 1) {
val ix = (iA + iB) >>> 1 // Use bit shift to get unsigned div by 2
if (ord.compare(next, a(ix)) < 0) iB = ix
else iA = ix
}
val ix = iA + (if (ord.compare(next, a(iA)) < 0) 0 else 1)
var i = i0 + m
while (i > ix) {
a(i) = a(i-1)
i -= 1
}
a(ix) = next
}
m += 1
}
}
// Caller is required to pass iN >= i0, else math will fail. Also, i0 >= 0.
private def mergeSort[@specialized T: ClassTag](a: Array[T], i0: Int, iN: Int, ord: Ordering[T], scratch: Array[T] = null): Unit = {
if (iN - i0 < mergeThreshold) insertionSort(a, i0, iN, ord)
else {
val iK = (i0 + iN) >>> 1 // Bit shift equivalent to unsigned math, no overflow
val sc = if (scratch eq null) new Array[T](iK - i0) else scratch
mergeSort(a, i0, iK, ord, sc)
mergeSort(a, iK, iN, ord, sc)
mergeSorted(a, i0, iK, iN, ord, sc)
}
}
// Must have 0 <= i0 < iK < iN
private def mergeSorted[@specialized T](a: Array[T], i0: Int, iK: Int, iN: Int, ord: Ordering[T], scratch: Array[T]): Unit = {
// Check to make sure we're not already in order
if (ord.compare(a(iK-1), a(iK)) > 0) {
var i = i0
val jN = iK - i0
var j = 0
while (i < iK) {
scratch (j) = a(i)
i += 1
j += 1
}
var k = i0
j = 0
while (i < iN && j < jN) {
if (ord.compare(a(i), scratch(j)) < 0) { a(k) = a(i); i += 1 }
else { a(k) = scratch(j); j += 1 }
k += 1
}
while (j < jN) { a(k) = scratch(j); j += 1; k += 1 }
// Don't need to finish a(i) because it's already in place, k = i
}
}
// Why would you even do this?
private def booleanSort(a: Array[Boolean]): Unit = {
var i = 0
var n = 0
while (i < a.length) {
if (!a(i)) n += 1
i += 1
}
i = 0
while (i < n) {
a(i) = false
i += 1
}
while (i < a.length) {
a(i) = true
i += 1
}
}
// TODO: add upper bound: T <: AnyRef, propagate to callers below (not binary compatible)
// Maybe also rename all these methods to `sort`.
@inline private def sort[T](a: Array[T], ord: Ordering[T]): Unit = a match {
case _: Array[AnyRef] =>
// Note that runtime matches are covariant, so could actually be any Array[T] s.t. T is not primitive (even boxed value classes)
if (a.length > 1 && (ord eq null)) throw new NullPointerException("Ordering")
java.util.Arrays.sort(a, ord)
case a: Array[Int] => if (ord eq Ordering.Int) java.util.Arrays.sort(a) else mergeSort[Int](a, 0, a.length, ord)
case a: Array[Double] => mergeSort[Double](a, 0, a.length, ord) // Because not all NaNs are identical, stability is meaningful!
case a: Array[Long] => if (ord eq Ordering.Long) java.util.Arrays.sort(a) else mergeSort[Long](a, 0, a.length, ord)
case a: Array[Float] => mergeSort[Float](a, 0, a.length, ord) // Because not all NaNs are identical, stability is meaningful!
case a: Array[Char] => if (ord eq Ordering.Char) java.util.Arrays.sort(a) else mergeSort[Char](a, 0, a.length, ord)
case a: Array[Byte] => if (ord eq Ordering.Byte) java.util.Arrays.sort(a) else mergeSort[Byte](a, 0, a.length, ord)
case a: Array[Short] => if (ord eq Ordering.Short) java.util.Arrays.sort(a) else mergeSort[Short](a, 0, a.length, ord)
case a: Array[Boolean] => if (ord eq Ordering.Boolean) booleanSort(a) else mergeSort[Boolean](a, 0, a.length, ord)
// Array[Unit] is matched as an Array[AnyRef] due to covariance in runtime matching. Not worth catching it as a special case.
case null => throw new NullPointerException
}
/** Sort array `a` using the Ordering on its elements, preserving the original ordering where possible. Uses `java.util.Arrays.sort` unless `K` is a primitive type. */
def stableSort[K: Ordering](a: Array[K]): Unit = sort(a, Ordering[K])
// TODO: make this fast for primitive K (could be specialized if it didn't go through Ordering)
/** Sort array `a` using function `f` that computes the less-than relation for each element. Uses `java.util.Arrays.sort` unless `K` is a primitive type. */
def stableSort[K](a: Array[K], f: (K, K) => Boolean): Unit = sort(a, Ordering fromLessThan f)
/** A sorted Array, using the Ordering for the elements in the sequence `a`. Uses `java.util.Arrays.sort` unless `K` is a primitive type. */
def stableSort[K: ClassTag: Ordering](a: scala.collection.Seq[K]): Array[K] = {
val ret = a.toArray
sort(ret, Ordering[K])
ret
}
// TODO: make this fast for primitive K (could be specialized if it didn't go through Ordering)
/** A sorted Array, given a function `f` that computes the less-than relation for each item in the sequence `a`. Uses `java.util.Arrays.sort` unless `K` is a primitive type. */
def stableSort[K: ClassTag](a: scala.collection.Seq[K], f: (K, K) => Boolean): Array[K] = {
val ret = a.toArray
sort(ret, Ordering fromLessThan f)
ret
}
/** A sorted Array, given an extraction function `f` that returns an ordered key for each item in the sequence `a`. Uses `java.util.Arrays.sort` unless `K` is a primitive type. */
def stableSort[K: ClassTag, M: Ordering](a: scala.collection.Seq[K], f: K => M): Array[K] = {
val ret = a.toArray
sort(ret, Ordering[M] on f)
ret
}
}
|
rorygraves/perf_tester
|
corpus/scala-library/src/main/scala/util/Sorting.scala
|
Scala
|
apache-2.0
| 11,943 |
package ore.rest
import play.api.libs.json.Json.obj
import play.api.libs.json._
import ore.db.Model
import ore.models.api.ProjectApiKey
import ore.models.project._
/**
* Contains implicit JSON [[Writes]] for the Ore API.
*/
trait OreWrites {
implicit val projectApiKeyWrites: Writes[Model[ProjectApiKey]] = (key: Model[ProjectApiKey]) =>
obj(
"id" -> key.id.value,
"createdAt" -> key.createdAt.value,
"keyType" -> obj("id" -> 0, "name" -> "deployment"),
"projectId" -> key.projectId,
"value" -> key.value
)
implicit val pageWrites: Writes[Model[Page]] = (page: Model[Page]) =>
obj(
"id" -> page.id.value,
"createdAt" -> page.createdAt.toString,
"parentId" -> page.parentId,
"name" -> page.name,
"slug" -> page.slug
)
implicit val channelWrites: Writes[Channel] = (channel: Channel) =>
obj("name" -> channel.name, "color" -> channel.color.hex, "nonReviewed" -> channel.isNonReviewed)
implicit val tagWrites: Writes[Model[VersionTag]] = (tag: Model[VersionTag]) => {
obj(
"id" -> tag.id.value,
"name" -> tag.name,
"data" -> tag.data,
"backgroundColor" -> tag.color.background,
"foregroundColor" -> tag.color.foreground
)
}
implicit val tagColorWrites: Writes[TagColor] = (tagColor: TagColor) => {
obj(
"id" -> tagColor.value,
"backgroundColor" -> tagColor.background,
"foregroundColor" -> tagColor.foreground
)
}
}
object OreWrites extends OreWrites
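// Illustrative usage (a sketch, assuming a Model[Page] value named `page` is in scope):
//   import ore.rest.OreWrites._
//   val json = play.api.libs.json.Json.toJson(page)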
|
SpongePowered/Ore
|
ore/app/ore/rest/OreWrites.scala
|
Scala
|
mit
| 1,597 |
package org.jetbrains.plugins.scala.failed.typeInference
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.lang.typeInference.TypeInferenceTestBase
import org.junit.experimental.categories.Category
/**
* @author Alefas
* @since 25/03/16
*/
@Category(Array(classOf[PerfCycleTests]))
class AnonymousFunctionsTest extends TypeInferenceTestBase {
override def folderPath: String = super.folderPath + "bugs5/"
def testSCL8267(): Unit = doTest()
def testSCL9432(): Unit = doTest {
"""
|object SCL9432 {
| def f(int: Int): Option[Int] = if (int % 2 == 0) Some(int) else None
| def g(as: List[Int])(b: Int): Option[Int] = if (as contains b) None else f(b)
| /*start*/List(1) flatMap g(List(2, 4))/*end*/
|}
|//List[Int]
""".stripMargin.trim
}
}
|
jeantil/intellij-scala
|
test/org/jetbrains/plugins/scala/failed/typeInference/AnonymousFunctionsTest.scala
|
Scala
|
apache-2.0
| 837 |
package mot.protocol
import mot.buffer.WriteBuffer
import mot.buffer.ReadBuffer
/**
* Special frame type, which represents an unknown received frame. It is never written.
*/
case class UnknownFrame(messageType: Byte, length: Int) extends Frame {
def writeSpecific(writeBuffer: WriteBuffer) = throw new UnsupportedOperationException
def dump() = ""
}
object UnknownFrame extends FrameFactory[UnknownFrame] {
def build(readBuffer: ReadBuffer, messageType: Byte, length: Int) = {
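    // Discard the unknown payload so the read buffer stays aligned with the next frame.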
readBuffer.discard(length)
UnknownFrame(messageType, length)
}
}
|
marianobarrios/mot
|
src/main/scala/mot/protocol/UnknownFrame.scala
|
Scala
|
bsd-2-clause
| 568 |
package org.jetbrains.plugins.scala.conversion.copy
import java.awt.datatransfer.Transferable
import java.lang.Boolean
import com.intellij.codeInsight.CodeInsightSettings
import com.intellij.codeInsight.daemon.impl.CollectHighlightsUtil
import com.intellij.diagnostic.LogMessageEx
import com.intellij.openapi.diagnostic.{Attachment, Logger}
import com.intellij.openapi.editor.{Editor, RangeMarker}
import com.intellij.openapi.progress.util.AbstractProgressIndicatorBase
import com.intellij.openapi.progress.{ProcessCanceledException, ProgressManager}
import com.intellij.openapi.project.{DumbService, Project}
import com.intellij.openapi.ui.DialogWrapper
import com.intellij.openapi.util.{Ref, TextRange}
import com.intellij.psi._
import com.intellij.util.ExceptionUtil
import org.jetbrains.plugins.scala.annotator.intention.ScalaImportTypeFix
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.dependency.Dependency
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.settings._
import scala.collection.JavaConversions._
import scala.util.control.Breaks._
/**
* Pavel Fatin
*/
class ScalaCopyPastePostProcessor extends SingularCopyPastePostProcessor[Associations] {
private val Log = Logger.getInstance(getClass)
private val Timeout = 3000L
protected def collectTransferableData0(file: PsiFile, editor: Editor,
startOffsets: Array[Int], endOffsets: Array[Int]): Associations = {
if (DumbService.getInstance(file.getProject).isDumb) return null
if(!file.isInstanceOf[ScalaFile]) return null
val timeBound = System.currentTimeMillis + Timeout
var associations: List[Association] = Nil
try {
ProgressManager.getInstance().runProcess(new Runnable {
override def run(): Unit = {
breakable {
for ((startOffset, endOffset) <- startOffsets.zip(endOffsets);
element <- getElementsStrictlyInRange(file, startOffset, endOffset);
reference <- element.asOptionOf[ScReferenceElement];
dependency <- Dependency.dependencyFor(reference) if dependency.isExternal;
range = dependency.source.getTextRange.shiftRight(-startOffset)) {
if (System.currentTimeMillis > timeBound) {
Log.warn("Time-out while collecting dependencies in %s:\\n%s".format(
file.getName, file.getText.substring(startOffset, endOffset)))
break()
}
associations ::= Association(dependency.kind, range, dependency.path)
}
}
}
}, new AbstractProgressIndicatorBase {
override def isCanceled: scala.Boolean = {
System.currentTimeMillis > timeBound || super.isCanceled
}
})
} catch {
case p: ProcessCanceledException =>
Log.warn("Time-out while collecting dependencies in %s:\\n%s".format(
file.getName, file.getText.substring(startOffsets(0), endOffsets(0))))
case e: Exception =>
val selections = (startOffsets, endOffsets).zipped.map((a, b) => file.getText.substring(a, b))
val attachments = selections.zipWithIndex.map(p => new Attachment(s"Selection-${p._2 + 1}.scala", p._1))
Log.error(LogMessageEx.createEvent(e.getMessage, ExceptionUtil.getThrowableText(e), attachments: _*))
}
new Associations(associations.reverse)
}
protected def extractTransferableData0(content: Transferable) = {
content.isDataFlavorSupported(Associations.Flavor)
.ifTrue(content.getTransferData(Associations.Flavor).asInstanceOf[Associations])
.orNull
}
protected def processTransferableData0(project: Project, editor: Editor, bounds: RangeMarker,
caretColumn: Int, indented: Ref[Boolean], value: Associations) {
if (DumbService.getInstance(project).isDumb) return
if (ScalaApplicationSettings.getInstance().ADD_IMPORTS_ON_PASTE == CodeInsightSettings.NO) return
val file = PsiDocumentManager.getInstance(project).getPsiFile(editor.getDocument)
if (!file.isInstanceOf[ScalaFile]) return
PsiDocumentManager.getInstance(project).commitAllDocuments()
val offset = bounds.getStartOffset
doRestoreAssociations(value, file, offset, project) { bindingsToRestore =>
if (ScalaApplicationSettings.getInstance().ADD_IMPORTS_ON_PASTE == CodeInsightSettings.ASK) {
val dialog = new RestoreReferencesDialog(project, bindingsToRestore.map(_.path.toOption.getOrElse("")).sorted.toArray)
dialog.show()
      val selectedPaths = dialog.getSelectedElements
      if (dialog.getExitCode == DialogWrapper.OK_EXIT_CODE)
        bindingsToRestore.filter(it => selectedPaths.contains(it.path))
else
Seq.empty
} else {
bindingsToRestore
}
}
}
def restoreAssociations(value: Associations, file: PsiFile, offset: Int, project: Project) {
doRestoreAssociations(value, file, offset, project)(identity)
}
private def doRestoreAssociations(value: Associations, file: PsiFile, offset: Int, project: Project)
(filter: Seq[Binding] => Seq[Binding]) {
val bindings =
(for {
association <- value.associations
element <- elementFor(association, file, offset)
if !association.isSatisfiedIn(element)
} yield Binding(element, association.path.asString(ScalaCodeStyleSettings.getInstance(project).
isImportMembersUsingUnderScore))).filter {
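        // Drop bindings whose qualifier is available by default (scala, java.lang, scala.Predef).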
case Binding(_, path) =>
val index = path.lastIndexOf('.')
index != -1 && !Set("scala", "java.lang", "scala.Predef").contains(path.substring(0, index))
}
if (bindings.isEmpty) return
val bindingsToRestore = filter(bindings.distinctBy(_.path))
if (bindingsToRestore.isEmpty) return
inWriteAction {
for (Binding(ref, path) <- bindingsToRestore;
holder = ScalaImportTypeFix.getImportHolder(ref, file.getProject))
holder.addImportForPath(path, ref)
}
}
private def elementFor(dependency: Association, file: PsiFile, offset: Int): Option[PsiElement] = {
val range = dependency.range.shiftRight(offset)
for(ref <- Option(file.findElementAt(range.getStartOffset));
parent <- ref.parent if parent.getTextRange == range) yield parent
}
private def getElementsStrictlyInRange(file: PsiFile, startOffset: Int, endOffset: Int): Seq[PsiElement] = {
val range = TextRange.create(startOffset, endOffset)
CollectHighlightsUtil.getElementsInRange(file, startOffset, endOffset)
.filter(e => range.contains(e.getTextRange))
}
private case class Binding(element: PsiElement, path: String)
}
|
katejim/intellij-scala
|
src/org/jetbrains/plugins/scala/conversion/copy/ScalaCopyPastePostProcessor.scala
|
Scala
|
apache-2.0
| 6,946 |
package scalan.primitives
import scalan.{ScalanDslExp, BaseTests}
class RewriteSuite extends BaseTests {
val scalan = new ScalanDslExp
import scalan._
// TODO will be a describe block
{
val rand1 = random(5)
val rand2 = random(5)
test("A random should be equal to itself") {
(rand1 === rand1) shouldEqual toRep(true)
}
test("Different randoms shouldn't be equal") {
(rand1 === rand2) should not be a[Const[_]]
}
}
test("Constants should propagate") {
(toRep(4) + 5 > toRep(1) * 3) shouldEqual toRep(true)
}
test("One-sided constant propagation") {
val x = fresh[Boolean]
(x && true) shouldEqual x
(toRep(false) || x) shouldEqual x
(x === false) shouldEqual !x
val num = fresh[Double]
(num * 1.0) shouldEqual num
(toRep(0.0) * num) shouldEqual toRep(0.0)
}
}
|
scalan/scalan
|
core/src/test/scala/scalan/primitives/RewriteSuite.scala
|
Scala
|
apache-2.0
| 856 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.util.concurrent.atomic.AtomicLong
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ExecutionContext.Implicits.global
import org.apache.spark.{ComplexFutureAction, FutureAction, Logging}
/**
* A set of asynchronous RDD actions available through an implicit conversion.
* Import `org.apache.spark.SparkContext._` at the top of your program to use these functions.
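 *
 * For example (illustrative; assumes an RDD named `rdd` is in scope):
 * {{{
 *   import org.apache.spark.SparkContext._
 *   val futureCount = rdd.countAsync()   // a FutureAction[Long]
 * }}}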
*/
class AsyncRDDActions[T: ClassManifest](self: RDD[T]) extends Serializable with Logging {
/**
* Returns a future for counting the number of elements in the RDD.
*/
def countAsync(): FutureAction[Long] = {
val totalCount = new AtomicLong
self.context.submitJob(
self,
(iter: Iterator[T]) => {
var result = 0L
while (iter.hasNext) {
result += 1L
iter.next()
}
result
},
Range(0, self.partitions.size),
(index: Int, data: Long) => totalCount.addAndGet(data),
totalCount.get())
}
/**
* Returns a future for retrieving all elements of this RDD.
*/
def collectAsync(): FutureAction[Seq[T]] = {
val results = new Array[Array[T]](self.partitions.size)
self.context.submitJob[T, Array[T], Seq[T]](self, _.toArray, Range(0, self.partitions.size),
(index, data) => results(index) = data, results.flatten.toSeq)
}
/**
* Returns a future for retrieving the first num elements of the RDD.
*/
def takeAsync(num: Int): FutureAction[Seq[T]] = {
val f = new ComplexFutureAction[Seq[T]]
f.run {
val results = new ArrayBuffer[T](num)
val totalParts = self.partitions.length
var partsScanned = 0
while (results.size < num && partsScanned < totalParts) {
// The number of partitions to try in this iteration. It is ok for this number to be
// greater than totalParts because we actually cap it at totalParts in runJob.
var numPartsToTry = 1
if (partsScanned > 0) {
// If we didn't find any rows after the first iteration, just try all partitions next.
// Otherwise, interpolate the number of partitions we need to try, but overestimate it
// by 50%.
if (results.size == 0) {
numPartsToTry = totalParts - 1
} else {
numPartsToTry = (1.5 * num * partsScanned / results.size).toInt
}
}
numPartsToTry = math.max(0, numPartsToTry) // guard against negative num of partitions
val left = num - results.size
val p = partsScanned until math.min(partsScanned + numPartsToTry, totalParts)
val buf = new Array[Array[T]](p.size)
f.runJob(self,
(it: Iterator[T]) => it.take(left).toArray,
p,
(index: Int, data: Array[T]) => buf(index) = data,
          ())
buf.foreach(results ++= _.take(num - results.size))
partsScanned += numPartsToTry
}
results.toSeq
}
f
}
/**
* Applies a function f to all elements of this RDD.
*/
def foreachAsync(f: T => Unit): FutureAction[Unit] = {
self.context.submitJob[T, Unit, Unit](self, _.foreach(f), Range(0, self.partitions.size),
      (index, data) => (), ())
}
/**
* Applies a function f to each partition of this RDD.
*/
def foreachPartitionAsync(f: Iterator[T] => Unit): FutureAction[Unit] = {
self.context.submitJob[T, Unit, Unit](self, f, Range(0, self.partitions.size),
      (index, data) => (), ())
}
}
|
windeye/spark
|
core/src/main/scala/org/apache/spark/rdd/AsyncRDDActions.scala
|
Scala
|
apache-2.0
| 4,306 |
import sbt._
import Keys._
/**
 * This object defines the publishing mechanism. Snapshots are published to the Sonatype OSS
 * snapshots repository and releases to its staging repository; credentials are read from the
 * SONATYPE_USERNAME and SONATYPE_PASSWORD environment variables.
*/
object PublishSettings {
lazy val publishSettings: Seq[Def.Setting[_]] = Seq(
publishArtifact in (Compile, packageDoc) := false,
publishMavenStyle := true,
pomIncludeRepository := { _ => false },
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value)
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
},
pomExtra := (
<url>https://github.com/eigengo</url>
<licenses>
<license>
<name>BSD-style</name>
<url>http://www.opensource.org/licenses/bsd-license.php</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url>[email protected]:eigengo/lift.git</url>
<connection>scm:git:[email protected]:eigengo/lift.git</connection>
</scm>
<developers>
<developer>
<id>janm399</id>
<name>Jan Machacek</name>
<url>http://www.eigengo.com</url>
</developer>
</developers>),
credentials ++= (for {
username <- Option(System.getenv().get("SONATYPE_USERNAME"))
password <- Option(System.getenv().get("SONATYPE_PASSWORD"))
} yield Credentials("Sonatype Nexus Repository Manager", "oss.sonatype.org", username, password)).toSeq
)
}
|
imace/open-muvr
|
server/project/PublishSettings.scala
|
Scala
|
apache-2.0
| 1,588 |
package io.github.kender.spray.eureka.client
import org.json4s.jackson.Serialization
import org.json4s.{NoTypeHints, DefaultFormats, Formats}
import spray.httpx.Json4sJacksonSupport
import scala.concurrent._
import akka.actor.ActorSystem
import spray.client.pipelining._
import spray.http._
import io.github.kender.spray.eureka.{Port, Registration, DataCenterInfo, InstanceInfo}
import org.slf4j.LoggerFactory
/**
* A client for managing a service instance with Eureka
* @param config EurekaConfig
* @param actorSystem ActorSystem
*/
class InstanceClient(config: EurekaConfig)(implicit actorSystem: ActorSystem) extends Json4sJacksonSupport {
val instanceUrl = s"${config.serverUrl}/v2/apps/${config.instance.appId}"
import actorSystem.dispatcher
import io.github.kender.spray.eureka.client.Loggable._
override implicit def json4sJacksonFormats: Formats =
EurekaSerialization.Implicits.eurekaFormats
type InstanceId = String
val logger = LoggerFactory.getLogger(classOf[InstanceClient])
private implicit object RequestLogger extends Loggable[HttpRequest] {
override def asLogMessage(it: HttpRequest): String = s"httpRequest $it"
}
private def pipeline: HttpRequest ⇒ Future[HttpResponse] = {
sendReceive
}
/**
* de-register the instance from Eureka
   * @return A future which completes once the call has finished.
*/
def deRegister(): Future[Unit] = {
    pipeline(Delete(instanceUrl)).map(_ ⇒ ())
}
/**
* register the instance with Eureka
   * @return A future containing the instance id, which completes once the call has finished.
*/
def register(status: String = "UP", dataCenterInfo: DataCenterInfo = DataCenterInfo.myOwn): Future[InstanceId] = {
logger.info("registering instance: {}", config.instance.appId)
val instance = InstanceInfo(
config.instance.hostName,
config.instance.appId,
config.instance.ipAddress,
config.instance.vipAddress,
config.instance.secureVipAddress,
status,
Some(Port(config.instance.port.toString)),
Port(config.instance.securePort.toString),
config.instance.homePageUrl,
config.instance.statusPageUrl,
config.instance.healthCheckUrl,
dataCenterInfo
)
pipeline(debugIt(logger) {
Post(
instanceUrl,
Registration(instance))
}) map { _ ⇒ dataCenterInfo.metadata.map(_.`instance-id`) getOrElse config.instance.hostName }
}
}
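// Illustrative usage (a sketch, assuming an implicit ActorSystem and a loaded EurekaConfig
// named `config` are in scope):
//   val client = new InstanceClient(config)
//   val instanceId: Future[String] = client.register()
//   client.deRegister()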
|
kender/spray-eureka-client
|
src/main/scala/io/github/kender/spray/eureka/client/InstanceClient.scala
|
Scala
|
mit
| 2,461 |
package monocle.std
import monocle.{Iso, Prism}
import monocle.catssupport.Implicits._
import cats.instances.list._
import cats.instances.option._
import cats.syntax.traverse._
object string extends StringOptics
trait StringOptics {
val stringToList: Iso[String, List[Char]] =
Iso((_: String).toList)(_.mkString)
val stringToBoolean: Prism[String, Boolean] =
Prism{s: String => parseCaseSensitiveBoolean(s)}(_.toString)
val stringToLong: Prism[String, Long] =
Prism(parseLong)(_.toString)
val stringToInt: Prism[String, Int] =
stringToLong composePrism long.longToInt
val stringToByte: Prism[String, Byte] =
stringToLong composePrism long.longToByte
private def parseLong(s: String): Option[Long] = {
    // we reject cases where the String would make an invalid Prism according to the 2nd prism law
// * String starts with +
// * String starts with 0 and has multiple digits
    def inputBreaksPrismLaws(input: String): Boolean =
      input.isEmpty || input.startsWith("+") || (input.startsWith("0") && input.length > 1)
if (inputBreaksPrismLaws(s)) None
else s.toList match {
case '-' :: xs => parseLongUnsigned(xs).map(-_)
case xs => parseLongUnsigned(xs)
}
}
private def parseLongUnsigned(s: List[Char]): Option[Long] =
if(s.isEmpty) None
else s.traverse(charToDigit).map(_.foldl(0L)(n => d => n * 10 + d))
private def charToDigit(c: Char): Option[Int] =
if (c >= '0' && c <= '9') Some(c - '0')
else None
private def parseCaseSensitiveBoolean(stringBoolean: String): Option[Boolean] = stringBoolean match {
case "true" => Some(true)
case "false" => Some(false)
case _ => None
}
}
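// Illustrative round-trips (stringToInt rejects inputs that would break the 2nd prism law):
//   string.stringToInt.getOption("12")  // Some(12)
//   string.stringToInt.getOption("012") // None
//   string.stringToInt.reverseGet(12)   // "12"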
|
fkz/Monocle
|
core/shared/src/main/scala/monocle/std/String.scala
|
Scala
|
mit
| 1,679 |
package commons
package object scala {
case class Config(notificationEnabled: Boolean)
case class User(firstName: String, lastName: String) {
def fullName: String = s"$firstName $lastName"
}
val config = Config(notificationEnabled = true)
val currentUser = User("Jane", "Doe")
def notifyUser(user: User, message: String): Unit = println(s"Notified ${user.fullName} with [$message]")
}
|
naderghanbari/scala-vs-java
|
src/main/scala/commons/scala/package.scala
|
Scala
|
apache-2.0
| 407 |
package com.twitter.util.jackson.caseclass
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
/**
* "Serde" (technically "SerDe") is a portmanteau of "Serialization and Deserialization"
*
* Mix this trait into a case class to get helpful logging methods.
* This trait adds a `@JsonIgnoreProperties` for fields which are
* defined in the [[com.twitter.util.logging.Logging]] trait so that
* they are not included in JSON serde operations.
*
*/
@JsonIgnoreProperties(
Array(
"logger_name",
"trace_enabled",
"debug_enabled",
"error_enabled",
"info_enabled",
"warn_enabled"
)
)
trait SerdeLogging extends com.twitter.util.logging.Logging
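// Illustrative usage (hypothetical case class): mixing in the trait keeps the Logging
// fields out of the serialized JSON.
//   case class Payload(id: Long) extends SerdeLogging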
|
twitter/util
|
util-jackson/src/main/scala/com/twitter/util/jackson/caseclass/SerdeLogging.scala
|
Scala
|
apache-2.0
| 682 |
/*
* Copyright (C) 2011 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.workflow.sampling
import org.openmole.core.context.{ PrototypeSet, Val, Variable }
import org.openmole.core.expansion.{ FromContext, Validate }
/**
* An explicit sampling associates a prototype to an explicit set of values given through an iterable.
* @param prototype Val to be sampled
* @param data Iterable[T] explicit values of the sampling
* @tparam T type of the Val
*/
object ExplicitSampling {
implicit def isSampling[T]: IsSampling[ExplicitSampling[T]] = s ⇒
Sampling(
s.data.map { v ⇒ List(Variable(s.prototype, v)) }.iterator,
Seq(s.prototype)
)
}
case class ExplicitSampling[T](prototype: Val[T], data: Iterable[T])
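// Illustrative usage (a sketch, assuming a Val[Double] named `x`):
//   val x = Val[Double]("x")
//   val sampling = ExplicitSampling(x, Seq(0.1, 0.2, 0.3))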
|
openmole/openmole
|
openmole/core/org.openmole.core.workflow/src/main/scala/org/openmole/core/workflow/sampling/ExplicitSampling.scala
|
Scala
|
agpl-3.0
| 1,406 |
package com.github.romangrebennikov.columnize.protocol.cql.types
import java.nio.ByteBuffer
/**
* Created by shutty on 10/12/15.
*/
case object DoubleType extends CQL.Type {
def deserialize(raw:ByteBuffer) = DoubleValue(raw)
}
case class DoubleValue(data:Double) extends CQL.Value
object DoubleValue {
def apply(raw:ByteBuffer) = new DoubleValue(raw.getDouble)
}
|
shuttie/columnize
|
src/main/scala/com/github/romangrebennikov/columnize/protocol/cql/types/DoubleType.scala
|
Scala
|
bsd-2-clause
| 371 |
package scala.lms
package common
import internal._
import scala.reflect.SourceContext
/**
* This trait automatically lifts any concrete instance to a representation.
*/
trait LiftAll extends Base {
protected implicit def __unit[T:Manifest](x: T) = unit(x)
}
/**
* The Base trait defines the type constructor Rep, which is the higher-kinded type that allows for other DSL types to be
* polymorphically embedded.
*
* @since 0.1
*/
trait Base extends EmbeddedControls {
type API <: Base
type Rep[+T]
protected def unit[T:Manifest](x: T): Rep[T]
// always lift Unit and Null (for now)
implicit def unitToRepUnit(x: Unit) = unit(x)
implicit def nullToRepNull(x: Null) = unit(x)
}
/**
* This trait sets the representation to be based on AST Expression nodes.
*
* @since 0.1
*/
trait BaseExp extends Base with Expressions with Blocks with Transforming {
type Rep[+T] = Exp[T]
protected def unit[T:Manifest](x: T) = Const(x)
}
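// Illustrative: with BaseExp mixed in, unit(1) stages the literal as Const(1): Exp[Int],
// so DSL code written against Rep[T] builds an expression tree instead of evaluating eagerly.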
trait BlockExp extends BaseExp with Blocks
trait EffectExp extends BaseExp with Effects {
def mapOver(t: Transformer, u: Summary) = { // TODO: move to effects class?
u.copy(mayRead = t.onlySyms(u.mayRead), mstRead = t.onlySyms(u.mstRead),
mayWrite = t.onlySyms(u.mayWrite), mstWrite = t.onlySyms(u.mstWrite))
}
override def mirrorDef[A:Manifest](e: Def[A], f: Transformer)(implicit pos: SourceContext): Def[A] = e match {
case Reflect(x, u, es) => Reflect(mirrorDef(x,f), mapOver(f,u), f(es))
case Reify(x, u, es) => Reify(f(x), mapOver(f,u), f(es))
case _ => super.mirrorDef(e,f)
}
override def mirror[A:Manifest](e: Def[A], f: Transformer)(implicit pos: SourceContext): Exp[A] = e match {
case Reflect(x, u, es) => reflectMirrored(mirrorDef(e,f).asInstanceOf[Reflect[A]])
case Reify(x, u, es) => Reify(f(x), mapOver(f,u), f(es))
case _ => super.mirror(e,f)
}
}
trait BaseFatExp extends BaseExp with FatExpressions with FatTransforming
// The traits below provide an interface to codegen so that clients do
// not need to depend on internal._
trait ScalaGenBase extends ScalaCodegen
trait ScalaGenEffect extends ScalaNestedCodegen with ScalaGenBase
trait ScalaGenFat extends ScalaFatCodegen with ScalaGenBase
trait CLikeGenBase extends CLikeCodegen
trait CLikeGenEffect extends CLikeNestedCodegen with CLikeGenBase
trait CLikeGenFat extends CLikeFatCodegen with CLikeGenBase
trait GPUGenBase extends GPUCodegen
trait GPUGenEffect extends GPUGenBase with CLikeNestedCodegen
trait GPUGenFat extends GPUGenBase with CLikeFatCodegen
trait CudaGenBase extends CudaCodegen
trait CudaGenEffect extends CudaNestedCodegen with CudaGenBase
trait CudaGenFat extends CudaFatCodegen with CudaGenBase
trait OpenCLGenBase extends OpenCLCodegen
trait OpenCLGenEffect extends OpenCLNestedCodegen with OpenCLGenBase
trait OpenCLGenFat extends OpenCLFatCodegen with OpenCLGenBase
trait CGenBase extends CCodegen
trait CGenEffect extends CNestedCodegen with CGenBase
trait CGenFat extends CFatCodegen with CGenBase
|
scalan/virtualization-lms-core
|
src/common/Base.scala
|
Scala
|
bsd-3-clause
| 3,022 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.finders
import org.scalatest.WordSpec
class WordSpecFinderSuite extends FinderSuite {
test("WordSpecFinder should find test name for tests written in test suite that extends org.scalatest.FeatureSpec, using should and in") {
class TestingWordSpec1 extends WordSpec {
"A Stack" should {
"pop values in last-in-first-out order" in {
}
"throw NoSuchElementException if an empty stack is popped" in {
println("nested")
}
}
}
val suiteClass = classOf[TestingWordSpec1]
val finders = LocationUtils.getFinders(suiteClass)
assert(finders.size == 1, "org.scalatest.WordSpec should have 1 finder, but we got: " + finders.size)
val finder = finders.get(0)
assert(finder.getClass == classOf[WordSpecFinder], "Suite that uses org.scalatest.WordSpec should use WordSpecFinder.")
val classDef = new ClassDefinition(suiteClass.getName, null, Array.empty, "TestingWordSpec1")
val constructorBlock = new ConstructorBlock(suiteClass.getName, classDef, Array.empty)
val aStack = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "A Stack"), constructorBlock, Array.empty, "should", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val popValuesInLifo = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "pop values in last-in-first-out order"), aStack, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val throwNsee = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "throw NoSuchElementException if an empty stack is popped"), aStack, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val nested = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "{Predef}"), throwNsee, Array.empty, "println", new StringLiteral(suiteClass.getName, null, "nested"))
List[AstNode](constructorBlock, aStack, popValuesInLifo, throwNsee, nested).foreach(_.parent)
val aStackSelection = finder.find(aStack)
expectSelection(aStackSelection, suiteClass.getName, "A Stack", Array("A Stack should pop values in last-in-first-out order", "A Stack should throw NoSuchElementException if an empty stack is popped"))
val popValuesInLifoSelection = finder.find(popValuesInLifo)
expectSelection(popValuesInLifoSelection, suiteClass.getName, "A Stack should pop values in last-in-first-out order", Array("A Stack should pop values in last-in-first-out order"))
val throwNseeSelection = finder.find(throwNsee)
expectSelection(throwNseeSelection, suiteClass.getName, "A Stack should throw NoSuchElementException if an empty stack is popped", Array("A Stack should throw NoSuchElementException if an empty stack is popped"))
val nestedSelection = finder.find(nested)
expectSelection(nestedSelection, suiteClass.getName, "A Stack should throw NoSuchElementException if an empty stack is popped", Array("A Stack should throw NoSuchElementException if an empty stack is popped"))
}
test("WordSpecFinder should find test name for tests written in test suite that extends org.scalatest.FeatureSpec, using must and in") {
class TestingWordSpec1 extends WordSpec {
"A Stack" must {
"pop values in last-in-first-out order" in {
}
"throw NoSuchElementException if an empty stack is popped" in {
println("nested")
}
}
}
val suiteClass = classOf[TestingWordSpec1]
val finders = LocationUtils.getFinders(suiteClass)
assert(finders.size == 1, "org.scalatest.WordSpec should have 1 finder, but we got: " + finders.size)
val finder = finders.get(0)
assert(finder.getClass == classOf[WordSpecFinder], "Suite that uses org.scalatest.WordSpec should use WordSpecFinder.")
val classDef = new ClassDefinition(suiteClass.getName, null, Array.empty, "TestingWordSpec1")
val constructorBlock = new ConstructorBlock(suiteClass.getName, classDef, Array.empty)
val aStack = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "A Stack"), constructorBlock, Array.empty, "must", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val popValuesInLifo = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "pop values in last-in-first-out order"), aStack, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val throwNsee = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "throw NoSuchElementException if an empty stack is popped"), aStack, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val nested = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "{Predef}"), throwNsee, Array.empty, "println", new StringLiteral(suiteClass.getName, null, "nested"))
List[AstNode](constructorBlock, aStack, popValuesInLifo, throwNsee, nested).foreach(_.parent)
val aStackSelection = finder.find(aStack)
expectSelection(aStackSelection, suiteClass.getName, "A Stack", Array("A Stack must pop values in last-in-first-out order", "A Stack must throw NoSuchElementException if an empty stack is popped"))
val popValuesInLifoSelection = finder.find(popValuesInLifo)
expectSelection(popValuesInLifoSelection, suiteClass.getName, "A Stack must pop values in last-in-first-out order", Array("A Stack must pop values in last-in-first-out order"))
val throwNseeSelection = finder.find(throwNsee)
expectSelection(throwNseeSelection, suiteClass.getName, "A Stack must throw NoSuchElementException if an empty stack is popped", Array("A Stack must throw NoSuchElementException if an empty stack is popped"))
val nestedSelection = finder.find(nested)
expectSelection(nestedSelection, suiteClass.getName, "A Stack must throw NoSuchElementException if an empty stack is popped", Array("A Stack must throw NoSuchElementException if an empty stack is popped"))
}
test("WordSpecFinder should find test name for tests written in test suite that extends org.scalatest.FeatureSpec, using when, must, can and in") {
class TestingWordSpec2 extends WordSpec {
"A Stack" when {
"empty" must {
"be empty" in {
}
"complain on peek" in {
println("nested")
}
"complain on pop" in {
}
}
"full" can {
"be full" in {
}
"complain on push" in {
}
}
}
}
val suiteClass = classOf[TestingWordSpec2]
val finders = LocationUtils.getFinders(suiteClass)
assert(finders.size == 1, "org.scalatest.WordSpec should have 1 finder, but we got: " + finders.size)
val finder = finders.get(0)
assert(finder.getClass == classOf[WordSpecFinder], "Suite that uses org.scalatest.WordSpec should use WordSpecFinder.")
val classDef = new ClassDefinition(suiteClass.getName, null, Array.empty, "TestingWordSpec2")
val constructorBlock = new ConstructorBlock(suiteClass.getName, classDef, Array.empty)
val aStack = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "A Stack"), constructorBlock, Array.empty, "when", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val empty = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "empty"), aStack, Array.empty, "must", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val beEmpty = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "be empty"), empty, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val complainOnPeek = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "complain on peek"), empty, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val nested = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "{Predef}"), complainOnPeek, Array.empty, "println", new StringLiteral(suiteClass.getName, null, "nested"))
val complainOnPop = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "complain on pop"), empty, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val full = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "full"), aStack, Array.empty, "can", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val beFull = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "be full"), full, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val complainOnPush = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "complain on push"), full, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
List[AstNode](constructorBlock, aStack, empty, beEmpty, complainOnPeek, nested, complainOnPop, full, beFull, complainOnPush).foreach(_.parent)
val aStackSelection = finder.find(aStack)
expectSelection(aStackSelection, suiteClass.getName, "A Stack", Array("A Stack when empty must be empty", "A Stack when empty must complain on peek", "A Stack when empty must complain on pop",
"A Stack when full can be full", "A Stack when full can complain on push"))
val emptySelection = finder.find(empty)
expectSelection(emptySelection, suiteClass.getName, "A Stack when empty", Array("A Stack when empty must be empty", "A Stack when empty must complain on peek", "A Stack when empty must complain on pop"))
val beEmptySelection = finder.find(beEmpty)
expectSelection(beEmptySelection, suiteClass.getName, "A Stack when empty must be empty", Array("A Stack when empty must be empty"))
val complainOnPeekSelection = finder.find(complainOnPeek)
expectSelection(complainOnPeekSelection, suiteClass.getName, "A Stack when empty must complain on peek", Array("A Stack when empty must complain on peek"))
val nestedSelection = finder.find(nested)
expectSelection(nestedSelection, suiteClass.getName, "A Stack when empty must complain on peek", Array("A Stack when empty must complain on peek"))
val complainOnPopSelection = finder.find(complainOnPop)
expectSelection(complainOnPopSelection, suiteClass.getName, "A Stack when empty must complain on pop", Array("A Stack when empty must complain on pop"))
val fullSelection = finder.find(full)
expectSelection(fullSelection, suiteClass.getName, "A Stack when full", Array("A Stack when full can be full", "A Stack when full can complain on push"))
val beFullSelection = finder.find(beFull)
expectSelection(beFullSelection, suiteClass.getName, "A Stack when full can be full", Array("A Stack when full can be full"))
val complainOnPushSelection = finder.find(complainOnPush)
expectSelection(complainOnPushSelection, suiteClass.getName, "A Stack when full can complain on push", Array("A Stack when full can complain on push"))
}
test("WordSpecFinder should find test name for tests written in test suite that extends org.scalatest.FeatureSpec, using should, which and in") {
class TestingWordSpec3 extends WordSpec {
"The ScalaTest Matchers DSL" should {
"provide an and operator" that {
"returns silently when evaluating true and true" in {}
"throws a TestFailedException when evaluating true and false" in {}
"throws a TestFailedException when evaluating false and true" in {
println("nested")
}
"throws a TestFailedException when evaluating false and false" in {}
}
"provide an or operator" that { // we'll use 'which' in the DSL below.
"returns silently when evaluating true or true" in {}
"returns silently when evaluating true or false" in {}
"returns silently when evaluating false or true" in {}
"throws a TestFailedException when evaluating false or false" in {}
}
}
}
val suiteClass = classOf[TestingWordSpec3]
val finders = LocationUtils.getFinders(suiteClass)
assert(finders.size == 1, "org.scalatest.WordSpec should have 1 finder, but we got: " + finders.size)
val finder = finders.get(0)
assert(finder.getClass == classOf[WordSpecFinder], "Suite that uses org.scalatest.WordSpec should use WordSpecFinder.")
val classDef = new ClassDefinition(suiteClass.getName, null, Array.empty, "TestingWordSpec3")
val constructorBlock = new ConstructorBlock(suiteClass.getName, classDef, Array.empty)
val scalaTestDsl = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "The ScalaTest Matchers DSL"), constructorBlock, Array.empty, "should", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val provideAndOpr = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "provide an and operator"), scalaTestDsl, Array.empty, "that", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val andSilentTrueTrue = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "returns silently when evaluating true and true"), provideAndOpr, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val andThrowTfeTrueFalse = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "throws a TestFailedException when evaluating true and false"), provideAndOpr, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val andThrowTfeFalseTrue = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "throws a TestFailedException when evaluating false and true"), provideAndOpr, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val nested = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "{Predef}"), andThrowTfeFalseTrue, Array.empty, "println", new StringLiteral(suiteClass.getName, null, "nested"))
val andThrowTfeFalseFalse = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "throws a TestFailedException when evaluating false and false"), provideAndOpr, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val provideOrOpr = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "provide an or operator"), scalaTestDsl, Array.empty, "which", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val orSilentTrueTrue = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "returns silently when evaluating true or true"), provideOrOpr, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val orSilentTrueFalse = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "returns silently when evaluating true or false"), provideOrOpr, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val orSilentFalseTrue = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "returns silently when evaluating false or true"), provideOrOpr, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
val orThrowTfeFalseFalse = new MethodInvocation(suiteClass.getName, new ToStringTarget(suiteClass.getName, null, Array.empty, "throws a TestFailedException when evaluating false or false"), provideOrOpr, Array.empty, "in", new ToStringTarget(suiteClass.getName, null, Array.empty, "{}"))
List[AstNode](constructorBlock, scalaTestDsl, provideAndOpr, andSilentTrueTrue, andThrowTfeTrueFalse, andThrowTfeFalseTrue,
nested, andThrowTfeFalseFalse, provideOrOpr, orSilentTrueTrue, orSilentTrueFalse, orSilentFalseTrue, orThrowTfeFalseFalse).foreach(_.parent)
val scalaTestDslSelection = finder.find(scalaTestDsl)
expectSelection(scalaTestDslSelection, suiteClass.getName, "The ScalaTest Matchers DSL",
Array("The ScalaTest Matchers DSL should provide an and operator that returns silently when evaluating true and true",
"The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating true and false",
"The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and true",
"The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and false",
"The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating true or true",
"The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating true or false",
"The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating false or true",
"The ScalaTest Matchers DSL should provide an or operator which throws a TestFailedException when evaluating false or false"))
val provideAndOprSelection = finder.find(provideAndOpr)
expectSelection(provideAndOprSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an and operator",
Array("The ScalaTest Matchers DSL should provide an and operator that returns silently when evaluating true and true",
"The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating true and false",
"The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and true",
"The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and false"))
val andSilentTrueTrueSelection = finder.find(andSilentTrueTrue)
expectSelection(andSilentTrueTrueSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an and operator that returns silently when evaluating true and true",
Array("The ScalaTest Matchers DSL should provide an and operator that returns silently when evaluating true and true"))
val andThrowTfeTrueFalseSelection = finder.find(andThrowTfeTrueFalse)
expectSelection(andThrowTfeTrueFalseSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating true and false",
Array("The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating true and false"))
val andThrowTfeFalseTrueSelection = finder.find(andThrowTfeFalseTrue)
expectSelection(andThrowTfeFalseTrueSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and true",
Array("The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and true"))
val nestedSelection = finder.find(nested)
expectSelection(nestedSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and true",
Array("The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and true"))
val andThrowTfeFalseFalseSelection = finder.find(andThrowTfeFalseFalse)
expectSelection(andThrowTfeFalseFalseSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and false",
Array("The ScalaTest Matchers DSL should provide an and operator that throws a TestFailedException when evaluating false and false"))
val provideOrOprSelection = finder.find(provideOrOpr)
expectSelection(provideOrOprSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an or operator",
Array("The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating true or true",
"The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating true or false",
"The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating false or true",
"The ScalaTest Matchers DSL should provide an or operator which throws a TestFailedException when evaluating false or false"))
val orSilentTrueTrueSelection = finder.find(orSilentTrueTrue)
expectSelection(orSilentTrueTrueSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating true or true",
Array("The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating true or true"))
val orSilentTrueFalseSelection = finder.find(orSilentTrueFalse)
expectSelection(orSilentTrueFalseSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating true or false",
Array("The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating true or false"))
val orSilentFalseTrueSelection = finder.find(orSilentFalseTrue)
expectSelection(orSilentFalseTrueSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating false or true",
Array("The ScalaTest Matchers DSL should provide an or operator which returns silently when evaluating false or true"))
val orThrowTfeFalseFalseSelection = finder.find(orThrowTfeFalseFalse)
expectSelection(orThrowTfeFalseFalseSelection, suiteClass.getName, "The ScalaTest Matchers DSL should provide an or operator which throws a TestFailedException when evaluating false or false",
Array("The ScalaTest Matchers DSL should provide an or operator which throws a TestFailedException when evaluating false or false"))
}
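// Note the pattern exercised throughout these tests: calling finder.find on an
// AST node that is not itself a test or scope (e.g. the nested println
// invocation) resolves the selection to the closest enclosing test.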
}
|
scalatest/scalatest-finders
|
src/test/scala/org/scalatest/finders/WordSpecFinderSuite.scala
|
Scala
|
apache-2.0
| 24,201 |
package mesosphere.marathon
import java.util.concurrent.locks.ReentrantLock
import com.typesafe.config.Config
import scala.concurrent.Future
import scala.language.implicitConversions
package object util {
type Success[T] = scala.util.Success[T]
val Success = scala.util.Success
type Failure[T] = scala.util.Failure[T]
val Failure = scala.util.Failure
implicit def toRichFuture[T](f: Future[T]): RichFuture[T] = new RichFuture(f)
implicit def toRichLock(l: ReentrantLock): RichLock = new RichLock(l)
implicit def toRichConfig(c: Config): RichConfig = new RichConfig(c)
}
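// A minimal usage sketch (only the aliases above are assumed; the Rich* wrapper
// methods are defined elsewhere in this package):
//
//   import mesosphere.marathon.util._
//   val result: scala.util.Try[Int] = Success(41)
//   val answer = result match {
//     case Success(v) => v + 1
//     case Failure(_) => 0
//   }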
|
gsantovena/marathon
|
src/main/scala/mesosphere/marathon/util/package.scala
|
Scala
|
apache-2.0
| 595 |
//
// PrettyPrint.scala -- Scala class PrettyPrint
// Project OrcScala
//
// Created by dkitchin on Jun 7, 2010.
//
// Copyright (c) 2017 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.ast.orctimizer.named
import scala.collection.mutable.{ HashMap, Map }
import orc.util.{ FragmentAppender, PrettyPrintInterpolator }
import orc.values.{ Field, Format }
/** Nicer printing for named OIL syntax trees.
*
* @author dkitchin, amp
*/
class PrettyPrint {
class MyPrettyPrintInterpolator extends PrettyPrintInterpolator {
implicit def implicitInterpolator(sc: StringContext) = new MyInterpolator(sc)
class MyInterpolator(sc: StringContext) extends Interpolator(sc) {
override val processValue: PartialFunction[Any, FragmentAppender] = {
case a: NamedAST =>
reduce(a)
}
}
}
val interpolator = new MyPrettyPrintInterpolator
import interpolator._
val vars: Map[BoundVar, String] = new HashMap()
var varCounter: Int = 0
def newVarName(): String = {
varCounter += 1
val s = "`t" + varCounter
//Logger.log(Level.FINE, s"Unnamed variable printed with name $s", new Exception(s).fillInStackTrace())
s
}
def lookup(temp: BoundVar) = vars.getOrElseUpdate(temp, newVarName())
val typevars: Map[BoundTypevar, String] = new HashMap()
var typevarCounter: Int = 0
def newTypevarName(): String = { typevarCounter += 1; "`T" + typevarCounter }
def lookup(temp: BoundTypevar) = typevars.getOrElseUpdate(temp, newTypevarName())
def commasep(l: Seq[NamedAST]): FragmentAppender = {
FragmentAppender.mkString(l.map(reduce), ", ")
}
def reduce(ast: NamedAST): FragmentAppender = {
val exprStr: FragmentAppender = ast match {
case Stop() => pp"stop"
case Call(target, args, typeargs) => {
val typePar = typeargs match {
case Some(ts) => pp"[${commasep(ts)}]"
case None => ""
}
pp"call $target$typePar(${commasep(args)})"
}
case GetMethod(o) => pp"method $o"
case left Parallel right => pp"($left | $right)"
case Branch(left, x, right) => pp"$left >$x>\n$right"
case Trim(f) => pp"{| $f |}"
case Resolve(futures, e) => pp"resolve ${commasep(futures)} in $e"
case Future(f) => pp"future { $StartIndent$f$EndIndent }"
case Force(xs, vs, e) => pp"force ${commasep(xs)} = ${commasep(vs)} #\n$e"
case left Otherwise right => pp"($left ; $right)"
case IfLenientMethod(a, l, r) => pp"iflenient $a then$StartIndent\n$l$EndIndent\nelse$StartIndent\n$r$EndIndent"
case DeclareMethods(defs, body) => pp"-- group of ${defs.size} defs/sites\n${FragmentAppender.mkString(defs.map(reduce))}\n$body"
case Routine(f, formals, body, typeformals, argtypes, returntype) => {
val name = f.optionalVariableName.getOrElse(lookup(f))
val retT = returntype match {
case Some(t) => pp" :: t"
case None => ""
}
pp"""routine $name[${commasep(typeformals)}](${commasep(argtypes.getOrElse(Nil))})$retT
"routine $name(${commasep(formals)}) = $StartIndent$body$EndIndent
|"""
}
case Service(f, formals, body, typeformals, argtypes, returntype) => {
val name = f.optionalVariableName.getOrElse(lookup(f))
val retT = returntype match {
case Some(t) => pp" :: t"
case None => ""
}
pp"""service $name[${commasep(typeformals)}](${commasep(argtypes.getOrElse(Nil))})$retT
"service $name(${commasep(formals)}) = $StartIndent$body$EndIndent
|"""
}
case New(self, st, bindings, t) => {
def reduceField(f: (Field, FieldValue)) = {
val (name, expr) = f
pp"$name = $StartIndent$expr$EndIndent"
}
def fields = pp" #$StartIndent\\n${FragmentAppender.mkString(bindings.map(reduceField), " #\\n")}$EndIndent\\n"
pp"new ${t.map(reduce).getOrElse("")} { $self ${() => st.map(t => pp": $t").getOrElse(pp"")} ${() => if (bindings.nonEmpty) fields else pp""} }"
}
case FieldFuture(e) => pp"future{ $StartIndent$e$EndIndent }"
case FieldArgument(e) => reduce(e)
case HasType(body, expectedType) => pp"($body :: $expectedType)"
case DeclareType(u, t, body) => pp"type $u = $t\n$body"
//case VtimeZone(timeOrder, body) => "VtimeZone($timeOrder, $body)"
case GetField(o, f) => pp"$o.${f.name}"
case Constant(v) => FragmentAppender(Format.formatValue(v))
case (x: BoundVar) => FragmentAppender(x.optionalVariableName.getOrElse(lookup(x)))
case UnboundVar(s) => pp"?$s"
case u: BoundTypevar => FragmentAppender(u.optionalVariableName.getOrElse(lookup(u)))
case UnboundTypevar(s) => pp"?$s"
case Top() => pp"Top"
case Bot() => pp"Bot"
case FunctionType(typeformals, argtypes, returntype) => {
pp"lambda[${commasep(typeformals)}](${commasep(argtypes)}) :: $returntype"
}
case TupleType(elements) => pp"(${commasep(elements)})"
case TypeApplication(tycon, typeactuals) => pp"$tycon[${commasep(typeactuals)}]"
case AssertedType(assertedType) => pp"$assertedType!"
case TypeAbstraction(typeformals, t) => pp"[${commasep(typeformals)}]($t)"
case ImportedType(classname) => FragmentAppender(classname)
case ClassType(classname) => FragmentAppender(classname)
case VariantType(_, typeformals, variants) => {
val variantSeq =
for ((name, variant) <- variants) yield {
pp"$name(${commasep(variant)})"
}
pp"[${commasep(typeformals)}](${FragmentAppender.mkString(variantSeq, " | ")})"
}
case IntersectionType(a, b) => pp"$a & $b"
case UnionType(a, b) => pp"$a | $b"
case NominalType(t) => pp"nominal[$t]"
case RecordType(mems) => {
val m = FragmentAppender.mkString(mems.mapValues(reduce).map(p => pp"${p._1} :: ${p._2}"), " # ")
pp"{. $m .}"
}
case StructuralType(mems) => {
val m = FragmentAppender.mkString(mems.mapValues(reduce).map(p => pp"${p._1} :: ${p._2}"), " # ")
pp"{ $m }"
}
//case _ => "???"
}
exprStr
}
}
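// A usage sketch (hypothetical `ast` value; how a FragmentAppender is rendered
// to a String is assumed, not shown in this file):
//
//   val printer = new PrettyPrint()
//   val fragments = printer.reduce(ast) // FragmentAppender for the whole tree
//   // unnamed variables get stable generated names (`t1, `t2, ...) per printer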
|
orc-lang/orc
|
OrcScala/src/orc/ast/orctimizer/named/PrettyPrint.scala
|
Scala
|
bsd-3-clause
| 6,418 |
package uk.gov.gds.ier.transaction.ordinary.postalVote
import uk.gov.gds.ier.step.StepTemplate
import uk.gov.gds.ier.transaction.ordinary.InprogressOrdinary
import uk.gov.gds.ier.model.PostalVoteOption
trait PostalVoteMustache extends StepTemplate[InprogressOrdinary] {
case class PostalVoteModel(
question: Question,
postCheckboxYes: Field,
postCheckboxNoAndVoteInPerson: Field,
postCheckboxNoAndAlreadyHave: Field,
deliveryByEmail: Field,
deliveryByPost: Field,
emailField: Field,
deliveryMethodValid: String
) extends MustacheData
val mustache = MultilingualTemplate("ordinary/postalVote") { implicit lang =>
(form, postUrl) =>
implicit val progressForm = form
val emailAddress = form(keys.contact.email.detail).value
val deliveryMethodValidation =
if (form(keys.postalVote.deliveryMethod.methodName).hasErrors) "invalid" else ""
PostalVoteModel(
question = Question(
postUrl = postUrl.url,
number = s"10 ${Messages("step_of")} 11",
title = Messages("ordinary_postalVote_title"),
errorMessages = Messages.translatedGlobalErrors(form)),
postCheckboxYes = RadioField(
key = keys.postalVote.optIn,
value = PostalVoteOption.Yes.name),
postCheckboxNoAndVoteInPerson = RadioField(
key = keys.postalVote.optIn,
value = PostalVoteOption.NoAndVoteInPerson.name),
postCheckboxNoAndAlreadyHave = RadioField(
key = keys.postalVote.optIn,
value = PostalVoteOption.NoAndAlreadyHave.name),
deliveryByEmail = RadioField(
key = keys.postalVote.deliveryMethod.methodName,
value = "email"),
deliveryByPost = RadioField(
key = keys.postalVote.deliveryMethod.methodName,
value = "post"),
emailField = TextField(
key = keys.postalVote.deliveryMethod.emailAddress,
default = emailAddress
),
deliveryMethodValid = deliveryMethodValidation
)
}
}
|
michaeldfallen/ier-frontend
|
app/uk/gov/gds/ier/transaction/ordinary/postalVote/PostalVoteMustache.scala
|
Scala
|
mit
| 1,987 |
package gg.uhc.hosts.endpoints.matches
import java.time.format.DateTimeFormatter
import java.time.temporal.ChronoUnit
import java.time.{Instant, ZoneOffset}
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives.{entity, _}
import akka.http.scaladsl.server._
import gg.uhc.hosts._
import gg.uhc.hosts.database.{Database, MatchRow}
import gg.uhc.hosts.endpoints.{BasicCache, CustomDirectives, EndpointRejectionHandler}
import doobie.free.connection.delay
import doobie._
import gg.uhc.hosts.endpoints.matches.websocket.MatchesWebsocket
import gg.uhc.hosts.endpoints.versions.Version
/**
* Creates a new Match object. Requires login + 'host' permission
*/
class CreateMatch(customDirectives: CustomDirectives, database: Database, cache: BasicCache, websocket: MatchesWebsocket) {
import Alerts._
import CustomJsonCodec._
import customDirectives._
case class CreateMatchPayload(
opens: Instant,
address: Option[String],
ip: Option[String],
scenarios: List[String],
tags: List[String],
teams: String,
size: Option[Int],
customStyle: Option[String],
count: Int,
content: String,
region: String,
location: String,
mainVersion: String,
version: String,
slots: Int,
length: Int,
mapSize: Int,
pvpEnabledAt: Int,
hostingName: Option[String],
tournament: Boolean)
// allowed regions
private[this] val regions = List("NA", "SA", "AS", "EU", "AF", "OC")
/**
* Converts the payload into an insertable MatchRow, does not validate any input
*/
private[this] def convertPayload(payload: CreateMatchPayload, author: String): Directive1[MatchRow] = {
var row = MatchRow(
address = payload.address,
content = payload.content,
count = payload.count,
customStyle = if (payload.teams == "custom") payload.customStyle else None, // remove if not custom
ip = payload.ip,
// Replace time with the UTC offset and set everything sub-minute accuracy to 0
opens = payload.opens.atOffset(ZoneOffset.UTC).withSecond(0).withNano(0).toInstant,
region = payload.region,
teams = payload.teams,
size = if (TeamStyles.byCode.get(payload.teams).exists(_.isInstanceOf[SizedTeamStyle])) payload.size else None, // remove size if not required
location = payload.location,
mainVersion = payload.mainVersion,
version = payload.version,
slots = payload.slots,
length = payload.length,
mapSize = payload.mapSize,
pvpEnabledAt = payload.pvpEnabledAt,
scenarios = payload.scenarios.distinctBy(_.toLowerCase), // removes duplicates
tags = payload.tags.distinctBy(_.toLowerCase), // removes duplicates
tournament = payload.tournament,
// non-user payload.vars below
id = -1,
created = Instant.now(),
author = author,
removed = false,
removedBy = None,
removedReason = None,
approvedBy = None,
hostingName = payload.hostingName.filter(!_.isEmpty)
)
// Automatically add the 'rush' scenario for games < 45 minutes long if it doesn't already have it and isn't a tournament
if (!row.tournament && row.length < 45 && !row.scenarios.exists(_.toLowerCase == "rush")) {
row = row.copy(scenarios = row.scenarios :+ "Rush")
}
provide(row)
}
private[this] def overhostCheck(row: MatchRow): Directive0 =
requireSucessfulQuery(
database.getPotentialConflicts(start = row.opens, end = row.opens, version = row.version, region = row.region)
) flatMap {
// Its valid if:
// - there are no conflicts
// - this is a non-tournament and conflicts are all tournaments
case conflicts if conflicts.isEmpty =>
pass
case conflicts if !row.tournament && conflicts.forall(_.tournament) =>
pass
case conflicts =>
val hours = row.opens.atOffset(ZoneOffset.UTC).format(DateTimeFormatter.ofPattern("HH:mm"))
// Try to find a non-tournament to tell, otherwise just give whatever was returned first
val best = conflicts.find(!_.tournament).getOrElse(conflicts.head)
reject(ValidationRejection(s"Conflicts with /u/${best.author}'s #${best.count} (${best.region} - $hours) in ${best.mainVersion}"))
}
private[this] def optionalValidate[T](data: Option[T], message: String)(p: T => Boolean) =
data
.map { item =>
validate(p(item), message)
}
.getOrElse(pass)
  private[this] def ipChecks(row: MatchRow): Directive0 = {
    // treat empty strings as non-provided
    val valIp = row.ip.filter(_.nonEmpty)
    val valAddress = row.address.filter(_.nonEmpty)
    // the else branch is required: a bare `reject(...)` in statement position is a
    // discarded value and would never take effect
    if (valIp.isEmpty && valAddress.isEmpty)
      reject(ValidationRejection("Either an IP or an address must be provided (or both)"))
    else {
      val ipCheck = optionalValidate(valIp, "Invalid IP supplied, expected format 111.222.333.444[:55555]") { ip =>
        """^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})(?::(\d{1,5}))?$""".r.findFirstMatchIn(ip).exists { m =>
          val octets = (1 to 4).map(m.group(_).toInt).forall(i => i >= 0 && i <= 255)
          val port = Option(m.group(5)).map(_.toInt)
          octets && port.forall(p => p >= 1 && p <= 65535)
        }
      }
      val addressCheck = optionalValidate(valAddress.map(_.trim), "Address must be at least 5 chars") { address =>
        address.length >= 5
      }
      ipCheck & addressCheck
    }
  }
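// Worked examples for the IP check above (a sketch): "192.168.0.1" and
// "192.168.0.1:25565" pass; "999.1.1.1" fails the octet range check, and
// "1.2.3.4:0" fails because ports must be between 1 and 65535.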
/**
* Runs full validation of the input payload, including DB calls for overhost protection. Rejects with
* ValidationRejection if something fails; otherwise passes, leaving the validated MatchRow ready for inserting into the DB
*/
private[this] def validateRow(row: MatchRow): Directive0 =
validate(
row.opens.isAfter(Instant.now().plus(30, ChronoUnit.MINUTES)),
"Must be at least 30 minutes in advance"
) &
validate(row.opens.isBefore(Instant.now().plus(30, ChronoUnit.DAYS)), "Must be at most 30 days in advance") &
validate(
row.opens.atOffset(ZoneOffset.UTC).getMinute % 15 == 0,
"Minutes must be on exactly xx:00 xx:15 xx:30 or xx:45 in an hour (UTC)"
) &
ipChecks(row) &
validate(row.location.nonEmpty, "Must supply a location") &
validate(
Version.options.exists(v => v.displayName == row.mainVersion),
s"Invalid main version, expected one of: ${Version.options.map(v => v.displayName).mkString(", ")}"
) &
validate(row.version.nonEmpty, "Must supply a version") &
validate(row.slots >= 2, "Slots must be at least 2") &
validate(row.length >= 30, "Matches must be at least 30 minutes") &
validate(row.mapSize > 0, "Map size must be positive") &
validate(row.pvpEnabledAt >= 0, "PVP enabled at must be positive") &
validate(row.scenarios.nonEmpty, "Must supply at least 1 scenario") &
validate(row.scenarios.length <= 25, "Must supply at most 25 scenarios") &
validate(row.tags.length <= 5, "Must supply at most 5 tags") &
validate(TeamStyles.byCode.contains(row.teams), "Unknown team style") &
validate(row.content.nonEmpty, "Must provide some post content") &
validate(regions.contains(row.region), "Invalid region supplied") &
validate(
// either doesn't require size or size is within range
!TeamStyles.byCode(row.teams).isInstanceOf[SizedTeamStyle]
|| row.size.exists(size => size >= 0 && size <= 32767),
"Invalid value for size"
) &
validate(
row.teams != "custom" || row.customStyle.exists(_.nonEmpty),
"A custom style must be given when 'custom' is picked"
) &
validate(row.count >= 1, "Count must be at least 1") &
overhostCheck(row)
private[this] def createMatchAndAlerts(row: MatchRow): ConnectionIO[MatchRow] =
for {
id <- database.insertMatch(row)
allRules <- database.getAllAlertRules()
matchedRules = allRules.filter { _.matchesRow(row) }
_ <- matchedRules.foldLeft(delay(0)) { (prev, rule) => // reduce to run each in series, one for each alert
prev.flatMap { count =>
database.createAlert(matchId = id, triggeredRuleId = rule.id).map(_ + count)
}
}
} yield row.copy(id = id)
def apply(): Route =
handleRejections(EndpointRejectionHandler()) {
requireAuthentication { session =>
requireAtLeastOnePermission("host" :: "trial host" :: Nil, session.username) {
// parse the entity
entity(as[CreateMatchPayload]) { entity =>
convertPayload(entity, session.username) { row =>
validateRow(row) {
requireSucessfulQuery(createMatchAndAlerts(row)) { inserted =>
cache.invalidateUpcomingMatches()
websocket.notifyMatchCreated(inserted)
complete(StatusCodes.Created -> inserted)
}
}
}
}
}
}
}
}
|
Eluinhost/hosts.uhc.gg
|
src/main/scala/gg/uhc/hosts/endpoints/matches/CreateMatch.scala
|
Scala
|
mit
| 9,031 |
package regolic.lp
import regolic.algebra.{Rational, Vector, Matrix}
import org.scalatest.FunSuite
import Simplex.{Optimal, Infeasible, Unbounded}
class SimplexSuite extends FunSuite {
def r(n: BigInt) = Rational(n)
def r(n: BigInt, d: BigInt) = Rational(n, d)
def s2t(str: String, basis: List[Int]): Tableau = {
val rows: Array[Array[Rational]] = (str.split(',').map(s => s.split(' ').map(n => Rational(n))))
val reducedCost = new Vector(rows(0).init)
val value = rows(0).last
val matrix = new Matrix(rows.tail)
val systemMatrix = matrix.subMatrix(0, matrix.nbRow, 0, matrix.nbCol - 1)
val basisSolution = Vector(matrix.subMatrix(0, matrix.nbRow, matrix.nbCol - 1, 1))
new Tableau(basis, systemMatrix, reducedCost, basisSolution, value)
}
def s2m(str: String): Matrix[Rational] = new Matrix(str.split(',').map(s => s.split(' ').map(n => Rational(n))))
def s2v(str: String): Vector[Rational] = new Vector(str.split(' ').map(n => Rational(n)))
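// Parsing sketch (values unused by the tests below): rows are comma-separated
// and entries space-separated, so these helpers build Rational-valued structures.
val exampleVector = s2v("1 2 3") // Vector of Rationals 1, 2, 3
val exampleMatrix = s2m("1 2,3 4") // 2x2 matrix with rows (1 2) and (3 4)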
val c1 = s2v("1 1 1 0")
val b1 = s2v("3 2 5 1")
val A1 = s2m("1 2 3 0," +
"-1 2 6 0," +
"0 4 9 0," +
"0 0 3 1")
val lp1 = new StandardLinearProgram(c1, A1, b1)
val c2 = s2v("1 0")
val b2 = s2v("2 1")
val A2 = s2m("1 1," +
"1 0")
val lp2 = new StandardLinearProgram(c2, A2, b2)
val c3 = s2v("1 0")
val b3 = s2v("2 1")
val A3 = s2m("1 1," +
"1 1")
val lp3 = new StandardLinearProgram(c3, A3, b3)
val c4 = s2v("-1 0")
val b4 = s2v("2 0")
val A4 = s2m("1 -1," +
"0 0")
val lp4 = new StandardLinearProgram(c4, A4, b4)
test("phaseOne") {
}
test("phaseTwo") {
}
test("apply") {
assert(Simplex(lp1) === Optimal(s2v("1/2 5/4 0 1")))
assert(Simplex(lp2) === Optimal(s2v("1 1")))
assert(Simplex(lp3) === Infeasible)
assert(Simplex(lp4) === Unbounded)
}
}
|
regb/scabolic
|
src/test/scala/regolic/lp/SimplexSuite.scala
|
Scala
|
mit
| 1,894 |
package kr.scala.experiments.tests.akka.examples.chap03
import akka.actor.{Props, Actor}
import akka.event.Logging
import org.scalatest.FunSuite
/**
* MyActor
* Created by debop on 2014. 3. 3.
*/
class MyActor(val name: String = "actor") extends Actor {
def this() {
this("actor")
}
val log = Logging(context.system, this)
override def receive: Actor.Receive = {
case "test" => log.info("receive test")
case _ => log.info("received unknown message")
}
}
class Chap03Test extends FunSuite {
implicit val akkaSystem = akka.actor.ActorSystem()
test("props example") {
val props1 = Props[MyActor]
val props3 = Props(classOf[MyActor], "my actor")
}
test("props with args") {
akkaSystem.actorOf(DemoActor.props("hello"))
}
}
object DemoActor {
def props(name: String): Props = Props(classOf[DemoActor], name)
}
class DemoActor(name: String) extends Actor {
override def receive: Actor.Receive = {
case x => // some behavior
}
}
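// A usage sketch (hypothetical): the Props factory above keeps constructor
// arguments next to the actor definition, as recommended by the Akka docs.
//
//   val system = akka.actor.ActorSystem()
//   val demo = system.actorOf(DemoActor.props("demo"))
//   demo ! "hello" // handled by the catch-all case in receive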
|
debop/scala-experiments
|
src/test/scala/kr/scala/experiments/tests/akka/examples/chap03/MyActor.scala
|
Scala
|
apache-2.0
| 1,045 |
package com.getjenny.starchat.resources
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import akka.pattern.CircuitBreaker
import com.getjenny.starchat.entities.io.{Permissions, ReturnMessageData}
import com.getjenny.starchat.routing.{StarChatCircuitBreaker, StarChatResource}
import com.getjenny.starchat.services.ClusterNodesService
import scala.util.{Failure, Success}
/**
* Created by Angelo Leto <[email protected]> on 29/01/19.
*/
trait ClusterNodesResource extends StarChatResource {
private[this] val clusterNodesService: ClusterNodesService.type = ClusterNodesService
private[this] val routeName: String = """cluster_node"""
def clusterNodesRoutes: Route = handleExceptions(routesExceptionHandler) {
pathPrefix(routeName) {
pathEnd {
get {
authenticateBasicAsync(realm = authRealm,
authenticator = authenticator.authenticator) { user =>
authorizeAsync(_ =>
authenticator.hasPermissions(user, "admin", Permissions.read)) {
extractRequest { request =>
val breaker: CircuitBreaker = StarChatCircuitBreaker.getCircuitBreaker()
onCompleteWithBreakerFuture(breaker)(clusterNodesService.aliveNodes) {
case Success(t) =>
completeResponse(StatusCodes.OK, StatusCodes.BadRequest, t)
case Failure(e) =>
log.error(logTemplate(user.id, "", "clusterNodesRoutes", request.method, request.uri), e)
completeResponse(StatusCodes.BadRequest,
Option {
ReturnMessageData(code = 100, message = e.getMessage)
})
}
}
}
}
}
}
} ~
pathPrefix(routeName) {
get {
path(Segment) { uuid: String =>
authenticateBasicAsync(realm = authRealm,
authenticator = authenticator.authenticator) { user =>
authorizeAsync(_ =>
authenticator.hasPermissions(user, "admin", Permissions.read)) {
extractMethod { method =>
val breaker: CircuitBreaker = StarChatCircuitBreaker.getCircuitBreaker()
onCompleteWithBreakerFuture(breaker)(clusterNodesService.isAlive(uuid)) {
case Success(t) =>
completeResponse(StatusCodes.OK, StatusCodes.BadRequest, t)
case Failure(e) =>
log.error(s"Node($uuid) route=clusterNodesRoutes method=$method: ", e.getMessage)
completeResponse(StatusCodes.BadRequest,
Option {
ReturnMessageData(code = 100, message = e.getMessage)
})
}
}
}
}
}
} ~
post {
authenticateBasicAsync(realm = authRealm, authenticator = authenticator.authenticator) { user =>
authorizeAsync(_ =>
authenticator.hasPermissions(user, "admin", Permissions.read)) {
extractMethod { method =>
val breaker: CircuitBreaker = StarChatCircuitBreaker.getCircuitBreaker()
onCompleteWithBreakerFuture(breaker)(clusterNodesService.alive) {
case Success(t) =>
completeResponse(StatusCodes.OK, StatusCodes.BadRequest, t)
case Failure(e) =>
log.error(s"Node(${clusterNodesService.uuid}) route=clusterNodesRoutes method=$method: ", e.getMessage)
completeResponse(StatusCodes.BadRequest,
Option {
ReturnMessageData(code = 101, message = e.getMessage)
})
}
}
}
}
} ~
delete {
authenticateBasicAsync(realm = authRealm,
authenticator = authenticator.authenticator) { user =>
authorizeAsync(_ =>
authenticator.hasPermissions(user, "admin", Permissions.read)) {
extractMethod { method =>
val breaker: CircuitBreaker = StarChatCircuitBreaker.getCircuitBreaker()
onCompleteWithBreakerFuture(breaker)(clusterNodesService.cleanDeadNodes) {
case Success(t) =>
completeResponse(StatusCodes.OK, StatusCodes.BadRequest, t)
case Failure(e) =>
log.error(s"Node(${clusterNodesService.uuid}) route=clusterNodesRoutes method=$method: ", e.getMessage)
completeResponse(StatusCodes.BadRequest,
Option {
ReturnMessageData(code = 102, message = e.getMessage)
})
}
}
}
}
}
}
}
}
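// Route summary (a sketch; paths are relative to wherever this trait is mounted,
// and every endpoint requires basic auth with admin read permission):
//   GET    /cluster_node        -> list alive nodes
//   GET    /cluster_node/{uuid} -> liveness of a single node
//   POST   /cluster_node        -> mark the current node alive
//   DELETE /cluster_node        -> clean dead nodes from the registry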
|
GetJenny/starchat
|
src/main/scala/com/getjenny/starchat/resources/ClusterNodesResource.scala
|
Scala
|
gpl-2.0
| 5,013 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.coders
import com.spotify.scio.{FeatureFlag, MacroSettings, MagnoliaMacros}
import scala.reflect.macros._
private[coders] object CoderMacros {
private[this] var verbose = true
private[this] val reported: scala.collection.mutable.Set[(String, String)] =
scala.collection.mutable.Set.empty
private[this] val BlacklistedTypes = List("org.apache.beam.sdk.values.Row")
private[this] val Warnings =
Map(
"org.apache.avro.generic.GenericRecord" ->
"""
|Using a fallback coder for Avro's GenericRecord is discouraged as it is VERY inefficient.
|It is highly recommended to define a proper Coder[GenericRecord] using:
|
| Coder.avroGenericRecordCoder(schema)
""".stripMargin
)
def issueFallbackWarning[T: c.WeakTypeTag](
c: whitebox.Context
)(lp: c.Expr[shapeless.LowPriority]): c.Tree = {
import c.universe._
val show = MacroSettings.showCoderFallback(c) == FeatureFlag.Enable
val wtt = weakTypeOf[T]
val TypeRef(_, sym, args) = wtt
val typeName = sym.name
val params = args.headOption
.map(_ => args.mkString("[", ",", "]"))
.getOrElse("")
val fullType = s"$typeName$params"
val toReport = c.enclosingPosition.toString -> wtt.toString
val alreadyReported = reported.contains(toReport)
if (!alreadyReported) reported += toReport
val shortMessage =
s"""
| Warning: No implicit Coder found for the following type:
|
| >> $wtt
|
| using Kryo fallback instead.
"""
val longMessage =
shortMessage +
s"""
|
| Scio will use a fallback Kryo coder instead.
|
| If a type is not supported, consider implementing your own implicit Coder for this type.
| It is recommended to declare this Coder in your class companion object:
|
| object $typeName {
| import com.spotify.scio.coders.Coder
| import org.apache.beam.sdk.coders.AtomicCoder
|
| implicit def coder$typeName: Coder[$fullType] =
| Coder.beam(new AtomicCoder[$fullType] {
| def decode(in: InputStream): $fullType = ???
| def encode(ts: $fullType, out: OutputStream): Unit = ???
| })
| }
|
| If you do want to use a Kryo coder, be explicit about it:
|
| implicit def coder$typeName: Coder[$fullType] = Coder.kryo[$fullType]
|
| Additional info at:
| - https://spotify.github.io/scio/internals/Coders
|
"""
val fallback = q"""_root_.com.spotify.scio.coders.Coder.kryo[$wtt]"""
(verbose, alreadyReported) match {
case _ if BlacklistedTypes.contains(wtt.toString) =>
val msg =
s"Can't use a Kryo coder for $wtt. You need to explicitly set the Coder for this type"
c.abort(c.enclosingPosition, msg)
case _ if Warnings.contains(wtt.toString) =>
c.echo(c.enclosingPosition, Warnings(wtt.toString))
fallback
case (false, false) =>
if (show) c.echo(c.enclosingPosition, shortMessage.stripMargin)
fallback
case (true, false) =>
if (show) c.echo(c.enclosingPosition, longMessage.stripMargin)
verbose = false
fallback
case (_, _) =>
fallback
}
}
// Add a level of indirection to prevent the macro from capturing
// $outer which would make the Coder serialization fail
def wrappedCoder[T: c.WeakTypeTag](c: whitebox.Context): c.Tree = {
import c.universe._
val wtt = weakTypeOf[T]
val imp = c.openImplicits match {
case Nil => None
case _ => companionImplicit(c)(wtt)
}
imp.map(_ => EmptyTree).getOrElse {
// Magnolia does not support classes with a private constructor.
// Workaround the limitation by using a fallback in that case
privateConstructor(c)(wtt).fold(MagnoliaMacros.genWithoutAnnotations[T](c)) { _ =>
q"_root_.com.spotify.scio.coders.Coder.fallback[$wtt](null)"
}
}
}
private[this] def companionImplicit(c: whitebox.Context)(tpe: c.Type): Option[c.Symbol] = {
import c.universe._
val tp = c.typecheck(tq"_root_.com.spotify.scio.coders.Coder[$tpe]", c.TYPEmode).tpe
tpe.companion.members.iterator.filter(_.isImplicit).find(_.info.resultType =:= tp)
}
private[this] def privateConstructor(c: whitebox.Context)(tpe: c.Type): Option[c.Symbol] =
tpe.decls.find(m => m.isConstructor && m.isPrivate)
}
|
spotify/scio
|
scio-macros/src/main/scala/com/spotify/scio/coders/CoderMacros.scala
|
Scala
|
apache-2.0
| 5,215 |
/*
* Copyright (c) 2015 Robert Conrad - All Rights Reserved.
* Unauthorized copying of this file, via any medium is strictly prohibited.
* This file is proprietary and confidential.
* Last modified by rconrad, 1/1/15 5:04 PM
*/
package base.entity.auth.context
import base.entity.auth.AuthRoles.AuthRole
import base.entity.auth.AuthTypes.AuthType
import base.entity.auth.context.AuthContext.ExceptionStrings
import base.entity.auth.{ AuthRoles, AuthTypes }
import base.entity.perm.{ PermException, PermSetGroup, PermSetGroups }
import base.entity.test.EntityBaseSuite
import base.entity.user.UserDataFactory
/**
* Tests that AuthContext leaves (case classes inheriting ultimately from AuthContext) have the correct attributes and
* inheritance structure.
* @author rconrad
*/
// scalastyle:off line.size.limit
class AuthContextTest extends EntityBaseSuite {
// user rows
private lazy val superUser = UserDataFactory.makeRow()
private def interceptPermException(criteria: Boolean, f: => Unit, msg: String) {
criteria match {
case true => f
case false =>
val e = intercept[PermException](f)
assert(e.getMessage == msg)
}
}
// scalastyle:off parameter.number
// scalastyle:off method.length
// scalastyle:off cyclomatic.complexity
private def assertContext(
ctx: AuthContext,
authRole: AuthRole,
authType: AuthType,
authTypeId: Option[Long],
perms: PermSetGroup) {
val isUser = authType == AuthTypes.USER
val isKey = authType == AuthTypes.KEY
val hasUser = isUser
perms.permSet.set.foreach(perm => assert(ctx.has(perm)))
assert(ctx.perms == perms)
assert(ctx.isInstanceOf[UserAuthContext] == isUser)
assert(ctx.isInstanceOf[KeyAuthContext] == isKey)
assert(ctx.isUser == isUser)
interceptPermException(isUser, ctx.assertIsUser(), ExceptionStrings.assertIsUser)
assert(ctx.hasUser == hasUser)
interceptPermException(hasUser, ctx.assertHasUser(), ExceptionStrings.userThrows)
interceptPermException(!hasUser, ctx.assertHasNoUser(), ExceptionStrings.assertHasNoUser)
if (!hasUser) {
assert(ctx.user == None)
}
interceptPermException(hasUser, ctx.userThrows, ExceptionStrings.userThrows)
assert(ctx.authRole == authRole)
assert(ctx.authType == authType)
assert(ctx.authTypeId == authTypeId)
}
// scalastyle:on parameter.number
// scalastyle:on method.length
// scalastyle:on cyclomatic.complexity
private implicit def authType2Option(authType: AuthType) = Option(authType)
private implicit def authId2Option(authId: Long) = Option(authId)
test("UserAuthContext") {
assertContext(
AuthContextDataFactory.emptyUserAuth,
AuthRoles.PUBLIC,
AuthTypes.USER,
superUser.id,
PermSetGroups.god)
}
test("KeyAuthContext") {
assertContext(
AuthContextDataFactory.emptyKeyAuth,
AuthRoles.PUBLIC,
AuthTypes.KEY,
superUser.id,
PermSetGroups.god)
}
ignore("any other auth context") {
fail("not implemented")
}
}
|
robconrad/base-api
|
project-entity/src/test/scala/base/entity/auth/context/AuthContextTest.scala
|
Scala
|
mit
| 3,067 |
package fpinscala.testing
import fpinscala.laziness.Stream
import fpinscala.state._
import fpinscala.parallelism._
import fpinscala.parallelism.Par.Par
import Gen._
import Prop._
import java.util.concurrent.{Executors,ExecutorService}
/*
The library developed in this chapter goes through several iterations. This file is just the
shell, which you can fill in and modify while working through the chapter.
*/
object Result {
private type FailedCase = String
private type SuccessCount = Int
sealed trait T
case object Passed extends T
case object Proved extends T
case class Falsified(
failure: FailedCase,
successes: SuccessCount
) extends T
def isFalsified(t: T): Boolean =
t match {
case Passed => false
case Proved => false
case Falsified(_, _) => true
}
}
object Prop {
import Result.{ Proved, Passed, Falsified }
private type TestCases = Int
private type MaxSize = Int
case class T(run: (MaxSize, TestCases, RNG) => Result.T)
private def randomStream[A](g: Gen[A])(rng: RNG): Stream[A] =
Stream.unfold(rng)(rng => Option(g.sample.run(rng)))
private def buildMsg[A](s: A, e: Exception): String =
s"test case: $s\\n" +
s"generated an exception: ${e.getMessage}\\n" +
s"stack trace:\\n ${e.getStackTrace.mkString("\\n")}"
def forAll[A](g: SGen.T[A])(f: A => Boolean): T =
forAll(g.forSize(_))(f)
def forAll[A](g: Int => Gen[A])(f: A => Boolean): T =
T { (max, n, rng) =>
val casesPerSize = (n + (max - 1)) / max
val props =
Stream.from(0).take(n.min(max) + 1).map(i => forAll(g(i))(f))
val prop =
props.map { p =>
T { (max, _, rng) => p.run(max, casesPerSize, rng) }
}.toList.reduce(and)
prop.run(max, n, rng)
}
def forAll[A](as: Gen[A])(f: A => Boolean): T =
T { (_, n, rng) =>
Stream
.zipWith(randomStream(as)(rng), Stream.from(0)) { case (a, b) =>
a -> b
}.take(n)
.map { case (a, i) =>
try {
if (f(a)) Passed else Falsified(a.toString, i)
} catch {
case e: Exception => Falsified(buildMsg(a, e), i)
}
}.find(Result.isFalsified(_)).getOrElse(Passed)
}
def check(p: => Boolean): T =
T { (_, _, _) => if (p) Proved else Falsified("()", 0) }
val S =
Gen.weighted(
Gen.choose(1, 4).map(Executors.newFixedThreadPool) -> 0.75,
Gen.unit(Executors.newCachedThreadPool) -> 0.25
)
def forAllPar[A](g: Gen[A])(f: A => Par[Boolean]): T =
forAll(S ** g) { case (s, a) => f(a)(s).get }
def and(p1: T, p2: T): T =
T { (max, n, rng) =>
val r1 = p1.run(max, n, rng)
val r2 = p2.run(max, n, rng)
(r1, r2) match {
case (Proved, Proved) => Proved
case (Passed, Passed) => Passed
case (Passed, Proved) => Passed
case (Proved, Passed) => Passed
case (Proved, Falsified(_, _)) => r2
case (Passed, Falsified(_, _)) => r2
case (Falsified(_, _), _) => r1
}
}
def or(p1: T, p2: T): T =
T { (max, n, rng) =>
val r1 = p1.run(max, n, rng)
val r2 = p2.run(max, n, rng)
(r1, r2) match {
case (Proved, _) => Proved
case (_, Proved) => Proved
case (Passed, _) => Passed
case (_, Passed) => Passed
case (_, _) => r1
}
}
def run(
t: T,
maxSize: Int = 100,
testCases: Int = 100,
rng: RNG = RNG.Simple(System.currentTimeMillis)
): Unit =
t.run(maxSize, testCases, rng) match {
case Falsified(msg, n) =>
println(s"! Falsified after $n passed tests:\\n $msg")
case Passed => println(s"+ OK, passed $testCases tests.")
case Proved => println(s"+ OK, proved property.")
}
}
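// A usage sketch: running one of the properties defined below with the default
// 100 max size / 100 test cases and a time-seeded RNG:
//
//   Prop.run(ListProps.sortedProp) // prints "+ OK, passed 100 tests." on success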
object Gen {
def unit[A](a: => A): Gen[A] = Gen(State.unit(a))
def boolean: Gen[Boolean] =
choose(0, 2).map {
case 0 => false
case 1 => true
}
def listOfN[A](n: Int, g: Gen[A]): Gen[List[A]] =
Gen(State.sequence(List.fill(n)(g.sample)))
def choose(start: Int, stopExclusive: Int): Gen[Int] = {
def betweenStartStop(rng: RNG): (Int, RNG) = {
val (i, rng2) = rng.nextInt
if (start <= i && i < stopExclusive) i -> rng2
else betweenStartStop(rng2)
}
val sample = State(betweenStartStop)
Gen(sample)
}
def union[A](g1: Gen[A], g2: Gen[A]): Gen[A] =
g1.flatMap { a1 =>
g2.flatMap { a2 =>
boolean.flatMap { chooseA1 => unit(if (chooseA1) a1 else a2) }
}
}
def weighted[A](g1: (Gen[A], Double), g2: (Gen[A], Double)): Gen[A] = {
val genDouble = Gen(State(RNG.double))
val (g1Abs, g2Abs) = (g1._2.abs, g2._2.abs)
val g1Prob = g1Abs / (g1Abs + g2Abs)
g1._1.flatMap { a1 =>
g2._1.flatMap { a2 =>
genDouble.flatMap { d => unit(if (d < g1Prob) a1 else a2) }
}
}
}
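// Example (a sketch, unused elsewhere): yields 1 with probability
// 3/(3+1) = 0.75 and 2 with probability 0.25, since d < g1Prob selects
// the first generator.
val mostlyOnes: Gen[Int] = weighted((unit(1), 3.0), (unit(2), 1.0))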
def unsized[A](gen: Gen[A]): SGen.T[A] = SGen.T { sz => gen }
}
case class Gen[A](sample: State[RNG, A]) {
def map[B](f: A => B): Gen[B] = Gen(sample.map(f))
def flatMap[B](f: A => Gen[B]): Gen[B] =
Gen(sample.flatMap(f(_).sample))
def map2[B, C](g: Gen[B])(f: (A, B) => C): Gen[C] =
Gen(sample.map2(g.sample)(f))
def listOfNDynamic(size: Gen[Int]): Gen[List[A]] =
size.flatMap(listOfN(_, this))
def **[B](g: Gen[B]): Gen[(A, B)] = map2(g)(_ -> _)
}
object SGen {
case class T[A](forSize: Int => Gen[A])
def map[A, B](t: T[A])(f: A => B): T[B] =
T(t.forSize andThen (_.map(f)))
def flatMap[A, B](t: T[A])(f: A => Gen[B]): T[B] =
T(t.forSize andThen (_.flatMap(f)))
def unit[A](a: => A): T[A] = Gen.unsized(Gen.unit(a))
def listOf[A](g: Gen[A]): T[List[A]] = T(Gen.listOfN(_, g))
def listOf1[A](g: Gen[A]): T[List[A]] =
T { sz => Gen.listOfN(sz.max(1), g) }
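// listOf1 clamps the generated size to at least 1, which is what makes
// maxProp1 below safe where maxProp can throw on List().max at size 0.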
}
object ListProps {
private val smallInt = Gen.choose(-10, 10)
val maxProp =
Prop.forAll(SGen.listOf(smallInt)) { ns =>
val max = ns.max
!ns.exists(_ > max)
}
val maxProp1 =
Prop.forAll(SGen.listOf1(smallInt)) { ns =>
val max = ns.max
!ns.exists(_ > max)
}
val sortedProp =
Prop.forAll(SGen.listOf(smallInt)) { ns =>
val nsSorted = ns.sorted
ns.length == nsSorted.length &&
(ns.isEmpty ||
nsSorted.tail.isEmpty ||
!nsSorted
.zip(nsSorted.tail)
.exists { case (a, b) => a > b }
) &&
ns.toSet == nsSorted.toSet
}
private def isEven(n: Int) = n % 2 == 0
private val genListInt = SGen.listOf(smallInt)
val takeWhileProp = {
Prop.forAll(genListInt) { ns =>
ns.takeWhile(_ => true) == ns &&
ns.takeWhile(_ => false) == List.empty &&
ns.takeWhile(isEven).forall(isEven)
}
}
val takeWhileDropWhileProp = {
Prop.forAll(genListInt) { ns =>
val partitionedLists = ns.partition(isEven)
partitionedLists == ns.takeWhile(isEven) -> ns.dropWhile(isEven)
}
}
}
object ParProps {
def equal[A](p1: Par[A], p2: Par[A]): Par[Boolean] =
Par.map2(p1, p2) { _ == _ }
def checkPar(p: => Par[Boolean]): Prop.T =
Prop.forAllPar(Gen.unit(()))(_ => p)
val pInt = Gen.choose(0, 10).map(Par.unit(_))
val pIntPar = {
val genInt = Gen.choose(0, 10)
genInt.listOfNDynamic(genInt).map { xs =>
xs.foldLeft(Par.unit(0)) { (p, i) =>
Par.fork { Par.map2(p, Par.unit(i))(_ + _) }
}
}
}
val p2 = checkPar(equal(Par.map(Par.unit(1))(_ + 1), Par.unit(2)))
val p4 = Prop.forAllPar(pInt) { n => equal(Par.map(n)(identity), n) }
val pFork = Prop.forAllPar(pInt) { n => equal(n, Par.fork(n)) }
}
|
yawaramin/fpinscala
|
exercises/src/main/scala/fpinscala/testing/Gen.scala
|
Scala
|
mit
| 7,629 |
package chat.tox.antox.tox
import java.util
import android.app.{Notification, PendingIntent}
import android.content.{Context, Intent}
import android.preference.PreferenceManager
import android.support.v4.app.{NotificationCompat, TaskStackBuilder}
import android.util.Log
import chat.tox.antox.R
import chat.tox.antox.activities.MainActivity
import chat.tox.antox.data.State
import chat.tox.antox.utils.Constants
import chat.tox.antox.wrapper.MessageType.MessageType
import chat.tox.antox.wrapper.{MessageType, ToxKey}
import scala.collection.JavaConverters._
object MessageHelper {
val TAG = "chat.tox.antox.tox.MessageHelper"
def handleMessage(ctx: Context, friendNumber: Int, friendKey: ToxKey, message: String, messageType: MessageType): Unit = {
val db = State.db
val friendName = db.getContactNameOrAlias(friendKey)
Log.d(TAG, "friend id: " + friendKey + " activeKey: " + State.activeKey + " chatActive: " + State.chatActive)
if (!db.isContactBlocked(friendKey)) {
val chatActive = State.chatActive && State.activeKey.contains(friendKey)
db.addMessage(-1, friendKey, friendName, message, has_been_received = true,
has_been_read = chatActive, successfully_sent = true, messageType)
val preferences = PreferenceManager.getDefaultSharedPreferences(ctx)
if (preferences.getBoolean("notifications_enable_notifications", true) &&
preferences.getBoolean("notifications_new_message", true)) {
if (!chatActive) {
val mName = ToxSingleton.getAntoxFriend(friendKey).map(_.getName)
mName.foreach(name => {
val mBuilder = new NotificationCompat.Builder(ctx).setSmallIcon(R.drawable.ic_actionbar)
.setContentTitle(name)
.setContentText(message)
.setDefaults(Notification.DEFAULT_ALL)
val resultIntent = new Intent(ctx, classOf[MainActivity])
resultIntent.addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP | Intent.FLAG_ACTIVITY_CLEAR_TOP)
resultIntent.setAction(Constants.SWITCH_TO_FRIEND)
resultIntent.putExtra("key", friendKey.toString)
resultIntent.putExtra("name", name)
val stackBuilder = TaskStackBuilder.create(ctx)
stackBuilder.addParentStack(classOf[MainActivity])
stackBuilder.addNextIntent(resultIntent)
val resultPendingIntent = stackBuilder.getPendingIntent(0, PendingIntent.FLAG_UPDATE_CURRENT)
mBuilder.setContentIntent(resultPendingIntent)
ToxSingleton.mNotificationManager.notify(friendNumber, mBuilder.build())
})
}
}
}
}
def handleGroupMessage(ctx: Context, groupNumber: Int, peerNumber: Int, groupKey: ToxKey, message: String, messageType: MessageType) = {
val db = State.db
val peerName = ToxSingleton.getGroupPeer(groupNumber, peerNumber).name
val chatActive = State.chatActive && State.activeKey.contains(groupKey)
db.addMessage(-1, groupKey, peerName, message, has_been_received = true,
has_been_read = chatActive, successfully_sent = true, messageType)
val preferences = PreferenceManager.getDefaultSharedPreferences(ctx)
val notificationsEnabled = preferences.getBoolean("notifications_enable_notifications", true) &&
preferences.getBoolean("notifications_new_message", true)
if (!chatActive && notificationsEnabled) {
val groupName = ToxSingleton.getGroup(groupNumber).name
val mBuilder = new NotificationCompat.Builder(ctx).setSmallIcon(R.drawable.ic_actionbar)
.setContentTitle(groupName)
.setContentText(message)
.setDefaults(Notification.DEFAULT_ALL)
val resultIntent = new Intent(ctx, classOf[MainActivity])
resultIntent.addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP | Intent.FLAG_ACTIVITY_CLEAR_TOP)
resultIntent.setAction(Constants.SWITCH_TO_FRIEND)
resultIntent.putExtra("key", groupKey.toString)
resultIntent.putExtra("name", groupName)
val stackBuilder = TaskStackBuilder.create(ctx)
stackBuilder.addParentStack(classOf[MainActivity])
stackBuilder.addNextIntent(resultIntent)
val resultPendingIntent = stackBuilder.getPendingIntent(0, PendingIntent.FLAG_UPDATE_CURRENT)
mBuilder.setContentIntent(resultPendingIntent)
ToxSingleton.mNotificationManager.notify(groupNumber, mBuilder.build())
}
}
def sendMessage(ctx: Context, key: ToxKey, msg: String, isAction: Boolean, mDbId: Option[Integer]) = {
val mFriend = ToxSingleton.getAntoxFriend(key)
val messageType = if (isAction) MessageType.ACTION else MessageType.OWN
mFriend match {
case None =>
case Some(friend) =>
val db = State.db
for (splitMsg <- splitMessage(msg)) {
val mId = try {
Some(
if (isAction) friend.sendAction(splitMsg) else friend.sendMessage(splitMsg)
)
} catch {
case e: Exception =>
None
}
val senderName = ToxSingleton.tox.getName
mId match {
case Some(id) =>
mDbId match {
case Some(dbId) => db.updateUnsentMessage(id, dbId)
case None => db.addMessage(id, key, senderName,
splitMsg, has_been_received =
false, has_been_read = false, successfully_sent = true, messageType)
}
case None => db.addMessage(-1, key, senderName, splitMsg, has_been_received = false,
has_been_read = false, successfully_sent = false, messageType)
}
}
}
}
def sendGroupMessage(ctx: Context, key: ToxKey, msg: String, isAction: Boolean, mDbId: Option[Integer]) = {
val group = ToxSingleton.getGroup(key)
val db = State.db
val messageType = if (isAction) MessageType.GROUP_ACTION else MessageType.GROUP_OWN
for (splitMsg <- splitMessage(msg)) {
try {
if (isAction) {
group.sendAction(splitMsg)
} else {
group.sendMessage(splitMsg)
}
} catch {
case e: Exception =>
None
}
val senderName = ToxSingleton.tox.getName
mDbId match {
case Some(dbId) => db.updateUnsentMessage(0, dbId)
case None => db.addMessage(0, key, senderName,
splitMsg, has_been_received =
true, has_been_read = true, successfully_sent = true, messageType)
}
}
}
def splitMessage(msg: String): Array[String] = {
var currSplitPos = 0
val result: util.ArrayList[String] = new util.ArrayList[String]()
while (msg.length - currSplitPos > Constants.MAX_MESSAGE_LENGTH) {
val str = msg.substring(currSplitPos, currSplitPos + Constants.MAX_MESSAGE_LENGTH)
val spacePos = str.lastIndexOf(' ')
if (spacePos <= 0) {
result.add(str)
currSplitPos += Constants.MAX_MESSAGE_LENGTH
} else {
result.add(str.substring(0, spacePos))
currSplitPos += spacePos + 1
}
}
if (msg.length - currSplitPos > 0) {
result.add(msg.substring(currSplitPos))
}
result.asScala.toArray
}
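  // Illustrative only (added; not part of the original file): if
  // Constants.MAX_MESSAGE_LENGTH were 6, splitMessage("hello there") would
  // return Array("hello", "there"). Each chunk breaks at the last space
  // before the limit, falling back to a hard cut when a chunk has no space.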
def sendUnsentMessages(ctx: Context) {
val db = State.db
val unsentMessageList = db.getUnsentMessageList
for (unsentMessage <- unsentMessageList) {
val mFriend = ToxSingleton.getAntoxFriend(unsentMessage.key)
mFriend.foreach(friend => {
if (friend.isOnline && ToxSingleton.tox != null) {
sendMessage(ctx, unsentMessage.key, unsentMessage.message,
unsentMessage.`type` == MessageType.ACTION, Some(unsentMessage.id))
}
})
}
}
}
|
gale320/Antox
|
app/src/main/scala/chat/tox/antox/tox/MessageHelper.scala
|
Scala
|
gpl-3.0
| 7,712 |
package kafka.consumer
import com.softwaremill.react.kafka.ConsumerProperties
import kafka.message.MessageAndMetadata
import kafka.serializer.DefaultDecoder
import kafka.utils.{IteratorTemplate, Logging}
import scala.concurrent.duration._
import scala.language.postfixOps
/**
* Copied from https://github.com/stealthly/scala-kafka, 0.8.2-beta (not released at the moment)
*/
class KafkaConsumer[T](val props: ConsumerProperties[T]) extends Logging {
val connector = Consumer.create(props.toConsumerConfig)
val filterSpec = new Whitelist(props.topic)
logger.info(s"setup:start topic=${props.topic} for zk=${props.zookeeperConnect} and groupId=${props.groupId}")
val stream = connector.createMessageStreamsByFilter(filterSpec, props.numThreads, new DefaultDecoder(), props.decoder).head
logger.info(s"setup:complete topic=${props.topic} for zk=${props.zookeeperConnect} and groupId=${props.groupId}")
def iterator(): IteratorTemplate[MessageAndMetadata[Array[Byte], T]] = stream.iterator()
def close(): Unit = {
connector.shutdown()
}
def commitInterval = props.commitInterval.getOrElse(KafkaConsumer.DefaultCommitInterval)
def kafkaOffsetStorage = props.kafkaOffsetStorage
}
object KafkaConsumer {
val DefaultCommitInterval = 30 seconds
}
|
Tecsisa/reactive-kafka
|
core/src/main/scala/kafka/consumer/KafkaConsumer.scala
|
Scala
|
apache-2.0
| 1,275 |
package com.sothr.imagetools.engine.util
import grizzled.slf4j.Logging
/**
* Class to handle version detection and evaluation
*
* Created by drew on 1/6/14.
*/
class Version(val versionString: String) extends Logging {
//parse version into parts
  //typical version string, e.g. 0.1.0-DEV-27-060aec7
val (major, minor, patch, buildTag, buildNumber, buildHash) = {
var version: (Int, Int, Int, String, Int, String) = (0, 0, 0, "DEV", 0, "asdfzxcv")
try {
      val splitVersion = versionString.split( """\.""")
val splitType = splitVersion(splitVersion.length - 1).split( """-""")
version = (splitVersion(0).toInt, splitVersion(1).toInt, splitType(0).toInt, splitType(1), splitType(2).toInt, splitType(3))
} catch {
case nfe: NumberFormatException => error(s"Error parsing number from version string '$versionString'", nfe)
case e: Exception => error(s"Unexpected error parsing version string '$versionString'", e)
}
version
}
/*
* -3 = this.patch < that.patch
* -2 = this.minor < that.minor
* -1 = this.major < that.major
* 0 = Identical Versions
* 1 = this.major > that.major
* 2 = this.minor > that.minor
* 3 = this.patch > that.patch
* 4 = this.buildTag != that.buildTag
*/
def compare(that: Version): Integer = {
//Identical Versions
if (this.hashCode == that.hashCode) {
0
// This is at least a major version ahead
} else if (this.major > that.major) {
1
// This is at least a major version behind
} else if (this.major < that.major) {
-1
// major is the same
} else {
// This is at least a minor version ahead
if (this.minor > that.minor) {
2
// This is at least a minor version behind
} else if (this.minor < that.minor) {
-2
// major.minor are the same
} else {
// This is at least a patch version ahead
if (this.patch > that.patch) {
3
          // This is at least a patch version behind
} else if (this.patch < that.patch) {
-3
//major.minor.patch are all the same
} else {
// This is a different build
          if (this.buildTag != that.buildTag) {
            4
          } else {
            //should be caught by the identical-versions check above, but in case not
            0
          }
}
}
}
}
override def hashCode(): Int = {
val prime: Int = 37
val result: Int = 255
var hash: Int = major
hash += minor
hash += patch
hash += buildTag.hashCode
prime * result + hash
}
def parsableToString(): String = {
s"$major.$minor.$patch-$buildTag-$buildNumber-$buildHash"
}
override def toString: String = {
s"$major.$minor.$patch-$buildTag build:$buildNumber code:$buildHash"
}
}
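// A minimal usage sketch (added for illustration; not part of the original file):
// val a = new Version("1.2.3-RELEASE-10-abc1234")
// val b = new Version("1.3.0-RELEASE-11-def5678")
// a.compare(b) // -2: same major, lower minor
// a.compare(a) // 0: identical versions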
|
warricksothr/ImageTools
|
engine/src/main/scala/com/sothr/imagetools/engine/util/Version.scala
|
Scala
|
mit
| 2,781 |
package concurrent_programming.peers
object ProtocolMain {
def main(args: Array[String]): Unit = {
CentralizedProtocol.run()
SymmetricProtocol.run()
TwiceRing.run()
MultiRing.run()
TreeProtocol.run()
}
}
|
AlexandruValeanu/Concurrent-Programming-in-Scala
|
src/concurrent_programming/peers/ProtocolMain.scala
|
Scala
|
gpl-3.0
| 229 |
package com.typesafe.slick.testkit.tests
import org.junit.Assert._
import com.typesafe.slick.testkit.util.{TestkitTest, TestDB}
class ColumnDefaultTest(val tdb: TestDB) extends TestkitTest {
import tdb.profile.simple._
case class User(id: Int, first: String, last: String)
object A extends Table[(Int, String, Option[Boolean])]("a") {
def id = column[Int]("id")
def a = column[String]("a", O Default "foo")
def b = column[Option[Boolean]]("b", O Default Some(true))
def * = id ~ a ~ b
}
def test = ifCap(bcap.columnDefaults) {
A.ddl.createStatements foreach println
A.ddl.create
A.id insert 42
assertEquals(List((42, "foo", Some(true))), Query(A).list)
}
}
|
zefonseca/slick-1.0.0-scala.2.11.1
|
slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/ColumnDefaultTest.scala
|
Scala
|
bsd-2-clause
| 706 |
package scorex.consensus
import play.api.libs.json.JsObject
import scorex.block.Block
trait BlockGenerationData {
def bytes: Array[Byte]
def json: JsObject
def isGenesis: Boolean
def signature(): Array[Byte]
def isValid(block: Block): Boolean
//block score value, where blockchain quality measure is the sum of block scores.
//So block score is e.g. 1 in case of longest chain rule, 2^64/baseTarget in case of Nxt's cumulative difficulty etc
def blockScore(): BigInt
}
|
Pole-he/Scorex-Lagonaki
|
src/main/scala/scorex/consensus/BlockGenerationData.scala
|
Scala
|
cc0-1.0
| 491 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package impl
import api.base.ScStableCodeReferenceElement
import api.toplevel.imports.ScImportExpr
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IStubElementType, StubElement}
import com.intellij.util.io.StringRef
import psi.impl.ScalaPsiElementFactory
import com.intellij.reference.SoftReference
/**
* User: Alexander Podkhalyuzin
* Date: 20.06.2009
*/
class ScImportExprStubImpl[ParentPsi <: PsiElement](parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement])
extends StubBaseWrapper[ScImportExpr](parent, elemType) with ScImportExprStub {
var referenceText: StringRef = StringRef.fromString("")
var singleWildcard: Boolean = _
private var myReference: SoftReference[Option[ScStableCodeReferenceElement]] = null
def this(parent : StubElement[ParentPsi],
elemType : IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement], refText: String, singleWildcard: Boolean) {
this (parent, elemType.asInstanceOf[IStubElementType[StubElement[PsiElement], PsiElement]])
referenceText = StringRef.fromString(refText)
this.singleWildcard = singleWildcard
}
def reference: Option[ScStableCodeReferenceElement] = {
if (myReference != null && myReference.get != null) return myReference.get
val res = if (referenceText == StringRef.fromString("")) {
None
} else {
val psi = ScalaPsiElementFactory.createReferenceFromText(StringRef.toString(referenceText), getPsi, null)
if (psi != null) {
Some(psi)
} else None
}
myReference = new SoftReference[Option[ScStableCodeReferenceElement]](res)
res
}
def isSingleWildcard: Boolean = singleWildcard
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/lang/psi/stubs/impl/ScImportExprStubImpl.scala
|
Scala
|
apache-2.0
| 1,849 |
package scala.meta.internal.semanticdb.scalac
import org.scalameta.unreachable
import scala.{meta => m}
import scala.meta.internal.inputs._
import scala.meta.internal.{semanticdb => s}
trait DiagnosticOps { self: SemanticdbOps =>
implicit class XtensionCompilationUnitDiagnostics(unit: g.CompilationUnit) {
def reportedDiagnostics(mstarts: collection.Map[Int, m.Name]): List[s.Diagnostic] = {
unit.hijackedDiagnostics.map {
case (gpos, gseverity, text) =>
val mpos: m.Position = {
// NOTE: The caret in unused import warnings points to Importee.pos, but
// the message position start/end point to the enclosing Import.pos.
// See https://github.com/scalameta/scalameta/issues/839
if (text == "Unused import") {
mstarts.get(gpos.point) match {
case Some(name) => name.pos
case None =>
if (unit.source.content(gpos.point) == '_') // Importee.Wildcard()
gpos.withStart(gpos.point).withEnd(gpos.point + 1).toMeta
else gpos.toMeta
}
} else gpos.toMeta
}
val sseverity = gseverity match {
case 0 => s.Diagnostic.Severity.INFORMATION
case 1 => s.Diagnostic.Severity.WARNING
case 2 => s.Diagnostic.Severity.ERROR
case _ => unreachable
}
s.Diagnostic(Some(mpos.toRange), sseverity, text)
}
}
}
}
|
olafurpg/scalameta
|
semanticdb/scalac/library/src/main/scala/scala/meta/internal/semanticdb/scalac/DiagnosticOps.scala
|
Scala
|
bsd-3-clause
| 1,495 |
package module.dao
import awscala.dynamodbv2._
import com.google.inject.ImplementedBy
import models.{CarAdvertField, CarAdvert}
import org.joda.time.DateTime
import org.slf4j.LoggerFactory
@ImplementedBy(classOf[CarAdvertDaoImpl])
trait CarAdvertDao {
def deleteBy(id: String, title: String): Boolean
def update(carAdvert: CarAdvert): Boolean
def findAll(): List[CarAdvert]
def findAll(sortField: String): List[CarAdvert]
def findById(id: String): Option[CarAdvert]
def save(carAdvert: CarAdvert): Unit
}
class CarAdvertDaoImpl extends CarAdvertDao {
val CAR_ADVERTS: String = "car-adverts"
implicit val dynamoDb: DynamoDB = DynamoDB.local()
//init
createTableIfMissing()
def createTableIfMissing() = {
dynamoDb.table(CAR_ADVERTS).getOrElse({
val tableMeta: TableMeta = dynamoDb.createTable(
name = CAR_ADVERTS, hashPK = "guid" -> AttributeType.String, rangePK = "title" -> AttributeType.String,
otherAttributes = Seq(),
indexes = Seq(LocalSecondaryIndex(
name = "car-adverts-index",
keySchema = Seq(
KeySchema("guid", KeyType.Hash),
KeySchema("title", KeyType.Range)
),
projection = Projection(ProjectionType.Include, Seq("title"))
))
)
})
}
override def save(carAdvert: CarAdvert): Unit = {
saveOrUpdate(carAdvert)
}
override def findById(id: String): Option[CarAdvert] = dynamoDb.table(CAR_ADVERTS).get.query(Seq("guid" -> cond.eq(id))) match {
case Nil => None
case items => {
Some(CarAdvert.toObject(items(0)))
}
}
override def findAll(): List[CarAdvert] = {
dynamoDb.table(CAR_ADVERTS).get.scan(Seq("guid" -> cond.ne("a"))) match {
      case Nil => List()
      case items => items.map(item => CarAdvert.toObject(item)).toList
}
}
override def findAll(sortField: String): List[CarAdvert] = {
dynamoDb.table(CAR_ADVERTS).get.scan(Seq("guid" -> cond.ne("a"))) match {
      case Nil => List()
      case items => items.map(CarAdvert.toObject(_)).toList.sorted(CarAdvert.getOrdering(CarAdvertField.getCarAdvertField(sortField).getOrElse(CarAdvertField.Guid)))
}
}
override def update(carAdvert: CarAdvert): Boolean = findById(carAdvert.guid) match {
    case Some(_) => saveOrUpdate(carAdvert); true
case None => false
}
override def deleteBy(id: String, title: String): Boolean = {
dynamoDb.table(CAR_ADVERTS).get.delete(id, title);
true
}
private def saveOrUpdate(carAdvert: CarAdvert): Unit = {
val firstRegistration = carAdvert.firstRegistration match {
case Some(item) => new DateTime(item).withTimeAtStartOfDay().getMillis // without time.
case None => None
}
dynamoDb.table(CAR_ADVERTS).get.put(
hashPK = carAdvert.guid,
rangePK = carAdvert.title,
"fuel" -> carAdvert.fuel,
"price" -> carAdvert.price,
"new" -> (if (carAdvert.isNew) 1 else 0),
"mileage" -> carAdvert.mileage.getOrElse(-1),
"firstRegistration" -> firstRegistration
)
}
}
|
boonhero/car-advert
|
app/module/dao/CarAdvertDao.scala
|
Scala
|
mit
| 3,056 |
package com.twitter.server.handler
import com.twitter.finagle.Service
import com.twitter.finagle.http.Request
import com.twitter.finagle.http.Response
import com.twitter.finagle.http.Uri
import com.twitter.finagle.stats.BucketAndCount
import com.twitter.finagle.stats.HistogramDetail
import com.twitter.finagle.stats.WithHistogramDetails
import com.twitter.io.Buf
import com.twitter.server.util.AdminJsonConverter
import com.twitter.server.util.HtmlUtils.escapeHtml
import com.twitter.server.util.HttpUtils.newResponse
import com.twitter.util.Future
object HistogramQueryHandler {
private val ContentTypeJson = "application/json;charset=UTF-8"
private val ContentTypeHtml = "text/html;charset=UTF-8"
private[handler] case class Summary(
name: String,
count: Long,
sum: Long,
average: Option[Long],
min: Option[Long],
max: Option[Long],
percentiles: Map[String, Long])
/** the name and percentile thresholds used for summaries */
private val SummaryThresholds = Seq(
"p50" -> 0.5,
"p90" -> 0.9,
"p95" -> 0.95,
"p99" -> 0.99,
"p999" -> 0.999,
"p9999" -> 0.9999
)
/**
* Stores histogram bucket and a percentage.
* The percentage is either the density or a
* cumulative distribution for the bucket
*/
case class BucketAndPercentage(lowerLimit: Long, upperLimit: Long, percentage: Float)
private[HistogramQueryHandler] def countPoints(counts: Seq[BucketAndCount]): Int =
counts.foldLeft(0) { case (acc, v) => acc + v.count }
// For each key return a percentage
private[server] def pdf(counts: Seq[BucketAndCount]): Seq[BucketAndPercentage] = {
val count = countPoints(counts)
counts.map { v => BucketAndPercentage(v.lowerLimit, v.upperLimit, v.count.toFloat / count) }
}
// For each key return a cumulative percentage
private[server] def cdf(counts: Seq[BucketAndCount]): Seq[BucketAndPercentage] = {
val count = countPoints(counts)
var c = 0
counts.map { v: BucketAndCount =>
c += v.count
BucketAndPercentage(v.lowerLimit, v.upperLimit, c.toFloat / count)
}
}
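  // Worked example (added; not part of the original file): for the buckets
  //   BucketAndCount(0, 10, 1) and BucketAndCount(10, 20, 3)
  // pdf yields percentages 0.25 and 0.75, while cdf yields 0.25 and 1.0.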
private[HistogramQueryHandler] def deliverData(
counts: Map[String, Seq[BucketAndCount]],
transform: Seq[BucketAndCount] => Any
): String =
// ".toMap" is important here for scala 2.13 as otherwise it will be a MapView which
// doesn't serialize correctly with Jackson
AdminJsonConverter.writeToString(counts.mapValues(transform).toMap)
// Generates html for visualizing histograms
private[HistogramQueryHandler] val render: String = {
val css =
"""<link type="text/css" href="/admin/files/css/histogram-query.css" rel="stylesheet"/>"""
val chart =
"""<div class="chart">
<div id="curve_chart" style="width: 900px; height: 500px"></div>
</div>"""
/** Generates an html table to display key statistics of a histogram */
val statsTable = {
def entry(id: String, display: String): String = {
s"""<tr>
<td style="text-align:left">${escapeHtml(display)}</td>
<td style="text-align:left" id="$id"></td>
</tr>"""
}
s"""
<div id="stats">
<table>
<thead>
<th style="text-align:left" colspan="2">Details</th>
</thead>
<tbody>
${entry("detail_count", "Count")}
${entry("detail_sum", "Sum")}
${entry("detail_average", "Average")}
${entry("detail_min", "Min")}
${entry("detail_max", "Max")}
${entry("detail_p50", "p50")}
${entry("detail_p90", "p90")}
${entry("detail_p95", "p95")}
${entry("detail_p99", "p99")}
${entry("detail_p999", "p999")}
${entry("detail_p9999", "p9999")}
</tbody>
</table>
</div>"""
}
val buttonPanel =
"""<div id="option-panel">
<form action="post">
<span class="option-description">Type:
<a id="PDF" class="button-switch button-light-green left-rounded" title="Probability density function">PDF</a><a id="CDF" class="button-switch button-green right-rounded" title="Cumulative distribution function">CDF</a>
</span>
<span class="option-description">Scale:
<a id="reg" class="button-switch button-red left-rounded" title="Linear scale">Reg</a><a id="log" class="button-switch button-light-red right-rounded" title="Log scale">Log</a>
</span>
<span class="option-description">Refresh:
<a id="refreshOn" class="button-switch button-gray left-rounded" title="Refresh the plot every minute">On</a><a id="refreshOff" class="button-switch button-black right-rounded">Off</a>
</span>
<span class="option-description-last"><a id="download-link" class="button-download button-blue" title="Download bucket counts in json">Download</a></span>
</form>
</div>"""
val scripts =
"""
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript" src="/admin/files/js/histogram-utils.js"></script>
<script type="text/javascript" src="/admin/files/js/histogram-dom.js"></script>
<script type="text/javascript" src="/admin/files/js/histogram-main.js"></script>"""
css + chart + statsTable + buttonPanel + scripts
}
// Generates html for the histogram selection page (/admin/histograms)
private[HistogramQueryHandler] def renderFront(keys: Seq[String]): String = {
val css =
"""
<link type="text/css" href="/admin/files/css/metric-query.css" rel="stylesheet"/>
<link type="text/css" href="/admin/files/css/histogram-homepage.css" rel="stylesheet"/>
"""
val histogramListing = s"""
<div id="metrics-grid" class="row">
<div class="col-md-4 snuggle-right">
<ul id="metrics" class="list-unstyled">
${(for (key <- keys.sorted) yield {
s"""<li id="${key.replace("/", "-")}"><a id="special-$key">${escapeHtml(key)}</a></li>"""
      }).mkString("\n")}
</ul>
</div>
<div class="col-md-8 snuggle-left">
<div style="width: 95%; margin: 0 auto;">
<div id="metrics-header">Histograms</div>
<ul>
<li class="metrics-point">Visualize metric distributions</li>
<li class="metrics-point">Download histogram contents</li>
<li class="metrics-point">For more, read the
<a id="doc-link" href="https://twitter.github.io/twitter-server/Features.html#histograms">docs</a>
</li>
</ul>
</div>
</div>
</div>
"""
val scripts = s"""
<script>
${(for (key <- keys.sorted) yield {
s"""document.getElementById("special-$key").setAttribute("href", window.location.href + "?h=$key&fmt=plot_cdf");"""
      }).mkString("\n")}
</script>
"""
css + histogramListing + scripts
}
}
/**
* A handler which accepts queries via http strings and returns
* json encoded histogram details
*/
private[server] class HistogramQueryHandler(details: WithHistogramDetails)
extends Service[Request, Response] {
import HistogramQueryHandler._
// If possible, access histograms inside statsReceiversLoaded
private[this] def histograms: Map[String, HistogramDetail] = details.histogramDetails
private[this] def jsonResponse(
query: String,
transform: Seq[BucketAndCount] => String
): Future[Response] =
newResponse(
contentType = ContentTypeJson,
content = {
val text = histograms.get(query) match {
case Some(h) => transform(h.counts)
case None => s"Key: ${escapeHtml(query)} is not a valid histogram."
}
Buf.Utf8(text)
}
)
private[this] def renderHistogramsJson: String =
AdminJsonConverter.writeToString(histograms.map {
case (key, value) =>
(key, value.counts)
})
// needs a special case for the upper bound sentinel.
private[this] def midPoint(bc: BucketAndCount): Double =
if (bc.upperLimit >= Int.MaxValue) bc.lowerLimit
else (bc.upperLimit + bc.lowerLimit) / 2.0
private[handler] def generateSummary(histoName: String): Option[Summary] = {
histograms.get(histoName).map { detail =>
val bcs = detail.counts.sortBy(_.lowerLimit)
// first, the basic computations: sum, count, min, max, average
val min = bcs.headOption.map(_.lowerLimit)
val max = bcs.lastOption.map(_.upperLimit)
var sum = 0.0
var count = 0L
bcs.foreach { bc =>
count += bc.count
sum += bc.count.toDouble * midPoint(bc)
}
val average =
if (count == 0L) None
else Some(sum.toLong / count)
// note: this is modeled after `c.t.f.stats.BucketedHistogram.percentile`
def percentile(total: Long, p: Double): Long = {
if (p < 0.0 || p > 1.0)
throw new AssertionError(s"percentile must be within 0.0 to 1.0 inclusive: $p")
val target = Math.round(p * total)
val iter = bcs.iterator
var sum = 0L
var bc: BucketAndCount = null
while (iter.hasNext && sum < target) {
bc = iter.next()
sum += bc.count
}
bc match {
case null => 0
case _ if !iter.hasNext => max.getOrElse(0)
case _ => midPoint(bc).toLong
}
}
val percentiles: Map[String, Long] = SummaryThresholds.map {
case (name, p) =>
name -> percentile(count, p)
}.toMap
Summary(
name = histoName,
count = count,
sum = sum.toLong,
average = average,
min = min,
max = max,
percentiles = percentiles
)
}
}
private[this] def renderSummary(summary: Summary): String =
AdminJsonConverter.writeToString(summary)
private[this] def htmlResponse(query: String): Future[Response] =
newResponse(
contentType = ContentTypeHtml,
content = Buf.Utf8 {
if (histograms.contains(query))
render
else
s"Key: ${escapeHtml(query)} is not a valid histogram."
}
)
/**
* Handles requests for all histograms (/admin/histogram.json)
* or for a specific histogram (/admin/histogram?h=...)
*
* For specific histograms the following options are available:
*
* "h": the name of the histogram
* Ex: finagle/timer/deviation_ms
*
* "fmt": the type of format used to display results.
* The formats we support are raw, pdf, and cdf
* raw: histogram bucket counts
* (use to do a custom computation with histogram counts)
* pdf: percentage of total for each bucket
* (use to identify modes of a distribution)
* cdf: cumulative percentage of total for each bucket
* (use to view more quantiles)
*
* "log_scale": whether or not the x-axis increases linearly or exponentially.
* This parameter can be omitted if not querying for plots
*
* For `histograms.json`, if there is a `summary` parameter,
* it will return JSON summarizing the histogram for the given `h` parameter.
* {{{
* {
* "name": "finagle/timer/deviation_ms",
* "summary": {
* "count": 65059,
* "sum": 651088,
* "average": 10,
* "min": 1,
* "max": 94,
* "percentiles": {
* "p50": 10,
* "p90": 12,
* "p95": 12,
* "p99": 13,
* "p999": 20,
   *      "p9999": 52
* }
* }
* }
* }}}
*
* If `h` is not found, an empty JSON hash will be returned `{}`.
*/
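  // Hypothetical queries (added; not part of the original file):
  //   GET /admin/histograms?h=finagle/timer/deviation_ms&fmt=raw
  //   GET /admin/histograms.json?summary=true&h=finagle/timer/deviation_ms
  // The first returns raw bucket counts; the second returns the summary JSON above.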
def apply(req: Request): Future[Response] = {
val uri = Uri.fromRequest(req)
val path = uri.path
val params = uri.params
path match {
case "/admin/histograms.json" =>
if (!params.contains("summary")) {
newResponse(contentType = ContentTypeJson, content = Buf.Utf8(renderHistogramsJson))
} else {
val summary: Option[Summary] = params.get("h") match {
case Some(histoName) => generateSummary(histoName)
case _ => None
}
val text: String = summary match {
case Some(s) => renderSummary(s)
case None => "{}"
}
newResponse(contentType = ContentTypeJson, content = Buf.Utf8(text))
}
case "/admin/histograms" =>
params.get("h") match {
case Some(query) =>
params.get("fmt") match {
case Some("plot_pdf") | Some("plot_cdf") =>
htmlResponse(query)
case Some("raw") =>
jsonResponse(
query,
{ counts: Seq[BucketAndCount] =>
deliverData(Map(query -> counts), identity)
})
case Some("pdf") =>
jsonResponse(
query,
{ counts: Seq[BucketAndCount] =>
deliverData(Map(query -> counts), x => pdf(x))
})
case Some("cdf") =>
jsonResponse(
query,
{ counts: Seq[BucketAndCount] =>
deliverData(Map(query -> counts), x => cdf(x))
})
case _ =>
newResponse(
contentType = ContentTypeHtml,
content = Buf.Utf8("Please provide a format: fmt = raw | pdf | cdf")
)
}
case _ =>
newResponse(
contentType = ContentTypeHtml,
content = Buf.Utf8(renderFront(histograms.keySet.toSeq))
)
}
case _ =>
newResponse(
contentType = ContentTypeHtml,
content = Buf.Utf8("Invalid endpoint. Did you mean /admin/histograms.json?")
)
}
}
}
|
twitter/twitter-server
|
server/src/main/scala/com/twitter/server/handler/HistogramQueryHandler.scala
|
Scala
|
apache-2.0
| 14,067 |
package algorithm
import com.typesafe.scalalogging.slf4j.Logging
import net.sourceforge.cilib.algorithm.Algorithm
import net.sourceforge.cilib.stoppingcondition.StoppingCondition
/* Stops algorithm execution immediately */
object StopNowStoppingCondition extends StoppingCondition[Algorithm] with Logging {
override def getClone() = this
override def getPercentageCompleted(algorithm: Algorithm): Double = 1
override def apply(algorithm: Algorithm): Boolean = true
}
|
felixamerbauer/firefly-simulator
|
src/main/scala/algorithm/StopNowStoppingCondition.scala
|
Scala
|
gpl-3.0
| 477 |
package org.jetbrains.plugins.scala
package lang
package parameterInfo
import java.awt.Color
import com.intellij.codeInsight.CodeInsightBundle
import com.intellij.codeInsight.lookup.LookupElement
import com.intellij.lang.parameterInfo._
import com.intellij.psi._
import com.intellij.psi.tree.IElementType
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.util.ArrayUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScConstructorPattern, ScPattern, ScPatternArgumentList}
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import scala.collection.mutable.ArrayBuffer
/**
* User: Alexander Podkhalyuzin
* Date: 22.02.2009
*/
class ScalaPatternParameterInfoHandler extends ParameterInfoHandlerWithTabActionSupport[ScPatternArgumentList, Any, ScPattern] {
def getArgListStopSearchClasses: java.util.Set[_ <: Class[_]] = {
java.util.Collections.singleton(classOf[PsiMethod]) //todo: ?
}
def getParameterCloseChars: String = "{},);\n"
def couldShowInLookup: Boolean = true
def getActualParameterDelimiterType: IElementType = ScalaTokenTypes.tCOMMA
def getActualParameters(patternArgumentList: ScPatternArgumentList): Array[ScPattern] = patternArgumentList.patterns.toArray
def getArgumentListClass: Class[ScPatternArgumentList] = classOf[ScPatternArgumentList]
def getActualParametersRBraceType: IElementType = ScalaTokenTypes.tRBRACE
def getArgumentListAllowedParentClasses: java.util.Set[Class[_]] = {
val set = new java.util.HashSet[Class[_]]()
set.add(classOf[ScConstructorPattern])
set
}
def findElementForParameterInfo(context: CreateParameterInfoContext): ScPatternArgumentList = {
findCall(context)
}
def findElementForUpdatingParameterInfo(context: UpdateParameterInfoContext): ScPatternArgumentList = {
findCall(context)
}
def getParametersForDocumentation(p: Any, context: ParameterInfoContext): Array[Object] = ArrayUtil.EMPTY_OBJECT_ARRAY
def getParametersForLookup(item: LookupElement, context: ParameterInfoContext): Array[Object] = null
def updateUI(p: Any, context: ParameterInfoUIContext): Unit = {
if (context == null || context.getParameterOwner == null || !context.getParameterOwner.isValid) return
context.getParameterOwner match {
case args: ScPatternArgumentList =>
val color: Color = context.getDefaultParameterColor
val index = context.getCurrentParameterIndex
val buffer: StringBuilder = new StringBuilder("")
p match {
          //todo: join this match statement with the same one in FunctionParameterHandler to remove code duplication.
case (sign: PhysicalSignature, i: Int) => {
//i can be -1 (it's update method)
val methodName = sign.method.name
val subst = sign.substitutor
val returnType = sign.method match {
case function: ScFunction => subst.subst(function.returnType.getOrAny)
case method: PsiMethod => subst.subst(ScType.create(method.getReturnType, method.getProject))
}
val oneArgCaseClassMethod: Boolean = sign.method match {
case function: ScFunction => ScPattern.isOneArgCaseClassMethod(function)
case _ => false
}
val params = ScPattern.extractorParameters(returnType, args, oneArgCaseClassMethod).zipWithIndex
if (params.length == 0) buffer.append(CodeInsightBundle.message("parameter.info.no.parameters"))
else {
buffer.append(params.map {
case (param, o) =>
val buffer: StringBuilder = new StringBuilder("")
buffer.append(ScType.presentableText(param))
val isSeq = methodName == "unapplySeq" && (ScType.extractClass(param) match {
case Some(clazz) => clazz.qualifiedName == "scala.Seq"
case _ => false
})
if (isSeq) {
buffer.delete(0, buffer.indexOf("[") + 1)
buffer.deleteCharAt(buffer.length - 1)
buffer.append("*")
}
val isBold = if (o == index || (isSeq && o <= index)) true
else {
//todo: check type
false
}
val paramTypeText = buffer.toString()
val paramText = paramTextFor(sign, o, paramTypeText)
if (isBold) "<b>" + paramText + "</b>" else paramText
}.mkString(", "))
}
}
case _ =>
}
val isGrey = buffer.indexOf("<g>")
if (isGrey != -1) buffer.replace(isGrey, isGrey + 3, "")
val startOffset = buffer.indexOf("<b>")
if (startOffset != -1) buffer.replace(startOffset, startOffset + 3, "")
val endOffset = buffer.indexOf("</b>")
if (endOffset != -1) buffer.replace(endOffset, endOffset + 4, "")
if (buffer.toString != "")
context.setupUIComponentPresentation(buffer.toString(), startOffset, endOffset, false, false, false, color)
else
context.setUIComponentEnabled(false)
case _ =>
}
}
/**
* @return 'paramName: ParamType' if `sign` is a synthetic unapply method; otherwise 'ParamType'
*/
private def paramTextFor(sign: PhysicalSignature, o: Int, paramTypeText: String): String = {
if (sign.method.name == "unapply") {
sign.method match {
case fun: ScFunction if fun.parameters.headOption.exists(_.name == "x$0") =>
val companionClass: Option[ScClass] = Option(fun.containingClass) match {
case Some(x: ScObject) => ScalaPsiUtil.getCompanionModule(x) match {
case Some(x: ScClass) => Some(x)
case _ => None
}
case _ => None
}
companionClass match {
case Some(cls) => ScalaPsiUtil.nthConstructorParam(cls, o) match {
case Some(param) =>
if (param.isRepeatedParameter) {
paramTypeText // Not handled yet.
} else {
param.name + ": " + paramTypeText // SCL-3006
}
case None => paramTypeText
}
case None => paramTypeText
}
case fun: ScFunction =>
// Look for a corresponding apply method beside the unapply method.
// TODO also check types correspond, allowing for overloading
val applyParam: Option[PsiParameter] = ScalaPsiUtil.getApplyMethods(fun.containingClass) match {
case Seq(sig) => sig.method.getParameterList.getParameters.lift(o)
case _ => None
}
applyParam match {
case Some(param) => param.getName + ": " + paramTypeText
case None => paramTypeText
}
case _ =>
paramTypeText
}
} else paramTypeText
}
def showParameterInfo(element: ScPatternArgumentList, context: CreateParameterInfoContext): Unit = {
context.showHint(element, element.getTextRange.getStartOffset, this)
}
def updateParameterInfo(o: ScPatternArgumentList, context: UpdateParameterInfoContext): Unit = {
if (context.getParameterOwner != o) context.removeHint()
val offset = context.getOffset
var child = o.getNode.getFirstChildNode
var i = 0
while (child != null && child.getStartOffset < offset) {
if (child.getElementType == ScalaTokenTypes.tCOMMA) i = i + 1
child = child.getTreeNext
}
context.setCurrentParameter(i)
}
def tracksParameterIndex: Boolean = true
private def findCall(context: ParameterInfoContext): ScPatternArgumentList = {
val (file, offset) = (context.getFile, context.getOffset)
val element = file.findElementAt(offset)
if (element == null) return null
val args: ScPatternArgumentList = PsiTreeUtil.getParentOfType(element, getArgumentListClass)
if (args != null) {
context match {
case context: CreateParameterInfoContext =>
args.getParent match {
case constr: ScConstructorPattern => {
val ref: ScStableCodeReferenceElement = constr.ref
val res: ArrayBuffer[Object] = new ArrayBuffer[Object]
if (ref != null) {
val name = ref.refName
val variants: Array[ResolveResult] = ref.multiResolve(false)
for (variant <- variants if variant.isInstanceOf[ScalaResolveResult]) {
val r = variant.asInstanceOf[ScalaResolveResult]
r.element match {
case fun: ScFunction if fun.parameters.nonEmpty =>
val substitutor = r.substitutor
val subst = if (fun.typeParameters.length == 0) substitutor
else {
val undefSubst = fun.typeParameters.foldLeft(ScSubstitutor.empty)((s, p) =>
s.bindT((p.name, ScalaPsiUtil.getPsiElementId(p)), ScUndefinedType(new ScTypeParameterType(p,
substitutor))))
val emptySubst: ScSubstitutor = fun.typeParameters.foldLeft(ScSubstitutor.empty)((s, p) =>
s.bindT((p.name, ScalaPsiUtil.getPsiElementId(p)), p.upperBound.getOrAny))
val result = fun.parameters(0).getType(TypingContext.empty)
if (result.isEmpty) substitutor
else {
val funType = undefSubst.subst(result.get)
constr.expectedType match {
case Some(tp) =>
val t = Conformance.conforms(tp, funType)
if (t) {
val undefSubst = Conformance.undefinedSubst(tp, funType)
undefSubst.getSubstitutor match {
case Some(newSubst) => newSubst.followed(substitutor)
case _ => substitutor
}
} else substitutor
case _ => substitutor
}
}
}
res += ((new PhysicalSignature(fun, subst), 0))
case _ =>
}
}
}
context.setItemsToShow(res.toArray)
}
case _ =>
}
case context: UpdateParameterInfoContext =>
var el = element
while (el.getParent != args) el = el.getParent
var index = 1
for (pattern <- args.patterns if pattern != el) index += 1
context.setCurrentParameter(index)
context.setHighlightedParameter(el)
case _ =>
}
}
args
}
}
|
double-y/translation-idea-plugin
|
src/org/jetbrains/plugins/scala/lang/parameterInfo/ScalaPatternParameterInfoHandler.scala
|
Scala
|
apache-2.0
| 11,453 |
/**********************************************************************************************************************
* This file is part of Scrupal, a Scalable Reactive Web Application Framework for HtmlContents Management *
* *
* Copyright (c) 2015, Reactific Software LLC. All Rights Reserved. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for *
* the specific language governing permissions and limitations under the License. *
**********************************************************************************************************************/
package scrupal.html
import java.io.{PrintWriter, StringWriter}
import org.apache.commons.lang3.exception.ExceptionUtils
import play.api.libs.json._
import scrupal.core.{ThrowableContent, Context, Stimulus}
import scalatags.Text.Modifier
import scalatags.Text.all._
case class unauthorized(what : String) extends SimpleGenerator {
def apply(): HtmlContents = {
div(
cls := "text-warning",
h1("Unauthorized"),
p(s"You are not authorized to access $what.")
)
}
}
case class danger(message : HtmlContents) extends SimpleGenerator {
def apply() : HtmlContents = { div(cls := "bg-danger", message) }
}
case class warning(message : HtmlContents) extends SimpleGenerator {
def apply() : HtmlContents = { div(cls := "bg-warning", message) }
}
case class successful(message : HtmlContents) extends SimpleGenerator {
def apply() : HtmlContents = { div(cls := "bg-success", message) }
}
case class throwable(xcptn : Throwable) extends SimpleGenerator {
def apply() = {
dl(cls := "dl-horizontal",
dt("Exception:"), dd(xcptn.getClass.getName),
dt("Message:"), dd(xcptn.getLocalizedMessage),
dt("Root Cause:"), dd(
pre(style := "width:95%", code(style := "font-size:8pt", {
var sw : StringWriter = null
var pw : PrintWriter = null
try {
sw = new StringWriter()
pw = new PrintWriter(sw)
ExceptionUtils.printRootCauseStackTrace(xcptn, pw)
sw.toString
} finally {
if (pw != null) pw.close()
if (sw != null) sw.close()
}
})),
br()
)
)
}
}
case class exception(activity : String, error : Throwable) extends SimpleGenerator {
def apply() : HtmlContents = {
danger(Seq(
p(s"While attempting to $activity an exception occurred:"),
throwable(error)()
))()
}
}
case class display_throwable_content(xcptn : ThrowableContent) extends SimpleGenerator {
def apply() = {
div(cls := "bg-danger", throwable(xcptn.content)())
}
}
object display_context_table extends HtmlContentsGenerator {
def apply(context : Context) = {
div(cls := "span10 row", style := "font-size: 0.75em",
table(cls := "span10 table table-striped table-bordered table-condensed",
caption(style := "font-size: 1.2em; font-weight: bold;", "Context Details"),
thead(tr(th("Parameter"), th("Value"))),
tbody(
tr(th("Site"), td(context.siteName)),
tr(th("User"), td(context.user)),
tr(th("Theme"), td(context.themeName))
)
)
)
}
}
case class display_stimulus_table(stimulus: Stimulus) extends HtmlContentsGenerator {
def apply(context : Context) = {
div(cls := "span10 row", style := "font-size: 0.75em",
table(cls := "span10 table table-striped table-bordered table-condensed",
caption(style := "font-size: 1.2em; font-weight: bold;", "Request Header Details"),
thead(tr(th("Parameter"), th("Value"))),
tbody(
tr(th("Method"), td(stimulus.method.toString)),
tr(th("Path"), td(stimulus.path)),
tr(th("URI"), td(stimulus.uri)),
tr(th("Version"), td(stimulus.version)),
tr(th("ID"), td(stimulus.id)),
tr(th("Query"), td(stimulus.queryString.toString())),
tr(th("RemoteAddress"), td(stimulus.remoteAddress)),
tr(th("Secure"), td(stimulus.secure.toString())),
tr(th("Tags"), td(stimulus.tags.toString())),
tr(th("Headers"), td(stimulus.headers.toString())),
tr(th("MediaType"), td(stimulus.mediaType.toString)),
tr(th("Context"), td(display_context_table.apply(stimulus.context)))
)
)
)
}
}
object debug_footer extends HtmlContentsGenerator {
def apply(context : Context) = {
context.site match {
case Some(site) ⇒
if (site.debugFooter) {
display_context_table(context)
} else {
emptyContents
}
case None ⇒
emptyContents
}
}
}
/*
object display_alerts extends HtmlGenerator {
def apply(context : Context) : HtmlContents = {
for (alert ← DataCache.alerts if alert.unexpired) yield {
div(cls := "alert alert-dismissible @alert.cssClass",
button(`type` := "button", cls := "close", data("dismiss") := "alert", aria.hidden := "true",
i(cls := "icon-remove-sign")),
strong(alert.icon(), " ", alert.prefix), " ", alert.message)
}
}
}
*/
trait json_fragment extends SimpleGenerator {
def value(value : JsValue) : Modifier = {
value match {
case s : JsString ⇒ "\"" + s.value + "\""
case i : JsNumber ⇒ i.value.toString
case b : JsBoolean ⇒ b.value.toString()
case a : JsArray ⇒ array(a)
case d : JsObject ⇒ document(d)
case JsNull ⇒ s"Null"
case _ ⇒ s"Unknown"
}
}
def array(array : JsArray) : Modifier = {
span(s"Array(${array.value.size}) [",
array.value.flatMap { e ⇒ Seq[Modifier](value(e), ", ") },
"]"
)
}
def document(doc : JsObject) : Modifier = {
div(s"Document(${doc.value.size}) {",
dl(cls := "dl-horizontal",
{for ((k, v) ← doc.value) yield {
Seq(dt(k), dd(value(v)))
}}.flatten.toSeq
),
"}"
)
}
}
case class json_value(bv : JsValue) extends json_fragment {
def apply() = { div(value(bv)) }
}
case class json_document_panel(title : String, doc : JsObject) extends json_fragment {
def apply() = {
div(
cls := "panel panel-primary",
div(
cls := "panel-heading",
h3(cls := "panel-title", title)
),
div(cls := "panel-body", document(doc))
)
}
}
object reactific_copyright extends SimpleGenerator {
def apply() = {
sub(sup("Copyright © 2012-2016, Reactific Software LLC. All Rights Reserved."))
}
}
object scrupal_stats extends HtmlContentsGenerator {
def apply(context: Context) = {
div(cls:="center-block",
div(java.time.format.DateTimeFormatter.ISO_INSTANT.format(java.time.Instant.now())),
dl(cls:="dl-horizontal",
dt("Sites"), dd(context.scrupal.sites.size),
dt("HTTP Requests"), dd(context.scrupal.httpRequestHandler.numRequests.get),
dt("Client Errors"), dd(context.scrupal.httpErrorHandler.clientErrors.get),
dt("Server Errors"), dd(context.scrupal.httpErrorHandler.serverErrors.get),
dt("Users Online"), dd("Not Collected")
)
)
}
}
|
scrupal/scrupal-core
|
scrupal-server/src/main/scala/scrupal/html/Utilities.scala
|
Scala
|
apache-2.0
| 8,246 |
/**
* Copyright (c) 2014 Rafael Brandão <[email protected]>
*
* This is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This software is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/>.
*/
package ecurrencies
import scala.reflect.ClassTag
trait Message {
val body: AnyRef
def mapBody(body: AnyRef): Message
def bodyAs[T] = body.asInstanceOf[T]
final def isAssignableFrom[T: ClassTag] =
implicitly[ClassTag[T]].runtimeClass.isAssignableFrom(body.getClass)
}
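// A hypothetical implementation sketch (added; not part of the original file):
// final case class TextMessage(body: String) extends Message {
//   def mapBody(body: AnyRef): Message = copy(body = body.toString)
// }
// TextMessage("hi").isAssignableFrom[String] // true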
|
rafael-brandao/ecurrencies
|
api/src/main/scala/ecurrencies/Message.scala
|
Scala
|
lgpl-3.0
| 1,016 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.serialization
// JsonSerializable would be a nice name, but that is already defined in Jackson,
// so we avoid wrong-import mistakes by using another name
/**
* Marker interface for messages that are serialized as JSON.
*
 * It extends java.io.Serializable to give it higher priority than JavaSerializer
 * in Akka, in case the message class implements both interfaces.
*/
trait Jsonable extends Serializable
/**
* The serializer will compress the payload if the message class implements this
* marker interface and the payload is larger than the configured
* `compress-larger-than` value.
*/
trait CompressedJsonable extends Jsonable
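// A hypothetical message class (added for illustration; not part of the original
// file). Extending Jsonable opts it into JSON serialization; extending
// CompressedJsonable would additionally compress payloads above the threshold.
// final case class AddPost(title: String, body: String) extends Jsonable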
|
edouardKaiser/lagom
|
jackson/src/main/scala/com/lightbend/lagom/serialization/Jsonable.scala
|
Scala
|
apache-2.0
| 751 |
package java.time.temporal
import java.time.{DateTimeException, Duration}
trait TemporalUnit {
def getDuration(): Duration
def isDurationEstimated(): Boolean
def isDateBased(): Boolean
def isTimeBased(): Boolean
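  // Note (added): plus(1, this) probes for support. A DateTimeException means
  // the unit is unsupported, whereas an ArithmeticException only signals
  // overflow while adding a supported unit, so it still reports true.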
def isSupportedBy(temporal: Temporal): Boolean = {
try {
temporal.plus(1, this)
true
} catch {
case _: DateTimeException => false
case _: ArithmeticException => true
}
}
def addTo[R <: Temporal](temporal: R, amount: Long): R
def between(start: Temporal, end: Temporal): Long
}
|
sjrd/scala-js-java-time
|
src/main/scala/java/time/temporal/TemporalUnit.scala
|
Scala
|
bsd-3-clause
| 549 |
// Test_2.scala
trait Bar
inline given derivedReducible(using scala.deriving.Mirror.SumOf[Qux[_]]): Bar =
scala.compiletime.summonInline[Bar]
???
def test = derivedReducible // error
|
dotty-staging/dotty
|
tests/neg-macros/i13406/Test_2.scala
|
Scala
|
apache-2.0
| 189 |
/**
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package scalding
import org.scalatest._
import TestUtil._
class CountWordsSpec extends FunSpec {
val testText =
"""Avantgarde Labs is a team of enthusiastic knowledge architects –
|Our strength is the rapid integration and analysis of complex databases.
|We make sense out of data and develop semantic and context-sensitive next generation information systems for our clients.
|Our clients include companies from various industries – from startups to Fortune 500 companies –
|as well as public sector and cultural institutions.
""".stripMargin
val expectedCounts = List(
("a",1), ("analysis",1), ("and",4), ("architects",1), ("as",2), ("avantgarde",1),
("clients",2), ("companies",2), ("complex",1), ("context",1), ("cultural",1),
("data",1), ("databases",1), ("develop",1), ("enthusiastic",1),
("for",1), ("fortune",1), ("from",2), ("generation",1),
("include",1), ("industries",1), ("information",1), ("institutions",1), ("integration",1), ("is",2),
("knowledge",1), ("labs",1), ("make",1), ("next",1), ("of",3), ("our",3), ("out",1), ("public",1), ("rapid",1),
("sector",1), ("semantic",1), ("sense",1), ("sensitive", 1), ("startups",1), ("strength",1), ("systems",1),
("team",1), ("the",1), ("to",1), ("various",1), ("we",1), ("well",1))
describe("CountWords") {
it("creates empty output for empty input") {
val output = "output/word-count-empty.txt"
val emptyFile = tempFile("word-count")
com.twitter.scalding.Tool.main(Array(
"scalding.CountWords", "--local", "--input", emptyFile.getAbsolutePath, "--output", output))
assert (io.Source.fromFile(output).getLines().size === 0)
}
it("creates tab-delimited word/count pairs, one per line for non-empty input") {
val output = "output/word-count-test.txt"
val file = writeTempFile("word-count", testText)
com.twitter.scalding.Tool.main(Array(
"scalding.CountWords", "--local", "--input", file.getAbsolutePath, "--output", output))
val actual = io.Source.fromFile(output).getLines().toList
      val expected = expectedCounts.map{ case (word, count) => s"$word\t$count" }
assert (actual.size === expected.size)
actual.zip(expected).foreach {
case (a, e) => assert (a === e)
}
}
}
}
|
avantgarde-labs/bdug-dd-scalding
|
src/test/scala/scalding/CountWordsSpec.scala
|
Scala
|
apache-2.0
| 2,399 |
/*
* Copyright (c) 2014 Dufresne Management Consulting LLC.
*/
package com.nickelsoftware.bettercare4me.models
import org.joda.time.DateTime
import org.joda.time.Months
import org.joda.time.Years
import com.nickelsoftware.bettercare4me.utils.NickelException
import com.nickelsoftware.bettercare4me.utils.Utils.add2Map
object PatientParser {
def fromList(l: List[String]): Patient = Patient(l(0), l(1), l(2), l(3), DateTime.parse(l(4)))
}
/**
* Class representing a patient and his/her demographics.
*/
case class Patient(patientID: String, firstName: String, lastName: String, gender: String, dob: DateTime) {
def age(asOf: DateTime): Int = Years.yearsBetween(dob, asOf).getYears()
def ageInMonths(asOf: DateTime): Int = Months.monthsBetween(dob, asOf).getMonths()
def toList = List(patientID, firstName, lastName, gender, dob.toString)
}
object PatientHistoryFactory {
def createPatientHistory(patient: Patient, claims: List[Claim]): PatientHistory = {
claims.foldLeft(PatientHistory(MedMap(), RxMap(), LabMap())) { (ph, claim) =>
/*
* Mapping Medical Claim
* - medMap: MedMap is the mapping of the codes / info back to medical claims
* - specialtyCde: Map[String, List[MedClaim]],
* - hcfaPOS: Map[String, List[MedClaim]],
* - icdD: Map[String, List[MedClaim]],
* - icdP: Map[String, List[MedClaim]],
* - cpt: Map[String, List[MedClaim]],
* - tob: Map[String, List[MedClaim]],
* - ubRevenue: Map[String, List[MedClaim]],
* - hcpcs: Map[String, List[MedClaim]])
*/
def medClaim2Map(c: MedClaim): MedMap = {
// Provider specialty
val specialtyCde = if (c.specialtyCde.length() > 0) add2Map(c.specialtyCde, c, ph.specialtyCde); else ph.specialtyCde
// HCFA POS place of service
val hcfaPOS = if (c.hcfaPOS.length() > 0) add2Map(c.hcfaPOS, c, ph.hcfaPOS); else ph.hcfaPOS
// ICD primary & secondary Diagnostics
val icdD1 = if (c.icdDPri.length() > 0) add2Map(c.icdDPri, c, ph.icdD); else ph.icdD
val icdD = c.icdD.foldLeft(icdD1)((m, s) => add2Map(s, c, m))
// ICD Procedures
val icdP = c.icdP.foldLeft(ph.icdP)((m, s) => add2Map(s, c, m))
// CPT code
val cpt = if (c.cpt.length() > 0) add2Map(c.cpt, c, ph.cpt); else ph.cpt
// TOB type of bill code
val tob = if (c.tob.length() > 0) add2Map(c.tob, c, ph.tob); else ph.tob
// UB Revenue code
val ubRevenue = if (c.ubRevenue.length() > 0) add2Map(c.ubRevenue, c, ph.ubRevenue); else ph.ubRevenue
// HCPCS code
val hcpcs = if (c.hcpcs.length() > 0) add2Map(c.hcpcs, c, ph.hcpcs); else ph.hcpcs
MedMap(specialtyCde, hcfaPOS, icdD, icdP, cpt, tob, ubRevenue, hcpcs)
}
// Rx Claim
def rxClaim2Map(c: RxClaim): RxMap = {
if (c.ndc.length() > 0) RxMap(add2Map(c.ndc, c, ph.ndc))
else ph.rxMap
}
// Lab Claim
def labClaim2Map(c: LabClaim): LabMap = {
val cptLab = if (c.cpt.length() > 0) add2Map(c.cpt, c, ph.cptLab); else ph.cptLab
val loinc = if (c.loinc.length() > 0) add2Map(c.loinc, c, ph.loinc); else ph.loinc
LabMap(cptLab, loinc)
}
claim match {
case medClaim: MedClaim => PatientHistory(medClaim2Map(medClaim), ph.rxMap, ph.labMap)
case rxClaim: RxClaim => PatientHistory(ph.medMap, rxClaim2Map(rxClaim), ph.labMap)
case labClaim: LabClaim => PatientHistory(ph.medMap, ph.rxMap, labClaim2Map(labClaim))
case _ => throw NickelException("PatientHistoryFactory.createPatientHistory - Unknown claim type")
}
}
}
}
/**
* Representing the mapping of the codes / info back to medical claims
*/
case class MedMap(
specialtyCde: Map[String, List[MedClaim]] = Map(),
hcfaPOS: Map[String, List[MedClaim]] = Map(),
icdD: Map[String, List[MedClaim]] = Map(),
icdP: Map[String, List[MedClaim]] = Map(),
cpt: Map[String, List[MedClaim]] = Map(),
tob: Map[String, List[MedClaim]] = Map(),
ubRevenue: Map[String, List[MedClaim]] = Map(),
hcpcs: Map[String, List[MedClaim]] = Map())
/**
* Representing the mapping of the codes / info back to pharmacy claims
*/
case class RxMap(ndc: Map[String, List[RxClaim]] = Map())
/**
* Representing the mapping of the codes / info back to lab claims
*/
case class LabMap(cptLab: Map[String, List[LabClaim]] = Map(), loinc: Map[String, List[LabClaim]] = Map())
/**
* Class representing the clinical history of a patient.
*
* It ties key clinical codes back to claims. The data elements are:
* - medMap: MedMap is the mapping of the codes / info back to medical claims
* - specialtyCde: Map[String, List[MedClaim]],
* - hcfaPOS: Map[String, List[MedClaim]],
* - icdD: Map[String, List[MedClaim]],
* - icdP: Map[String, List[MedClaim]],
* - cpt: Map[String, List[MedClaim]],
* - tob: Map[String, List[MedClaim]],
* - ubRevenue: Map[String, List[MedClaim]],
* - hcpcs: Map[String, List[MedClaim]])
* - rxMap: RxMap is the mapping of the ndc / info back to Rx claims
* - ndc: Map[String, List[RxClaim]]
* - labMap: LabMap is the mapping of the loinc / info back to lab claims
* - cptLab: Map[String, List[LabClaim]]
* - loinc: Map[String, List[LabClaim]]
*
* @param medMap is the mapping of the codes / info back to medical claims
* @param rxMap is the mapping of the ndc / info back to Rx claims
* @param labMap is the mapping of the loinc / info back to lab claims
*/
case class PatientHistory(medMap: MedMap, rxMap: RxMap, labMap: LabMap) {
//
// pass through methods
//
def specialtyCde = medMap.specialtyCde
def hcfaPOS = medMap.hcfaPOS
def icdD = medMap.icdD
def icdP = medMap.icdP
def cpt = medMap.cpt
def tob = medMap.tob
def ubRevenue = medMap.ubRevenue
def hcpcs = medMap.hcpcs
def ndc = rxMap.ndc
def cptLab = labMap.cptLab
def loinc = labMap.loinc
//
// Access methods for making the code more readable
//
def claims4Specialty(c: String): List[MedClaim] = medMap.specialtyCde.getOrElse(c, List())
def claims4HCFAPOS(c: String): List[MedClaim] = medMap.hcfaPOS.getOrElse(c, List())
def claims4ICDD(c: String): List[MedClaim] = medMap.icdD.getOrElse(c, List())
def claims4ICDP(c: String): List[MedClaim] = medMap.icdP.getOrElse(c, List())
def claims4CPT(c: String): List[MedClaim] = medMap.cpt.getOrElse(c, List())
def claims4TOB(c: String): List[MedClaim] = medMap.tob.getOrElse(c, List())
def claims4UBRev(c: String): List[MedClaim] = medMap.ubRevenue.getOrElse(c, List())
def claims4HCPCS(c: String): List[MedClaim] = medMap.hcpcs.getOrElse(c, List())
def claims4NDC(c: String): List[RxClaim] = rxMap.ndc.getOrElse(c, List())
def claims4CPTLab(c: String): List[LabClaim] = labMap.cptLab.getOrElse(c, List())
def claims4LOINC(c: String): List[LabClaim] = labMap.loinc.getOrElse(c, List())
}
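// A hypothetical access sketch (added; not part of the original file), where
// `patient` and `claims` are assumed to be in scope:
// val history = PatientHistoryFactory.createPatientHistory(patient, claims)
// history.claims4ICDD("250.00") // every medical claim carrying that diagnosis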
|
reactivecore01/bettercare4.me
|
play/app/com/nickelsoftware/bettercare4me/models/Patient.scala
|
Scala
|
apache-2.0
| 6,925 |
package com.faacets.yamlson
import org.scalatest.{FunSuite, Matchers}
/**
* An opinionated stack of traits to improve consistency and reduce boilerplate.
* Thanks typelevel/cats for the idea.
*/
trait YamlsonSuite extends FunSuite with Matchers
|
denisrosset/yamlson
|
src/test/scala/com.faacets.yamlson/YamlsonSuite.scala
|
Scala
|
mit
| 253 |
package com.twitter.finagle
import com.twitter.finagle.Stack.Parameterized
import com.twitter.finagle.service.ResponseClassificationSyntheticException
package object buoyant {
implicit class ParamsMaybeWith(val params: Stack.Params) extends AnyVal {
def maybeWith[T: Stack.Param](p: Option[T]): Stack.Params = {
p match {
case Some(t) => params + t
case None => params
}
}
def maybeWith(ps: Option[Stack.Params]): Stack.Params = {
ps match {
case Some(ps) => params ++ ps
case None => params
}
}
}
implicit class ParameterizedMaybeWith[P <: Parameterized[P]](val self: P) extends AnyVal {
def maybeWith(ps: Option[Stack.Params]): P = {
ps match {
case Some(params) => self.withParams(params)
case None => self
}
}
}
implicit class MaybeTransform[A](val a: A) extends AnyVal {
def maybeTransform(f: Option[A => A]): A = {
f match {
case Some(f) => f(a)
case None => a
}
}
}
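  // Illustrative sketch (added; not part of the original file), where
  // `label: Option[com.twitter.finagle.param.Label]` is hypothetical:
  //   Stack.Params.empty.maybeWith(label)
  // returns the params unchanged when label is None, and the params plus
  // label's value otherwise.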
/**
* Reexport ResponseClassificationSyntheticException
* publicly so it can be used in H2 `StreamStatsFilter`.
* @return a ResponseClassificationSyntheticException
*/
def syntheticException: ResponseClassificationSyntheticException = new ResponseClassificationSyntheticException()
}
|
linkerd/linkerd
|
finagle/buoyant/src/main/scala/com/twitter/finagle/buoyant/package.scala
|
Scala
|
apache-2.0
| 1,336 |
package fpinscala.laziness
import Stream._
trait Stream[+A] {
def foldRight[B](z: => B)(f: (A, => B) => B): B = // The arrow `=>` in front of the argument type `B` means that the function `f` takes its second argument by name and may choose not to evaluate it.
this match {
case Cons(h,t) => f(h(), t().foldRight(z)(f)) // If `f` doesn't evaluate its second argument, the recursion never occurs.
case _ => z
}
def exists(p: A => Boolean): Boolean =
foldRight(false)((a, b) => p(a) || b) // Here `b` is the unevaluated recursive step that folds the tail of the stream. If `p(a)` returns `true`, `b` will never be evaluated and the computation terminates early.
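  // Illustrative (added; not part of the original file):
  // Stream(1, 2, 3).exists(_ == 2) evaluates only the first two heads
  // before returning true.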
@annotation.tailrec
final def find(f: A => Boolean): Option[A] = this match {
case Empty => None
case Cons(h, t) => if (f(h())) Some(h()) else t().find(f)
}
/** Exercise 1 */
def toList: List[A] = this match {
case Empty => Nil
case Cons(x, xs) => x() :: xs().toList
}
/** Exercise 2 */
def take(n: Int): Stream[A] =
if (n <= 0) Empty
else this match {
case Empty => Empty
case Cons(x, xs) => Stream.cons(x(), xs().take(n - 1))
}
  def drop(n: Int): Stream[A] =
    if (n <= 0) this // dropping zero (or fewer) elements leaves the stream unchanged
    else this match {
      case Empty => Empty
      case Cons(x, xs) => xs().drop(n - 1)
    }
/** Exercise 3 */
def takeWhile(p: A => Boolean): Stream[A] = this match {
case Empty => Empty
case Cons(x, xs) =>
if (p(x())) Stream.cons(x(), xs().takeWhile(p))
else Empty
}
/** Exercise 4 */
def forAll(p: A => Boolean): Boolean = this.foldRight(true)( (a, b) => p(a) && b )
/** Exercise 5 */
def takeWhile1(p: A => Boolean): Stream[A] = this.foldRight(Empty: Stream[A]) {
(a, b) =>
if (p(a)) Stream.cons(a, b)
else Empty: Stream[A]
}
/** Exercise 7 */
def map[B](f: A => B): Stream[B] = this.foldRight(Empty: Stream[B]) {
(a, b) => Stream.cons(f(a), b)
}
def filter(p: A => Boolean): Stream[A] = this.foldRight(Empty: Stream[A]) {
(a, b) => if (p(a)) Stream.cons(a, b) else b
}
def append[B >: A](rest: => Stream[B]): Stream[B] = this.foldRight(rest) {
(a, b) => Stream.cons(a, b)
}
// def flatMap
/** Exercise 13 */
def map1[B](f: A => B): Stream[B] = unfold(this) {
case Empty => None
case Cons(x, xs) => Some((f(x()), xs()))
}
def take1(n: Int): Stream[A] = unfold((n, this)) {
case (n, _) if n <= 0 => None
case (_, Empty) => None
case (n, Cons(x, xs)) => Some((x(), (n - 1, xs())))
}
def takeWhile2(p: A => Boolean): Stream[A] = unfold(this) {
case Cons(x, xs) if p(x()) => Some((x(), xs()))
case _ => None
}
def zip[B](that: Stream[B]): Stream[(A, B)] = unfold( (this, that)) {
case (_, Empty) => None
case (Empty, _) => None
case (Cons(x, xs), Cons(y, ys)) => Some( ((x(), y()), (xs(), ys())))
}
  def zipAll[B](that: Stream[B]): Stream[(Option[A], Option[B])] = unfold((this, that)) {
    case (Cons(x, xs), Empty) => Some(((Some(x()), None), (xs(), Empty)))
    case (Empty, Cons(y, ys)) => Some(((None, Some(y())), (Empty, ys())))
    case (Cons(x, xs), Cons(y, ys)) => Some(((Some(x()), Some(y())), (xs(), ys())))
    case _ => None
  }
/** Exercise 5.6 */
def headOption: Option[A] = this.foldRight(None: Option[A]) {
(a, acc) => Some(a)
}
// 5.7 map, filter, append, flatmap using foldRight. Part of the exercise is
// writing your own function signatures.
/** Exercise 14 */
  def startsWith[B](s: Stream[B]): Boolean = (this, s) match {
    case (_, Empty) => true // the empty stream is a prefix of every stream
    case (Cons(x, xs), Cons(y, ys)) if x() == y() => xs().startsWith(ys())
    case _ => false
  }
/** Exercise 15 */
def tails: Stream[Stream[A]] = this.foldRight(Stream.cons(Empty: Stream[A], Empty: Stream[Stream[A]])) {
(a, b) => Stream.cons(Stream.cons(a, b.headOption.get), b)
}
/** Exercise 16 */
  def scanRight[B](z: => B)(f: (A, => B) => B): Stream[B] =
    this.foldRight((z, Stream(z))) { (a, acc) =>
      lazy val cached = acc // force the by-name accumulator at most once
      val nextZ = f(a, cached._1)
      (nextZ, Stream.cons(nextZ, cached._2))
    }._2
}
case object Empty extends Stream[Nothing]
case class Cons[+A](h: () => A, t: () => Stream[A]) extends Stream[A]
object Stream {
def cons[A](hd: => A, tl: => Stream[A]): Stream[A] = {
lazy val head = hd
lazy val tail = tl
Cons(() => head, () => tail)
}
def empty[A]: Stream[A] = Empty
def apply[A](as: A*): Stream[A] =
if (as.isEmpty) empty
else cons(as.head, apply(as.tail: _*))
val ones: Stream[Int] = Stream.cons(1, ones)
def range(from: Int, to: Int, step: Int): Stream[Int] =
if (from + step > to) empty
else cons(from, range(from + step, to, step))
/** Exercise 8 */
def constant[A](a: A): Stream[A] = {
lazy val as = Stream.cons(a, as)
as
}
/** Exercise 9 */
def from(n: Int): Stream[Int] = Stream.cons(n, from(n + 1))
/** Exercise 10 */
def fibs: Stream[Int] = {
def fib(a: Int, b: Int): Stream[Int] = Stream.cons(a, fib(b, a + b))
fib(0, 1)
}
/** Exercise 11 */
def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] = f(z) match {
case None => Empty
case Some((a, s)) => Stream.cons(a, unfold(s)(f))
}
/** Exercise 12 */
  def fibs1: Stream[Int] = unfold((0, 1)) {
    case (a1, a2) => Some((a1, (a2, a1 + a2))) // emit the current value, then shift the pair
  }
  def from1(n: Int): Stream[Int] = unfold(n)(s => Some((s, s + 1)))
  def constant1[A](a: A): Stream[A] = unfold(a)(_ => Some((a, a)))
def ones1: Stream[Int] = unfold(1)( (_) => Some((1, 1)) )
}
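// A small demo (not part of the exercises) exercising the combinators above.
object StreamDemo {
  def main(args: Array[String]): Unit = {
    println(Stream.from(3).take(4).toList)  // List(3, 4, 5, 6)
    println(Stream.fibs.take(6).toList)     // List(0, 1, 1, 2, 3, 5)
    // Laziness: `exists` stops at the first match even on an infinite stream.
    println(Stream.from(1).exists(_ > 2))   // true
  }
}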
|
lzongren/fpinscala
|
exercises/src/main/scala/fpinscala/laziness/Stream.scala
|
Scala
|
mit
| 5,555 |
package com.programmaticallyspeaking.ncd.nashorn.java9
import com.programmaticallyspeaking.ncd.host.SimpleValue
import com.programmaticallyspeaking.ncd.nashorn.RealMarshallerTestFixture
import org.scalatest.Inside
class TemplateLiteralTest extends RealMarshallerTestFixture with RunningJava9 with Inside {
"Marshalling of template literals" - {
"works for a plain one" in {
evaluateExpression("`foo`") { (_, actual) =>
actual should be (SimpleValue("foo"))
}
}
"works for one with an embedded string" in {
evaluateExpression("(function (x) { return `foo${x}`; })(42)") { (_, actual) =>
actual should be (SimpleValue("foo42"))
}
}
"works for a multiline lstring" in {
evaluateExpression("`foo\\nbar`") { (_, actual) =>
actual should be (SimpleValue("foo\\nbar"))
}
}
}
}
|
provegard/ncdbg
|
src/test/scala/com/programmaticallyspeaking/ncd/nashorn/java9/TemplateLiteralTest.scala
|
Scala
|
bsd-3-clause
| 861 |
package is.hail.types.physical
import is.hail.annotations.{Annotation, Region}
import is.hail.types.physical.stypes.concrete.{SIndexablePointer, SIndexablePointerValue}
import is.hail.types.physical.stypes.interfaces.SIndexableValue
import is.hail.types.virtual.{TDict, Type}
import org.apache.spark.sql.Row
object PCanonicalDict {
def coerceArrayCode(contents: SIndexableValue): SIndexableValue = {
contents.st match {
case SIndexablePointer(PCanonicalArray(ps: PBaseStruct, r)) =>
PCanonicalDict(ps.types(0), ps.types(1), r)
.construct(contents)
}
}
}
final case class PCanonicalDict(keyType: PType, valueType: PType, required: Boolean = false) extends PDict with PArrayBackedContainer {
val elementType = PCanonicalStruct(required = true, "key" -> keyType, "value" -> valueType)
val arrayRep: PCanonicalArray = PCanonicalArray(elementType, required)
def setRequired(required: Boolean) = if(required == this.required) this else PCanonicalDict(keyType, valueType, required)
def _asIdent = s"dict_of_${keyType.asIdent}AND${valueType.asIdent}"
override def _pretty(sb: StringBuilder, indent: Int, compact: Boolean = false) {
sb.append("PCDict[")
keyType.pretty(sb, indent, compact)
if (compact)
sb += ','
else
sb.append(", ")
valueType.pretty(sb, indent, compact)
sb.append("]")
}
override def deepRename(t: Type) = deepRenameDict(t.asInstanceOf[TDict])
private def deepRenameDict(t: TDict) =
PCanonicalDict(this.keyType.deepRename(t.keyType), this.valueType.deepRename(t.valueType), this.required)
override def unstagedStoreJavaObject(annotation: Annotation, region: Region): Long = {
val annotMap = annotation.asInstanceOf[Map[Annotation, Annotation]]
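    // Entries are stored as an array of (key, value) structs, sorted by the
    // element's virtual ordering.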
val sortedArray = annotMap.map{ case (k, v) => Row(k, v) }
.toArray
.sorted(elementType.virtualType.ordering.toOrdering)
.toIndexedSeq
this.arrayRep.unstagedStoreJavaObject(sortedArray, region)
}
def construct(contents: SIndexableValue): SIndexableValue = {
contents.st match {
case SIndexablePointer(PCanonicalArray(pbs: PBaseStruct, _))
if pbs.types.size == 2 && pbs.types(0) == keyType && pbs.types(1) == valueType =>
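        // contents already has this dict's element layout; nothing to do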
case t => throw new RuntimeException(s"PCDict.construct: contents=${t}, arrayrep=${arrayRep}")
}
val cont = contents.asInstanceOf[SIndexablePointerValue]
new SIndexablePointerValue(SIndexablePointer(this), cont.a, cont.length, cont.elementsAddress)
}
override def copiedType: PType = {
val copiedK = keyType.copiedType
val copiedV = valueType.copiedType
if (copiedK.eq(keyType) && copiedV.eq(valueType))
this
else
PCanonicalDict(copiedK, copiedV, required)
}
}
|
hail-is/hail
|
hail/src/main/scala/is/hail/types/physical/PCanonicalDict.scala
|
Scala
|
mit
| 2,745 |
package org.littlewings.javaee7.microprofile.service
import javax.enterprise.context.ApplicationScoped
@ApplicationScoped
class CalcService {
def add(a: Int, b: Int): Int = a + b
}
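// Usage sketch (hypothetical caller, not in this repo): as an
// @ApplicationScoped bean it can be injected into a JAX-RS resource, e.g.
//   @Inject var calcService: CalcService = _
//   calcService.add(1, 2) // => 3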
|
kazuhira-r/javaee7-scala-examples
|
wildfly-swarm-mp-jaxrs-cdi/src/main/scala/org/littlewings/javaee7/microprofile/service/CalcService.scala
|
Scala
|
mit
| 185 |
/*
* ContainerTest.scala
* Container tests.
*
* Created By: Avi Pfeffer ([email protected])
* Creation Date: Nov 27, 2014
*
* Copyright 2014 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.library.collection
import org.scalatest.WordSpec
import org.scalatest.Matchers
import com.cra.figaro.library.collection._
import com.cra.figaro.language._
import com.cra.figaro.util._
import com.cra.figaro.algorithm.factored.VariableElimination
import com.cra.figaro.algorithm.sampling.Importance
import com.cra.figaro.library.compound._
import com.cra.figaro.algorithm.sampling.MetropolisHastings
class ContainerTest extends WordSpec with Matchers {
"A Container" should {
"create elements in the correct universe" in {
val u1 = Universe.createNew()
val proc = createContainer(List(2,3))
val fsa1 = new FixedSizeArray(1, i => Constant(true)("", u1))
val u2 = Universe.createNew()
val fsa2 = new FixedSizeArray(1, i => Constant(false)("", u2))
val e1 = proc(2)
e1.universe should equal (u1)
val e2 = proc.get(2)
e2.universe should equal (u1)
val e3 = proc(List(2,3))(2)
e3.universe should equal (u1)
val e4 = proc.get(List(2,3))(2)
e4.universe should equal (u1)
val e5 = proc.map(!_)(2)
e5.universe should equal (u1)
val e6 = proc.chain(if (_) Flip(0.3) else Flip(0.6))(2)
e6.universe should equal (u1)
val proc2 = createContainer(List(4))
val proc3 = proc ++ proc2
// It is possible to have elements from different universes in the same process. Getting an element should produce an element from
// the correct universe
val e7 = proc3.get(2)
val e8 = proc3.get(4)
e7.universe should equal (u1)
e8.universe should equal (u2)
val e9 = proc3.map(!_)(2)
val e10 = proc3.map(!_)(4)
e9.universe should equal (u1)
e10.universe should equal (u2)
val e11 = proc3.chain(if (_) Flip(0.3) else Flip(0.6))(2)
val e12 = proc3.chain(if (_) Flip(0.3) else Flip(0.6))(4)
e11.universe should equal (u1)
e12.universe should equal (u2)
val fsa3 = fsa1.concat(fsa2)
// It is possible to have elements from different universes in the same process. Getting an element should produce an element from
// the correct universe
val e13 = fsa3.get(0)
val e14 = fsa3.get(1)
e13.universe should equal (u1)
e14.universe should equal (u2)
val e15 = fsa3.map(!_)(0)
val e16 = fsa3.map(!_)(1)
e15.universe should equal (u1)
e16.universe should equal (u2)
val e17 = fsa3.chain(if (_) Flip(0.3) else Flip(0.6))(0)
val e18 = fsa3.chain(if (_) Flip(0.3) else Flip(0.6))(1)
e17.universe should equal (u1)
e18.universe should equal (u2)
}
"create elements in the correct universe using flatMap instead of chain" in {
val u1 = Universe.createNew()
val proc = createContainer(List(2,3))
val fsa1 = new FixedSizeArray(1, i => Constant(true)("", u1))
val u2 = Universe.createNew()
val fsa2 = new FixedSizeArray(1, i => Constant(false)("", u2))
val e1 = proc(2)
e1.universe should equal (u1)
val e2 = proc.get(2)
e2.universe should equal (u1)
val e3 = proc(List(2,3))(2)
e3.universe should equal (u1)
val e4 = proc.get(List(2,3))(2)
e4.universe should equal (u1)
val e5 = proc.map(!_)(2)
e5.universe should equal (u1)
val e6 = proc.flatMap(if (_) Flip(0.3) else Flip(0.6))(2)
e6.universe should equal (u1)
val proc2 = createContainer(List(4))
val proc3 = proc ++ proc2
// It is possible to have elements from different universes in the same process. Getting an element should produce an element from
// the correct universe
val e7 = proc3.get(2)
val e8 = proc3.get(4)
e7.universe should equal (u1)
e8.universe should equal (u2)
val e9 = proc3.map(!_)(2)
val e10 = proc3.map(!_)(4)
e9.universe should equal (u1)
e10.universe should equal (u2)
val e11 = proc3.flatMap(if (_) Flip(0.3) else Flip(0.6))(2)
val e12 = proc3.flatMap(if (_) Flip(0.3) else Flip(0.6))(4)
e11.universe should equal (u1)
e12.universe should equal (u2)
val fsa3 = fsa1.concat(fsa2)
// It is possible to have elements from different universes in the same process. Getting an element should produce an element from
// the correct universe
val e13 = fsa3.get(0)
val e14 = fsa3.get(1)
e13.universe should equal (u1)
e14.universe should equal (u2)
val e15 = fsa3.map(!_)(0)
val e16 = fsa3.map(!_)(1)
e15.universe should equal (u1)
e16.universe should equal (u2)
val e17 = fsa3.flatMap(if (_) Flip(0.3) else Flip(0.6))(0)
val e18 = fsa3.flatMap(if (_) Flip(0.3) else Flip(0.6))(1)
e17.universe should equal (u1)
e18.universe should equal (u2)
}
"check range correctly" in {
Universe.createNew()
val proc = createContainer(List(2,3))
proc.rangeCheck(2) should equal (true)
proc.rangeCheck(1) should equal (false)
}
"generate the correct elements" in {
Universe.createNew()
val proc = createContainer(List(2,3))
val elems = proc.elements
assert(elems(0).isInstanceOf[AtomicFlip])
elems(0).asInstanceOf[AtomicFlip].prob should equal (0.5)
assert(elems(1).isInstanceOf[AtomicFlip])
elems(1).asInstanceOf[AtomicFlip].prob should be ((1.0/3) +- 0.0000000001)
elems.length should equal (2)
}
"generate the same elements each time" in {
Universe.createNew()
val proc = createContainer(List(2,3))
val elems1 = proc.elements
val elems2 = proc.elements
elems1(0) should equal (elems2(0))
elems1(1) should equal (elems2(1))
}
"generate the correct map" in {
Universe.createNew()
val proc = createContainer(List(2,3))
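      // (stub: constructs the container but makes no assertions yet)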
}
"when mapping, have each point mapped according to the function" in {
Universe.createNew()
val proc = createContainer(List(2,3)).map(!_)
val elem = proc(3)
VariableElimination.probability(elem, true) should be ((2.0 / 3) +- 0.000000001)
}
"when chaining, have each point flatMapped according to the function" in {
Universe.createNew()
val proc = createContainer(List(2,3)).chain(if (_) Flip(0.3) else Flip(0.6))
val elem = proc(3)
VariableElimination.probability(elem, true) should be ((1.0 / 3 * 0.3 + 2.0 / 3 * 0.6) +- 0.000000001)
}
"(use flatMap instead of chain) when chaining, have each point flatMapped according to the function" in {
Universe.createNew()
val proc = createContainer(List(2,3)).flatMap(if (_) Flip(0.3) else Flip(0.6))
val elem = proc(3)
VariableElimination.probability(elem, true) should be ((1.0 / 3 * 0.3 + 2.0 / 3 * 0.6) +- 0.000000001)
}
"when appending, have all the elements of both processes, with the second process replacing the first when necessary" in {
Universe.createNew()
val proc1 = createContainer(List(2,3))
val proc2 = createContainer(List(3,4), true)
val proc = proc1 ++ proc2
val elem2 = proc(2)
val elem3 = proc(3)
val elem4 = proc(4)
an [proc.IndexOutOfRangeException] should be thrownBy { proc(1) }
val alg = VariableElimination(elem2, elem3, elem4)
alg.start()
alg.probability(elem2, true) should be (0.5 +- 0.000001)
alg.probability(elem3, true) should be (2.0 / 3.0 +- 0.00000001) // inverted
alg.probability(elem4, true) should be (3.0 / 4.0 +- 0.00000001) // inverted
alg.kill()
}
"when folding or reducing, have the points folded according to the function" in {
Universe.createNew()
val proc = createContainer(List(2,3))
val elem1 = proc.foldLeft(true)((b1: Boolean, b2: Boolean) => b1 && b2)
val elem2 = proc.foldRight(true)((b1: Boolean, b2: Boolean) => b1 && b2)
val elem3 = proc.reduce((b1: Boolean, b2: Boolean) => b1 && b2)
val elem4 = proc.aggregate(true)((b1: Boolean, b2: Boolean) => !b1 || b2, (b1: Boolean, b2: Boolean) => b1 && b2)
val alg = Importance(10000, elem1, elem2, elem3, elem4)
alg.start()
alg.probability(elem1, true) should be (((1.0 / 2.0) * (1.0 / 3.0)) +- 0.02)
alg.probability(elem2, true) should be (((1.0 / 2.0) * (1.0 / 3.0)) +- 0.02)
alg.probability(elem3, true) should be (((1.0 / 2.0) * (1.0 / 3.0)) +- 0.02)
alg.probability(elem4, true) should be (((1.0 / 2.0) * (1.0 / 3.0)) +- 0.02)
alg.kill()
VariableElimination.probability(elem1, true) should be (((1.0 / 2.0) * (1.0 / 3.0)) +- 0.02)
MetropolisHastings.probability(elem1, true) should be (((1.0 / 2.0) * (1.0 / 3.0)) +- 0.02)
}
"when quantifying over elements satisfying a predicate, produce the right answer" in {
Universe.createNew()
val proc = createContainer(List(2,3))
val elem1 = proc.count((b: Boolean) => b)
val elem2 = proc.exists((b: Boolean) => b)
val elem3 = proc.forall((b: Boolean) => b)
val alg = Importance(10000, elem1, elem2, elem3)
alg.start()
val p0 = 1.0 / 2 * 2.0 / 3
val p1 = 1.0 / 2 * 1.0 / 3 + 1.0 / 2 * 2.0 / 3
val p2 = 1.0 / 2 * 1.0 / 3
alg.probability(elem1, 0) should be (p0 +- 0.02)
alg.probability(elem1, 1) should be (p1 +- 0.02)
alg.probability(elem1, 2) should be (p2 +- 0.02)
alg.probability(elem2, true) should be ((p1 + p2) +- 0.02)
alg.probability(elem3, true) should be (p2 +- 0.02)
alg.kill()
}
"when finding the index of the first element, produce the right answer" in {
Universe.createNew()
val proc = createContainer(List(2,3))
val elem = proc.findIndex((b: Boolean) => b)
val alg = Importance(10000, elem)
alg.start()
val p2 = 1.0 / 2.0
val p3 = (1 - p2) * 1.0 / 3.0
val pNone = 1 - p2 - p3
alg.probability(elem, Some(2)) should be (p2 +- 0.02)
alg.probability(elem, Some(3)) should be (p3 +- 0.02)
alg.probability(elem, None) should be (pNone +- 0.02)
alg.kill()
}
"when concatenating, have all the elements of both processes, with the second process following the first" in {
Universe.createNew()
val fsa1 = new FixedSizeArray(2, i => Constant(i))
val fsa2 = new FixedSizeArray(2, i => Constant(i + 2))
val fsa = fsa1.concat(fsa2)
fsa.indices.toList should equal (List(0, 1, 2, 3))
val e0 = fsa(0)
val e1 = fsa(1)
val e2 = fsa(2)
val e3 = fsa(3)
assert(e0.isInstanceOf[Constant[Int]])
e0.asInstanceOf[Constant[Int]].constant should equal (0)
assert(e2.isInstanceOf[Constant[Int]])
e2.asInstanceOf[Constant[Int]].constant should equal (2)
}
"when choosing a random element, choose one of the elements uniformly at random" in {
Universe.createNew()
val fsa = new FixedSizeArray(2, i => Constant(i))
val elem = fsa.randomElement
VariableElimination.probability(elem, 1) should be (0.5 +- 0.00000001)
}
"when choosing two random elements, have them be independent" in {
Universe.createNew()
val fsa = new FixedSizeArray(2, i => Constant(i))
val elem1 = fsa.randomElement()
val elem2 = fsa.randomElement()
val eq = elem1 === elem2
VariableElimination.probability(eq, true) should be (0.5 +- 0.000000001)
}
}
def createContainer(is: List[Int], invert: Boolean = false): Container[Int, Boolean] = new Container[Int, Boolean] {
val universe = Universe.universe
val indices = is
def generate(index: Int) = if (invert) Flip(1.0 - 1.0 / index)("", universe) else Flip(1.0 / index)("", universe)
def generate(indices: List[Int]) = {
val unary = for {
index <- indices
} yield (index, generate(index))
val map = Map(unary:_*)
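      // `binary` is evaluated only for its side effect: adding a pairwise
      // constraint between every two generated elements.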
val binary =
for {
index1 <- indices
index2 <- indices
if index1 < index2
} yield {
val elem1 = map(index1)
val elem2 = map(index2)
val pair = ^^(elem1, elem2)("", universe)
pair.addConstraint((pair: (Boolean, Boolean)) => if (pair._1 != pair._2) 1.0 / (index1 + index2) else 1.0)
pair
}
Map(unary:_*)
}
}
}
|
jyuhuan/figaro
|
Figaro/src/test/scala/com/cra/figaro/test/library/collection/ContainerTest.scala
|
Scala
|
bsd-3-clause
| 12,596 |
package com.arcusys.learn.liferay
import com.liferay.asset.kernel.exception.NoSuchVocabularyException
import com.liferay.asset.kernel.model.{AssetEntry, BaseAssetRenderer, BaseAssetRendererFactory, _}
import com.liferay.blogs.kernel.model.BlogsEntry
import com.liferay.bookmarks.model.BookmarksEntry
import com.liferay.calendar.model.CalendarBooking
import com.liferay.document.library.kernel.model.DLFileEntry
import com.liferay.journal.model.JournalArticle
import com.liferay.mail.kernel.model.MailMessage
import com.liferay.portal.kernel.bean.BeanLocator
import com.liferay.portal.kernel.dao.orm.DynamicQuery
import com.liferay.portal.kernel.events.SimpleAction
import com.liferay.portal.kernel.exception.{NoSuchGroupException, NoSuchLayoutException, NoSuchRoleException, _}
import com.liferay.portal.kernel.language.UTF8Control
import com.liferay.portal.kernel.messaging.{Message, MessageListener}
import com.liferay.portal.kernel.model.{User, _}
import com.liferay.portal.kernel.notifications.BaseUserNotificationHandler
import com.liferay.portal.kernel.portlet.{LiferayPortletRequest, LiferayPortletResponse, LiferayPortletSession}
import com.liferay.portal.kernel.search.{Hits, HitsOpenSearchImpl, SearchContext, Summary, _}
import com.liferay.portal.kernel.security.permission.PermissionChecker
import com.liferay.portal.kernel.service.{BaseLocalService, ServiceContext}
import com.liferay.portal.kernel.struts.{BaseStrutsAction, StrutsAction}
import com.liferay.portal.kernel.theme.ThemeDisplay
import com.liferay.portal.kernel.upgrade.UpgradeProcess
import com.liferay.portal.kernel.util.{UnicodeProperties, WebKeys}
import com.liferay.ratings.kernel.model.{RatingsEntry, RatingsStats}
import com.liferay.social.kernel.model.{SocialActivity, SocialActivityFeedEntry}
import com.liferay.message.boards.kernel.model.MBMessage
import com.liferay.portal.kernel.security.auth.PrincipalException
import com.liferay.wiki.model.WikiPage
object LiferayClasses {
type LAssetEntry = AssetEntry
type LBeanLocator = BeanLocator
type LAssetRenderer = AssetRenderer[Any]
type LBaseAssetRenderer = BaseAssetRenderer[Any]
type LBaseAssetRendererFactory = BaseAssetRendererFactory[Any]
type LBaseIndexer[T] = BaseIndexer[T]
type LIndexer[T] = Indexer[T]
type LBaseLocalService = BaseLocalService
type LBaseModel[T] = BaseModel[T]
type LBaseModelListener[T <: LBaseModel[T]] = BaseModelListener[T]
type LBaseStrutsAction = BaseStrutsAction
type LBooleanQuery = BooleanQuery
type LDocument = Document
type LDocumentImpl = DocumentImpl
type LDynamicQuery = DynamicQuery
type LGroup = com.arcusys.learn.liferay.model.LGroup
type LAssetVocabulary = AssetVocabulary
type LHits = Hits
type LHitsOpenSearchImpl = HitsOpenSearchImpl
type LLayout = Layout
type LLayoutSetPrototype = LayoutSetPrototype
type LLayoutTypePortlet = LayoutTypePortlet
type LLiferayPortletRequest = LiferayPortletRequest
type LLiferayPortletResponse = LiferayPortletResponse
type LPermissionChecker = PermissionChecker
type LSearchContext = SearchContext
type LServiceContext = ServiceContext
type LSocialActivity = SocialActivity
type LSocialActivityFeedEntry = SocialActivityFeedEntry
type LStrutsAction = StrutsAction
type LSummary = Summary
type LThemeDisplay = ThemeDisplay
type LTheme = Theme
type LUnicodeProperties = UnicodeProperties
type LUpgradeProcess = UpgradeProcess
type LUser = User
type LSimpleAction = SimpleAction
type LMessageListener = MessageListener
type LMessage = Message
type LBaseUserNotificationHandler = BaseUserNotificationHandler
type LUserNotificationEvent = UserNotificationEvent
type LJournalArticle = JournalArticle
type LRatingsStats = RatingsStats
type LRatingsEntry = RatingsEntry
type LAssetCategory = AssetCategory
type LOrganization = Organization
//Liferay Activities
type LBlogsEntry = BlogsEntry
type LDLFileEntry = DLFileEntry
type LWikiPage = WikiPage
type LMBMessage = MBMessage
type LCalendarBooking = CalendarBooking
type LBookmarksEntry = BookmarksEntry
type LAddress = Address
type LCompany = Company
type LMailMessage = MailMessage
type LMembershipRequest = MembershipRequest
type LRole = Role
// Exceptions
type LNoSuchRoleException = NoSuchRoleException
type LNoSuchGroupException = NoSuchGroupException
type LNoSuchLayoutException = NoSuchLayoutException
type LNoSuchUserException = NoSuchUserException
type LNoSuchResourceActionException = NoSuchResourceActionException
type LNoSuchVocabularyException = NoSuchVocabularyException
type LNoSuchCompanyException = NoSuchCompanyException
type LGroupFriendlyURLException = GroupFriendlyURLException
type LDuplicateGroupException = DuplicateGroupException
type LLayoutFriendlyURLException = LayoutFriendlyURLException
type LNoSuchResourcePermissionException = NoSuchResourcePermissionException
type LMustBeAuthenticatedException = PrincipalException.MustBeAuthenticated
type LUTF8Control = UTF8Control
object LUTF8Control {
val instance: LUTF8Control = UTF8Control.INSTANCE
}
object LWebKeys {
val ThemeDisplay = WebKeys.THEME_DISPLAY
}
object LLiferayPortletSession {
val LayoutSeparator = LiferayPortletSession.LAYOUT_SEPARATOR
}
object LLayoutFriendlyURLExceptionHelper {
val ADJACENT_SLASHES = LayoutFriendlyURLException.ADJACENT_SLASHES
val DOES_NOT_START_WITH_SLASH = LayoutFriendlyURLException.DOES_NOT_START_WITH_SLASH
val DUPLICATE = LayoutFriendlyURLException.DUPLICATE
val ENDS_WITH_SLASH = LayoutFriendlyURLException.ENDS_WITH_SLASH
val INVALID_CHARACTERS = LayoutFriendlyURLException.INVALID_CHARACTERS
val KEYWORD_CONFLICT = LayoutFriendlyURLException.KEYWORD_CONFLICT
val POSSIBLE_DUPLICATE = LayoutFriendlyURLException.POSSIBLE_DUPLICATE
val TOO_DEEP = LayoutFriendlyURLException.TOO_DEEP
val TOO_LONG = LayoutFriendlyURLException.TOO_LONG
val TOO_SHORT = LayoutFriendlyURLException.TOO_SHORT
}
}
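// Usage sketch: downstream code depends on these aliases rather than on
// concrete Liferay classes, e.g.
//   import com.arcusys.learn.liferay.LiferayClasses._
//   def displayName(user: LUser): String = user.getFullName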
|
arcusys/Valamis
|
learn-liferay700-services/src/main/scala/com/arcusys/learn/liferay/LiferayClasses.scala
|
Scala
|
gpl-3.0
| 5,993 |
package org.atnos.eff.concurrent
import scala.concurrent.duration.FiniteDuration
/**
* The design of the Scheduler is taken from:
* https://github.com/functional-streams-for-scala/fs2/blob/series/1.0/core/jvm/src/main/scala/fs2/Scheduler.scala
*/
trait Scheduler {
def schedule(timedout: =>Unit, duration: FiniteDuration): () => Unit
}
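// A minimal sketch (not part of eff) of a JVM Scheduler backed by a
// ScheduledExecutorService; the returned thunk cancels the pending task.
object ExecutorScheduler {
  import java.util.concurrent.{Executors, TimeUnit}

  def apply(): Scheduler = new Scheduler {
    private val executor = Executors.newSingleThreadScheduledExecutor()

    def schedule(timedout: => Unit, duration: FiniteDuration): () => Unit = {
      val task = executor.schedule(
        new Runnable { def run(): Unit = timedout },
        duration.toMillis, TimeUnit.MILLISECONDS)
      () => { task.cancel(false); () }
    }
  }
}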
|
etorreborre/eff
|
shared/src/main/scala/org/atnos/eff/concurrent/Scheduler.scala
|
Scala
|
mit
| 345 |
object Test {
implicit class Foo(sc: StringContext) {
object q {
def apply(arg: Any*): Int = 3
}
}
def f = {
val _parent = 3
q"val hello = $_parent"
q"class $_" // error // error
} // error
}
|
som-snytt/dotty
|
tests/neg/i1779.scala
|
Scala
|
apache-2.0
| 227 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.streaming.dsl.window.impl
import java.time.{Duration, Instant}
import org.apache.gearpump.Message
import org.apache.gearpump.streaming.dsl.api.functions.ReduceFunction
import org.apache.gearpump.streaming.MockUtil
import org.apache.gearpump.streaming.dsl.plan.functions.FoldRunner
import org.apache.gearpump.streaming.dsl.window.api.SessionWindows
import org.apache.gearpump.streaming.source.Watermark
import org.scalatest.{Matchers, PropSpec}
import org.scalatest.mock.MockitoSugar
import org.scalatest.prop.PropertyChecks
class DefaultWindowRunnerSpec extends PropSpec with PropertyChecks
with Matchers with MockitoSugar {
property("DefaultWindowRunner should handle SessionWindow") {
val data = List(
Message(("foo", 1L), Instant.ofEpochMilli(1L)),
Message(("foo", 1L), Instant.ofEpochMilli(15L)),
Message(("foo", 1L), Instant.ofEpochMilli(25L)),
Message(("foo", 1L), Instant.ofEpochMilli(26L))
)
type KV = (String, Long)
implicit val system = MockUtil.system
val reduce = ReduceFunction[KV]((kv1, kv2) => (kv1._1, kv1._2 + kv2._2))
val windows = SessionWindows.apply(Duration.ofMillis(4L))
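    // With a 4 ms inactivity gap, the events at 1 ms, at 15 ms, and at
    // 25/26 ms fall into three separate sessions (25 and 26 merge), matching
    // the three expected outputs below.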
val windowRunner = new WindowOperator[KV, Option[KV]](windows,
new FoldRunner[KV, Option[KV]](reduce, "reduce"))
data.foreach(m => windowRunner.foreach(TimestampedValue(m.value.asInstanceOf[KV], m.timestamp)))
windowRunner.trigger(Watermark.MAX).outputs.toList shouldBe
List(
TimestampedValue(Some(("foo", 1)), Instant.ofEpochMilli(4)),
TimestampedValue(Some(("foo", 1)), Instant.ofEpochMilli(18)),
TimestampedValue(Some(("foo", 2)), Instant.ofEpochMilli(29))
)
}
}
|
manuzhang/incubator-gearpump
|
streaming/src/test/scala/org/apache/gearpump/streaming/dsl/window/impl/DefaultWindowRunnerSpec.scala
|
Scala
|
apache-2.0
| 2,512 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package toplevel
package imports
import com.intellij.lang.ASTNode
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.util.Key
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.caches.ScalaShortNamesCacheManager
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.completion.ScalaCompletionUtil
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.usages._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTemplateDefinition, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.base.types.ScSimpleTypeElementImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.ScImportStmtStub
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, TypingContext}
import org.jetbrains.plugins.scala.lang.psi.types.{ScDesignatorType, ScSubstitutor}
import org.jetbrains.plugins.scala.lang.resolve.processor._
import org.jetbrains.plugins.scala.lang.resolve.{ScalaResolveResult, StdKinds}
import scala.annotation.tailrec
import scala.collection.immutable.Set
import scala.collection.mutable
/**
* @author Alexander Podkhalyuzin
* Date: 20.02.2008
*/
class ScImportStmtImpl extends ScalaStubBasedElementImpl[ScImportStmt] with ScImportStmt {
def this(node: ASTNode) = {this(); setNode(node)}
def this(stub: ScImportStmtStub) = {this(); setStub(stub); setNode(null)}
override def toString: String = "ScImportStatement"
import com.intellij.psi.scope._
def importExprs: Array[ScImportExpr] =
getStubOrPsiChildren(ScalaElementTypes.IMPORT_EXPR, JavaArrayFactoryUtil.ScImportExprFactory)
override def processDeclarations(processor: PsiScopeProcessor,
state: ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
val importsIterator = importExprs.takeWhile(_ != lastParent).reverseIterator
while (importsIterator.hasNext) {
val importExpr = importsIterator.next()
ProgressManager.checkCanceled()
def workWithImportExpr: Boolean = {
val ref = importExpr.reference match {
case Some(element) => element
case _ => return true
}
val nameHint = processor.getHint(NameHint.KEY)
val name = if (nameHint == null) "" else nameHint.getName(state)
if (name != "" && !importExpr.singleWildcard) {
val decodedName = ScalaPsiUtil.convertMemberName(name)
importExpr.selectorSet match {
case Some(set) => set.selectors.exists(selector => ScalaPsiUtil.convertMemberName(selector.reference.refName) == decodedName)
case None => if (ScalaPsiUtil.convertMemberName(ref.refName) != decodedName) return true
}
}
val checkWildcardImports = processor match {
case r: ResolveProcessor =>
if (!r.checkImports()) return true
r.checkWildcardImports()
case _ => true
}
val exprQual: ScStableCodeReferenceElement = importExpr.selectorSet match {
case Some(_) => ref
case None if importExpr.singleWildcard => ref
case None => ref.qualifier.getOrElse(return true)
}
val resolve: Array[ResolveResult] = ref.multiResolve(false)
        // TODO: making the next two definitions lazy leads to a compiler failure
val poOpt = () => exprQual.bind() match {
case Some(ScalaResolveResult(p: PsiJavaPackage, _)) =>
Option(ScalaShortNamesCacheManager.getInstance(getProject).getPackageObjectByName(p.getQualifiedName, getResolveScope))
case _ => None
}
val exprQualRefType = () => ScSimpleTypeElementImpl.calculateReferenceType(exprQual, shapesOnly = false)
def checkResolve(resolve: ResolveResult): Boolean = {
resolve match {
case ScalaResolveResult(elem, _) =>
PsiTreeUtil.getContextOfType(elem, true, classOf[ScTypeDefinition]) match {
case obj: ScObject if obj.isPackageObject => true
case _ => false
}
case _ => false
}
}
def calculateRefType(checkPo: => Boolean) = {
exprQual.bind() match {
case Some(ScalaResolveResult(p: PsiJavaPackage, _)) =>
poOpt() match {
case Some(po) =>
if (checkPo) {
po.getType(TypingContext.empty)
} else Failure("no failure", Some(this))
case _ => Failure("no failure", Some(this))
}
case _ => exprQualRefType()
}
}
val resolveIterator = resolve.iterator
while (resolveIterator.hasNext) {
val next = resolveIterator.next()
val (elem, importsUsed, s) = next match {
case s: ScalaResolveResult =>
@tailrec
def getFirstReference(ref: ScStableCodeReferenceElement): ScStableCodeReferenceElement = {
ref.qualifier match {
case Some(qual) => getFirstReference(qual)
case _ => ref
}
}
(s.getElement,
getFirstReference(exprQual).bind().fold(s.importsUsed)(r => r.importsUsed ++ s.importsUsed),
s.substitutor)
case r: ResolveResult => (r.getElement, Set[ImportUsed](), ScSubstitutor.empty)
}
(elem, processor) match {
case (pack: PsiJavaPackage, complProc: CompletionProcessor) if complProc.includePrefixImports =>
val settings: ScalaCodeStyleSettings = ScalaCodeStyleSettings.getInstance(getProject)
val prefixImports = settings.getImportsWithPrefix.filter(s =>
!s.startsWith(ScalaCodeStyleSettings.EXCLUDE_PREFIX) &&
s.substring(0, s.lastIndexOf(".")) == pack.getQualifiedName
)
val excludeImports = settings.getImportsWithPrefix.filter(s =>
s.startsWith(ScalaCodeStyleSettings.EXCLUDE_PREFIX) &&
s.substring(ScalaCodeStyleSettings.EXCLUDE_PREFIX.length, s.lastIndexOf(".")) == pack.getQualifiedName
)
val names = new mutable.HashSet[String]()
for (prefixImport <- prefixImports) {
names += prefixImport.substring(prefixImport.lastIndexOf('.') + 1)
}
val excludeNames = new mutable.HashSet[String]()
for (prefixImport <- excludeImports) {
excludeNames += prefixImport.substring(prefixImport.lastIndexOf('.') + 1)
}
val wildcard = names.contains("_")
def isOK(name: String): Boolean = {
if (wildcard) !excludeNames.contains(name)
else names.contains(name)
}
val newImportsUsed = Set(importsUsed.toSeq: _*) + ImportExprUsed(importExpr)
val newState = state.put(ScalaCompletionUtil.PREFIX_COMPLETION_KEY, true).put(ImportUsed.key, newImportsUsed)
elem.processDeclarations(new BaseProcessor(StdKinds.stableImportSelector) {
def execute(element: PsiElement, state: ResolveState): Boolean = {
element match {
case elem: PsiNamedElement if isOK(elem.name) => processor.execute(element, state)
case _ => true
}
}
}, newState, this, place)
case _ =>
}
val subst = state.get(ScSubstitutor.key).toOption.getOrElse(ScSubstitutor.empty).followed(s)
ProgressManager.checkCanceled()
importExpr.selectorSet match {
case None =>
// Update the set of used imports
val newImportsUsed = Set(importsUsed.toSeq: _*) + ImportExprUsed(importExpr)
var newState: ResolveState = state.put(ImportUsed.key, newImportsUsed).put(ScSubstitutor.key, subst)
val refType = calculateRefType(checkResolve(next))
refType.foreach { tp =>
newState = newState.put(BaseProcessor.FROM_TYPE_KEY, tp)
}
if (importExpr.singleWildcard) {
if (!checkWildcardImports) return true
(elem, processor) match {
case (cl: PsiClass, processor: BaseProcessor) if !cl.isInstanceOf[ScTemplateDefinition] =>
if (!processor.processType(new ScDesignatorType(cl, true), place,
newState)) return false
case (_, processor: BaseProcessor) if refType.isDefined =>
if (!processor.processType(refType.get, place, newState)) return false
case _ => if (!elem.processDeclarations(processor, newState, this, place)) return false
}
} else if (!processor.execute(elem, newState)) return false
case Some(set) =>
val shadowed: mutable.HashSet[(ScImportSelector, PsiElement)] = mutable.HashSet.empty
set.selectors foreach {
selector =>
ProgressManager.checkCanceled()
val selectorResolve: Array[ResolveResult] = selector.reference.multiResolve(false)
selectorResolve foreach { result =>
if (selector.isAliasedImport && selector.importedName != selector.reference.refName) {
//Resolve the name imported by selector
//Collect shadowed elements
shadowed += ((selector, result.getElement))
var newState: ResolveState = state
newState = state.put(ResolverEnv.nameKey, selector.importedName)
newState = newState.put(ImportUsed.key, Set(importsUsed.toSeq: _*) + ImportSelectorUsed(selector)).
put(ScSubstitutor.key, subst)
calculateRefType(checkResolve(result)).foreach {tp =>
newState = newState.put(BaseProcessor.FROM_TYPE_KEY, tp)
}
if (!processor.execute(result.getElement, newState)) {
return false
}
}
}
}
              // There is a wildcard import from a stable id, e.g.
              // import a.b.c.{d => e, f => _, _}
if (set.hasWildcard) {
if (!checkWildcardImports) return true
processor match {
case bp: BaseProcessor =>
ProgressManager.checkCanceled()
val p1 = new BaseProcessor(bp.kinds) {
override def getHint[T](hintKey: Key[T]): T = processor.getHint(hintKey)
override def isImplicitProcessor: Boolean = bp.isImplicitProcessor
override def handleEvent(event: PsiScopeProcessor.Event, associated: Object) {
processor.handleEvent(event, associated)
}
override def getClassKind: Boolean = bp.getClassKind
override def setClassKind(b: Boolean) {
bp.setClassKind(b)
}
override def execute(element: PsiElement, state: ResolveState): Boolean = {
if (shadowed.exists(p => element == p._2)) return true
var newState = state.put(ScSubstitutor.key, subst)
def isElementInPo: Boolean = {
PsiTreeUtil.getContextOfType(element, true, classOf[ScTypeDefinition]) match {
case obj: ScObject if obj.isPackageObject => true
case _ => false
}
}
calculateRefType(isElementInPo).foreach {tp =>
newState = newState.put(BaseProcessor.FROM_TYPE_KEY, tp)
}
processor.execute(element, newState)
}
}
val newImportsUsed: Set[ImportUsed] = Set(importsUsed.toSeq: _*) + ImportWildcardSelectorUsed(importExpr)
var newState: ResolveState = state.put(ImportUsed.key, newImportsUsed).put(ScSubstitutor.key, subst)
(elem, processor) match {
case (cl: PsiClass, processor: BaseProcessor) if !cl.isInstanceOf[ScTemplateDefinition] =>
calculateRefType(checkResolve(next)).foreach {tp =>
newState = newState.put(BaseProcessor.FROM_TYPE_KEY, tp)
}
if (!processor.processType(new ScDesignatorType(cl, true), place, newState)) return false
case _ =>
if (!elem.processDeclarations(p1,
// In this case import optimizer should check for used selectors
newState,
this, place)) return false
}
case _ => true
}
}
              // Process the wildcard import first, to show that these imports are unused if they really are
set.selectors foreach {
selector =>
ProgressManager.checkCanceled()
val selectorResolve: Array[ResolveResult] = selector.reference.multiResolve(false)
selectorResolve foreach { result =>
var newState: ResolveState = state
if (!selector.isAliasedImport || selector.importedName == selector.reference.refName) {
val rSubst = result match {
case result: ScalaResolveResult => result.substitutor
case _ => ScSubstitutor.empty
}
newState = newState.put(ImportUsed.key, Set(importsUsed.toSeq: _*) + ImportSelectorUsed(selector)).
put(ScSubstitutor.key, subst.followed(rSubst))
calculateRefType(checkResolve(result)).foreach {tp =>
newState = newState.put(BaseProcessor.FROM_TYPE_KEY, tp)
}
if (!processor.execute(result.getElement, newState)) {
return false
}
}
}
}
}
}
true
}
if (!workWithImportExpr) return false
}
true
}
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/lang/psi/impl/toplevel/imports/ScImportStmtImpl.scala
|
Scala
|
apache-2.0
| 15,102 |
object Foo {
def foo(): Int = {
val f: Int ?=> Int = (using x: Int) => 2 * x
f(using 2)
}
val f = implicit (x: Int) => x
((using x: Int) => x): (Int ?=> Int) // error: no implicit argument found
}
|
som-snytt/dotty
|
tests/neg/i2514a.scala
|
Scala
|
apache-2.0
| 216 |
/*
* Copyright (C) 2010 Peter Lewerin
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo
package staging
import edu.umd.cs.piccolo.PNode
import edu.umd.cs.piccolo.nodes.PPath
import edu.umd.cs.piccolo.util.PBounds
import edu.umd.cs.piccolo.activities.PActivity
import net.kogics.kojo.util.Utils
import net.kogics.kojo.core.Point
import java.awt.Color
import math._
object Impl {
val canvas = SpriteCanvas.instance
val turtle0 = canvas.turtle0
val figure0 = canvas.figure0
}
/** Staging API
*
* This object contains the API for using Staging within Kojo scripts.
*
* DISCLAIMER
*
 * Parts of this interface are written to conform approximately to the
* Processing API as described in the reference at
* <URL: http://processing.org/reference/>.
* The implementation code is the work of Peter Lewerin
* (<[email protected]>) and is not in any way derived from the
* Processing source. */
object API {
//W#summary Developer home-page for the Staging Module
//W
//W=Introduction=
//W
//WThe Staging Module is currently being developed by Peter Lewerin.
//WThe original impetus came from a desire to run Processing-style code in Kojo.
//W
//WAt this point, the shape hierarchy is the most complete part, but
//Wutilities for color definition, time keeping etc are being added.
//W
//W=Examples=
//W
//W * StagingHelloKojoExample
//W * StagingArrayExample
//W * StagingArrayTwoDeeExample
//W * StagingClockExample
//W * StagingColorWheelExample
//W * StagingCreatingColorsExample
//W * StagingDifferenceOfTwoSquaresExample
//W * StagingEasingExample
//W * StagingHueSaturationBrightnessExample
//W * StagingSineOfAnAngleExample
//W
//W=Overview=
//W
//W==Points==
//W
//WStaging uses {{{net.kogics.kojo.core.Point}}} for coordinates.
//W
//T PointTest begins
def point(x: Double, y: Double) = Point(x, y)
implicit def tupleDDToPoint(tuple: (Double, Double)) = Point(tuple._1, tuple._2)
implicit def tupleDIToPoint(tuple: (Double, Int)) = Point(tuple._1, tuple._2)
implicit def tupleIDToPoint(tuple: (Int, Double)) = Point(tuple._1, tuple._2)
implicit def tupleIIToPoint(tuple: (Int, Int)) = Point(tuple._1, tuple._2)
//implicit def baseShapeToPoint(b: BaseShape) = b.origin
//implicit def awtPointToPoint(p: java.awt.geom.Point2D) = Point(p.getX, p.getY)
//implicit def awtDimToPoint(d: java.awt.geom.Dimension2D) = Point(d.getWidth, d.getHeight)
/** The point of origin, located at a corner of the user screen if
* `screenSize` has been called, or the middle of the screen otherwise. */
val O = Point(0, 0)
//T PointTest ends
//W
//W==User Screen==
//W
//WThe zoom level and axis orientations can be set using `screenSize`.
//W
//T ScreenMethodsTest begins
def screenWidth = Screen.rect.getWidth.toInt
def screenHeight = Screen.rect.getHeight.toInt
def screenSize(width: Int, height: Int) = Screen.size(width, height)
/** The middle point of the user screen, or (0, 0) if `screenSize` hasn't
* been called. */
def screenMid = Screen.rect.getCenter2D
/** The extreme point of the user screen (i.e. the opposite corner from
* the point of origin), or (0, 0) if `screenSize` hasn't been called. */
def screenExt = Screen.rect.getExt
/** Fills the user screen with the specified color. */
def background(bc: Color) = {
withStyle(bc, null, 1) { rectangle(O, screenExt) }
}
//T ScreenMethodsTest ends
//W
//W==Simple shapes and text==
//W
//WGiven `Point`s or _x_ and _y_ coordinate values, simple shapes like dots,
//Wlines, rectangles, ellipses, and elliptic arcs can be drawn. Texts can
//Walso be placed in this way.
//W
//T SimpleShapesTest begins
def dot(x: Double, y: Double) = Dot(Point(x, y))
def dot(p: Point) = Dot(p)
def line(x1: Double, y1: Double, x2: Double, y2: Double) =
Line(Point(x1, y1), Point(x2, y2))
def line(p1: Point, p2: Point) =
Line(p1, p2)
def vector(x1: Double, y1: Double, x2: Double, y2: Double, a: Double) =
Vector(Point(x1, y1), Point(x2, y2), a)
def vector(p1: Point, p2: Point, a: Double) =
Vector(p1, p2, a)
def rectangle(x: Double, y: Double, w: Double, h: Double) =
Rectangle(Point(x, y), Point(x + w, y + h))
def rectangle(p: Point, w: Double, h: Double) =
Rectangle(p, Point(p.x + w, p.y + h))
def rectangle(p1: Point, p2: Point) =
Rectangle(p1, p2)
def square(x: Double, y: Double, s: Double) =
Rectangle(Point(x, y), Point(x + s, y + s))
def square(p: Point, s: Double) =
Rectangle(p, Point(p.x + s, p.y + s))
def roundRectangle(
x: Double, y: Double,
w: Double, h: Double,
rx: Double, ry: Double
) =
RoundRectangle(Point(x, y), Point(x + w, y + h), Point(rx, ry))
def roundRectangle(
p: Point,
w: Double, h: Double,
rx: Double, ry: Double
) =
RoundRectangle(p, Point(p.x + w, p.y + h), Point(rx, ry))
def roundRectangle(p1: Point, p2: Point, rx: Double, ry: Double) =
RoundRectangle(p1, p2, Point(rx, ry))
def roundRectangle(p1: Point, p2: Point, p3: Point) =
RoundRectangle(p1, p2, p3)
def ellipse(cx: Double, cy: Double, rx: Double, ry: Double) =
Ellipse(Point(cx, cy), Point(cx + rx, cy + ry))
def ellipse(p: Point, rx: Double, ry: Double) =
Ellipse(p, Point(p.x + rx, p.y + ry))
def ellipse(p1: Point, p2: Point) =
Ellipse(p1, p2)
def circle(x: Double, y: Double, r: Double) =
Ellipse(Point(x, y), Point(x + r, y + r))
def circle(p: Point, r: Double) =
Ellipse(p, Point(p.x + r, p.y + r))
def arc(cx: Double, cy: Double, rx: Double, ry: Double, s: Double, e: Double) =
Arc(Point(cx, cy), Point(cx + rx, cy + ry), s, e, java.awt.geom.Arc2D.PIE)
def arc(p: Point, rx: Double, ry: Double, s: Double, e: Double) =
Arc(p, Point(p.x + rx, p.y + ry), s, e, java.awt.geom.Arc2D.PIE)
def arc(p1: Point, p2: Point, s: Double, e: Double) =
Arc(p1, p2, s, e, java.awt.geom.Arc2D.PIE)
def pieslice(cx: Double, cy: Double, rx: Double, ry: Double, s: Double, e: Double) =
Arc(Point(cx, cy), Point(cx + rx, cy + ry), s, e, java.awt.geom.Arc2D.PIE)
def pieslice(p: Point, rx: Double, ry: Double, s: Double, e: Double) =
Arc(p, Point(p.x + rx, p.y + ry), s, e, java.awt.geom.Arc2D.PIE)
def pieslice(p1: Point, p2: Point, s: Double, e: Double) =
Arc(p1, p2, s, e, java.awt.geom.Arc2D.PIE)
def openArc(cx: Double, cy: Double, rx: Double, ry: Double, s: Double, e: Double) =
Arc(Point(cx, cy), Point(cx + rx, cy + ry), s, e, java.awt.geom.Arc2D.OPEN)
def openArc(p: Point, rx: Double, ry: Double, s: Double, e: Double) =
Arc(p, Point(p.x + rx, p.y + ry), s, e, java.awt.geom.Arc2D.OPEN)
def openArc(p1: Point, p2: Point, s: Double, e: Double) =
Arc(p1, p2, s, e, java.awt.geom.Arc2D.OPEN)
def chord(cx: Double, cy: Double, rx: Double, ry: Double, s: Double, e: Double) =
Arc(Point(cx, cy), Point(cx + rx, cy + ry), s, e, java.awt.geom.Arc2D.CHORD)
def chord(p: Point, rx: Double, ry: Double, s: Double, e: Double) =
Arc(p, Point(p.x + rx, p.y + ry), s, e, java.awt.geom.Arc2D.CHORD)
def chord(p1: Point, p2: Point, s: Double, e: Double) =
Arc(p1, p2, s, e, java.awt.geom.Arc2D.CHORD)
def text(s: String, x: Double, y: Double) = Text(s, Point(x, y))
def text(s: String, p: Point) = Text(s, p)
def star(cx: Double, cy: Double, inner: Double, outer: Double, points: Int) =
Star(Point(cx, cy), inner, outer, points)
def star(p: Point, inner: Double, outer: Double, points: Int) =
Star(p, inner, outer, points)
def star(p1: Point, p2: Point, p3: Point, points: Int) =
Star(p1, dist(p1, p2), dist(p1, p3), points)
def cross(p1: Point, p2: Point, cw: Double, r: Double = 1, greek: Boolean = false) =
Cross(p1, p2, cw, r, greek)
def crossOutline(p1: Point, p2: Point, cw: Double, r: Double = 1, greek: Boolean = false) =
CrossOutline(p1, p2, cw, r, greek)
def saltire(p1: Point, p2: Point, s: Double) = Saltire(p1, p2, s)
def saltireOutline(p1: Point, p2: Point, s: Double) = SaltireOutline(p1, p2, s)
//T SimpleShapesTest ends
//W
//W==Complex Shapes==
//W
//WGiven a sequence of `Point`s, a number of complex shapes can be drawn,
//Wincluding basic polylines and polygons, and patterns of polylines/polygons.
//W
//T ComplexShapesTest begins
def polyline(pts: Seq[Point]) = Polyline(pts)
def polygon(pts: Seq[Point]): Polygon = Polygon(pts)
def triangle(p0: Point, p1: Point, p2: Point) = polygon(Seq(p0, p1, p2))
def quad(p0: Point, p1: Point, p2: Point, p3: Point) =
polygon(Seq(p0, p1, p2, p3))
def linesShape(pts: Seq[Point]) = LinesShape(pts)
def trianglesShape(pts: Seq[Point]) = TrianglesShape(pts)
def triangleStripShape(pts: Seq[Point]) = TriangleStripShape(pts)
def quadsShape(pts: Seq[Point]) = QuadsShape(pts)
def quadStripShape(pts: Seq[Point]) = QuadStripShape(pts)
def triangleFanShape(p0: Point, pts: Seq[Point]) = TriangleFanShape(p0, pts)
//T ComplexShapesTest ends
//W
//W==SVG Shapes==
//W
//WGiven an SVG element, the corresponding shape can be drawn.
//W
//T SvgShapesTest begins
def svgShape(node: scala.xml.Node) = SvgShape(node)
//T SvgShapesTest ends
//W
//W==Color==
//W
//WColor values can be created with the method `color`, or using a
//W_color-maker_. The methods `fill`, `noFill`,
//W`stroke`, and `noStroke` set the colors used to draw the insides and edges
//Wof figures. The method `strokeWidth` doesn't actually affect color but is
//Wtypically used together with the color setting methods. The method
//W`withStyle` allows the user to set fill color, stroke color, and stroke
//Wwidth temporarily.
//W
//W
//T ColorTest begins
def grayColors(grayMax: Int) =
ColorMaker(GRAY(grayMax))
def grayColorsWithAlpha(grayMax: Int, alphaMax: Int) =
ColorMaker(GRAYA(grayMax, alphaMax))
def rgbColors(rMax: Int, gMax: Int, bMax: Int) =
ColorMaker(RGB(rMax, gMax, bMax))
def rgbColorsWithAlpha(rMax: Int, gMax: Int, bMax: Int, alphaMax: Int) =
ColorMaker(RGBA(rMax, gMax, bMax, alphaMax))
def hsbColors(hMax: Int, sMax: Int, bMax: Int) =
ColorMaker(HSB(hMax, sMax, bMax))
def namedColor(s: String) = ColorMaker.color(s)
def fill(c: Color) = Impl.figure0.setFillColor(c)
def noFill() = Impl.figure0.setFillColor(null)
def stroke(c: Color) = Impl.figure0.setPenColor(c)
def noStroke() = Impl.figure0.setPenColor(null)
def strokeWidth(w: Double) = Impl.figure0.setPenThickness(w)
def withStyle(fc: Color, sc: Color, sw: Double)(body: => Unit) =
Style(fc, sc, sw)(body)
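  // Usage sketch: withStyle(java.awt.Color.RED, java.awt.Color.BLACK, 2) { circle(O, 50) }
  // draws one red circle with a black 2-unit edge, then restores the prior style.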
implicit def ColorToRichColor (c: java.awt.Color) = RichColor(c)
def lerpColor(from: RichColor, to: RichColor, amt: Double) =
RichColor.lerpColor(from, to, amt)
//T ColorTest ends
Utils.runInSwingThread {
Inputs.init()
}
//W
//W==Timekeeping==
//W
//WA number of methods report the current time.
//W
//T TimekeepingTest begins
def millis = System.currentTimeMillis()
import java.util.Calendar
def second = Calendar.getInstance().get(Calendar.SECOND)
def minute = Calendar.getInstance().get(Calendar.MINUTE)
def hour = Calendar.getInstance().get(Calendar.HOUR_OF_DAY)
def day = Calendar.getInstance().get(Calendar.DAY_OF_MONTH)
def month = Calendar.getInstance().get(Calendar.MONTH) + 1
def year = Calendar.getInstance().get(Calendar.YEAR)
  //T TimekeepingTest ends
//W
//W==Math==
//W
//WA number of methods perform number processing tasks.
//W
//T MathTest begins
def constrain(value: Double, min: Double, max: Double) =
Math.constrain(value, min, max)
def norm(value: Double, low: Double, high: Double) =
Math.map(value, low, high, 0, 1)
def map(value: Double, low1: Double, high1: Double, low2: Double, high2: Double) =
Math.map(value, low1, high1, low2, high2)
def lerp(value1: Double, value2: Double, amt: Double) =
Math.lerp(value1, value2, amt)
def sq(x: Double) = x * x
def dist(x0: Double, y0: Double, x1: Double, y1: Double) =
sqrt(sq(x1 - x0) + sq(y1 - y0))
def dist(p1: Point, p2: Point) =
sqrt(sq(p2.x - p1.x) + sq(p2.y - p1.y))
def mag(x: Double, y: Double) = dist(0, 0, x, y)
def mag(p: Point) = dist(0, 0, p.x, p.y)
//W
//W==Trigonometry==
//W
//WA number of methods perform trigonometry tasks.
//W
val PI = math.Pi
val TWO_PI = 2*PI
val HALF_PI = PI/2
val QUARTER_PI = PI/4
def sin(a: Double) = math.sin(a)
def cos(a: Double) = math.cos(a)
def tan(a: Double) = math.tan(a)
def radians(deg: Double) = deg.toRadians
def degrees(rad: Double) = rad.toDegrees
//T MathTest ends
//W
//W==Control==
//W
//WThere are methods for execution control and mouse feedback.
//W
//T ControlTest begins
def loop(fn: => Unit) = Impl.figure0.refresh(fn)
def stop() = Impl.figure0.stopRefresh()
def reset() = {
Impl.canvas.clear()
Impl.canvas.turtle0.invisible()
}
def wipe() = Impl.figure0.fgClear()
def mouseX() = Inputs.stepMousePos.x
def mouseY() = Inputs.stepMousePos.y
def pmouseX() = Inputs.prevMousePos.x
def pmouseY() = Inputs.prevMousePos.y
val LEFT = 1
val CENTER = 2
val RIGHT = 3
def mouseButton = Inputs.mouseBtn
def mousePressed = Inputs.mousePressedFlag
  def interpolatePolygon(pts1: Seq[Point], pts2: Seq[Point], n: Int) {
    require(pts1.size == pts2.size, "The polygons don't match up.")
    var g0 = polygon(pts1)
    for (i <- 0 to n; amt = i / n.toFloat) {
      val pts = (pts1 zip pts2) map { case (p1, p2) =>
        point(lerp(p1.x, p2.x, amt), lerp(p1.y, p2.y, amt))
      }
      g0.hide()
      g0 = polygon(pts)
      for (_ <- 0 to 10) { // throttle so each interpolation step stays visible
        net.kogics.kojo.util.Throttler.throttle()
      }
    }
  }
//T ControlTest ends
//W
//W=Usage=
//W
} // end of API
abstract class ColorModes
case class RGB(r: Int, g: Int, b: Int) extends ColorModes
case class RGBA(r: Int, g: Int, b: Int, a: Int) extends ColorModes
case class HSB(h: Int, s: Int, b: Int) extends ColorModes
case class HSBA(h: Int, s: Int, b: Int, a: Int) extends ColorModes
case class GRAY(v: Int) extends ColorModes
case class GRAYA(v: Int, a: Int) extends ColorModes
object Point {
def apply(x: Double, y: Double) = new Point(x, y)
def unapply(p: Point) = Some((p.x, p.y))
}
//T ShapeMethodsTest begins
trait Shape {
def node: PNode
  var sizeFactor = 1.0
  var orientation = 90.0
def hide() {
Utils.runInSwingThread {
node.setVisible(false)
}
Impl.canvas.repaint()
}
def show() {
Utils.runInSwingThread {
node.setVisible(true)
}
Impl.canvas.repaint()
}
def fill_=(color: Color) {
Utils.runInSwingThread {
node.setPaint(color)
node.repaint()
}
}
def fill = Utils.runInSwingThreadAndWait {
node.getPaint
}
def rotate(amount: Double) = {
Utils.runInSwingThread {
val p = node.getFullBounds.getCenter2D
node.rotateAboutPoint(amount.toRadians, p)
node.repaint()
}
orientation = (orientation + amount + 360) % 360
}
def rotateTo(angle: Double) = {
rotate(angle - orientation)
}
def scale(amount: Double) = {
Utils.runInSwingThread {
node.scale(amount)
node.repaint()
}
sizeFactor *= amount
}
def scaleTo(size: Double) = {
scale(size / sizeFactor)
}
def translate(p: Point) = {
Utils.runInSwingThread {
node.offset(p.x, p.y)
node.repaint()
}
}
def offset = Utils.runInSwingThreadAndWait {
val o = node.getOffset
Point(o.getX, o.getY)
}
// def addActivity(a: PActivity) = Impl.canvas.getRoot.addActivity(a)
}
//T ShapeMethodsTest ends
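// Usage sketch (assumes the API above is in scope):
//   val s = API.square(API.O, 20)
//   s.rotate(45)   // updates `orientation` along with the node transform
//   s.scaleTo(2.0) // absolute scaling tracked via `sizeFactor`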
trait Rounded {
val curvature: Point
def radiusX = curvature.x
def radiusY = curvature.y
}
trait BaseShape extends Shape {
val origin: Point
}
trait StrokedShape extends BaseShape {
val path: PPath
def node = path
def stroke_=(color: Color) {
Utils.runInSwingThread {
node.setStrokePaint(color)
node.repaint()
}
}
def stroke = Utils.runInSwingThreadAndWait {
node.getStrokePaint
}
}
trait SimpleShape extends StrokedShape {
val endpoint: Point
def width = endpoint.x - origin.x
def height = endpoint.y - origin.y
}
trait Elliptical extends SimpleShape with Rounded {
val curvature = endpoint - origin
override def width = 2 * radiusX
override def height = 2 * radiusY
}
class Text(val text: String, val origin: Point) extends BaseShape {
import java.awt.Font
val tnode = new edu.umd.cs.piccolo.nodes.PText(text)
def node = tnode
Utils.runInSwingThread {
node.getTransformReference(true).setToScale(1, -1)
node.setOffset(origin.x, origin.y)
val font = new Font(node.getFont.getName, Font.PLAIN, 14)
node.setFont(font)
}
override def toString = "Staging.Text(" + text + ", " + origin + ")"
}
object Text {
def apply(s: String, p: Point) = {
val shape = new Text(s, p)
Impl.figure0.pnode(shape.node)
shape
}
}
trait PolyShape extends BaseShape {
val points: Seq[Point]
val origin = points(0)
//def toPolygon: Polygon = Polygon(points)
//def toPolyline: Polyline = Polyline(points)
}
trait CrossShape {
  val xdims = Array.fill(8)(0.0)
  val ydims = Array.fill(8)(0.0)
def crossDims(len: Double, wid: Double, cw: Double, r: Double = 1, greek: Boolean = false) = {
require(wid / 2 > cw)
require(len / 2 > cw)
val a = (wid - cw) / 2
val b = a / (if (greek) 1 else r)
val c = cw / 6
val d = c / 2
xdims(1) = a - c
xdims(2) = a
xdims(3) = a + d
xdims(4) = a + cw - d
xdims(5) = a + cw
xdims(6) = a + cw + c
xdims(7) = len
ydims(1) = b - c
ydims(2) = b
ydims(3) = b + d
ydims(4) = b + cw - d
ydims(5) = b + cw
ydims(6) = b + cw + c
ydims(7) = wid
this
}
def points() = List(
Point(xdims(0), ydims(5)), Point(xdims(2), ydims(5)),
Point(xdims(2), ydims(7)), Point(xdims(5), ydims(7)),
Point(xdims(5), ydims(5)), Point(xdims(7), ydims(5)),
Point(xdims(7), ydims(2)), Point(xdims(5), ydims(2)),
Point(xdims(5), ydims(0)), Point(xdims(2), ydims(0)),
Point(xdims(2), ydims(2)), Point(xdims(0), ydims(2))
)
def outlinePoints() = List(
Point(xdims(0), ydims(6)), Point(xdims(1), ydims(6)), Point(xdims(1), ydims(7)),
Point(xdims(3), ydims(7)), Point(xdims(3), ydims(4)), Point(xdims(0), ydims(4)),
Point(xdims(6), ydims(7)), Point(xdims(6), ydims(6)), Point(xdims(7), ydims(6)),
Point(xdims(7), ydims(4)), Point(xdims(4), ydims(4)), Point(xdims(4), ydims(7)),
Point(xdims(7), ydims(1)), Point(xdims(6), ydims(1)), Point(xdims(6), ydims(0)),
Point(xdims(4), ydims(0)), Point(xdims(4), ydims(3)), Point(xdims(7), ydims(3)),
Point(xdims(1), ydims(0)), Point(xdims(1), ydims(1)), Point(xdims(0), ydims(1)),
Point(xdims(0), ydims(3)), Point(xdims(3), ydims(3)), Point(xdims(3), ydims(0))
)
}
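// Worked sketch (illustrative, not from the original source): for a symmetric
// cross with len = wid = 10 and cw = 2 (default ratio r = 1), crossDims sets
// a = b = 4, so xdims/ydims hold 0, 4, 6, 10 at indices 0, 2, 5, 7, and
// points() traces the 12 corners of a plus shape starting at (0, 6).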
class Composite(val shapes: Seq[Shape]) extends Shape {
val node = new PNode
Utils.runInSwingThread {
shapes foreach { shape =>
node.addChild(shape.node)
}
}
override def toString = "Staging.Group(" + shapes.mkString(",") + ")"
}
object Composite {
def apply(shapes: Seq[Shape]) = {
new Composite(shapes)
}
}
object Style {
val savedStyles =
new scala.collection.mutable.Stack[(Color, Color, java.awt.Stroke)]()
val f = Impl.figure0
def save {
Utils.runInSwingThread {
savedStyles push Tuple3(f.fillColor, f.lineColor, f.lineStroke)
}
}
def restore {
Utils.runInSwingThread {
if (savedStyles nonEmpty) {
val (fc, sc, st) = savedStyles.pop
f.setFillColor(fc)
f.setPenColor(sc)
f.setLineStroke(st)
}
}
}
def apply(fc: Color, sc: Color, sw: Double)(body: => Unit) = {
save
f.setFillColor(fc)
f.setPenColor(sc)
f.setPenThickness(sw)
try { body }
finally { restore }
}
}
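// Usage sketch (illustrative; assumes `red` and `black` are Colors in scope):
// Style scopes fill/stroke/thickness changes to a block and restores the
// previous style afterwards, even if the body throws:
//   Style(red, black, 2.0) {
//     // drawing calls here see the temporary style
//   } // the save/restore pair puts the old style back at this point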
class Bounds(x1: Double, y1: Double, x2: Double, y2: Double) {
val bounds = Utils.runInSwingThreadAndWait {
new PBounds(x1, y1, x2 - x1, y2 - y1)
}
def getWidth = Utils.runInSwingThreadAndWait { bounds.getWidth }
def getHeight = Utils.runInSwingThreadAndWait { bounds.getHeight }
def getOrigin = Utils.runInSwingThreadAndWait {
val p = bounds.getOrigin
Point(p.getX, p.getY)
}
def getCenter2D = Utils.runInSwingThreadAndWait {
val p = bounds.getCenter2D
Point(p.getX, p.getY)
}
def getExt = Utils.runInSwingThreadAndWait {
val p = bounds.getOrigin
Point(p.getX + bounds.getWidth, p.getY + bounds.getHeight)
}
def resetToZero = Utils.runInSwingThreadAndWait { bounds.resetToZero }
def inset(dx: Double, dy: Double) = Utils.runInSwingThreadAndWait {
bounds.inset(dx, dy)
}
def setRect(x1: Double, y1: Double, x2: Double, y2: Double) {
Utils.runInSwingThread {
bounds.setRect(x1, y1, x2 - x1, y2 - y1)
}
}
}
object Bounds {
def apply(b: PBounds) = Utils.runInSwingThreadAndWait {
val x = b.getX
val y = b.getY
val w = b.getWidth
val h = b.getHeight
new Bounds(x, y, x + w, y + h)
}
def apply(x1: Double, y1: Double, x2: Double, y2: Double) =
new Bounds(x1, y1, x2, y2)
}
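// Usage sketch (illustrative, not from the original source): the
// (x1, y1, x2, y2) constructor stores width = x2 - x1 and height = y2 - y1:
//   val b = Bounds(0, 0, 30, 20)
//   b.getWidth  // 30.0
//   b.getExt    // Point(30.0, 20.0), i.e. origin shifted by width and height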
|
richardfontana/fontana2007-t
|
KojoEnv/src/net/kogics/kojo/staging/staging.scala
|
Scala
|
gpl-3.0
| 21,679 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.util.hadoop
import java.io.IOException
import java.io.InputStream
import org.apache.hadoop.fs.FSInputStream
class HttpInputStream(is: InputStream) extends FSInputStream {
val lock: AnyRef = new Object
var pos: Long = 0
override def seek(pos: Long) = throw new IOException("Seek not supported")
override def getPos: Long = pos
override def seekToNewSource(targetPos: Long): Boolean = throw new IOException("Seek not supported")
override def read: Int = {
lock.synchronized {
val byteRead = is.read()
if (byteRead >= 0) {
pos += 1
}
byteRead
}
}
}
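// Usage sketch (illustrative; `someInputStream` is an assumed value): the
// stream is strictly forward-only, so position advances byte-by-byte and any
// seek attempt fails fast:
//   val in = new HttpInputStream(someInputStream)
//   in.read()  // returns the byte, or -1 at end of stream
//   in.getPos  // 1 after one successful read
//   in.seek(0) // throws IOException("Seek not supported")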
|
fredji97/samza
|
samza-yarn/src/main/scala/org/apache/samza/util/hadoop/HttpInputStream.scala
|
Scala
|
apache-2.0
| 1,443 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.models.json
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}
class StandardSpec
extends WordSpec
with Matchers
with ScalaFutures
|
deepsense-io/seahorse-workflow-executor
|
workflowjson/src/test/scala/io/deepsense/models/json/StandardSpec.scala
|
Scala
|
apache-2.0
| 795 |
package core.api
import com.lvxingpai.model.marketplace.misc.Coupon
import com.lvxingpai.model.marketplace.order.{ Order, OrderActivity }
import core.exception.ResourceNotFoundException
import core.formatter.marketplace.order.OrderFormatter
import core.payment.{ AlipayService, PaymentService, WeChatPaymentService }
import core.service.ViaeGateway
import org.bson.types.ObjectId
import org.joda.time.DateTime
import org.mongodb.morphia.Datastore
import org.mongodb.morphia.query.UpdateResults
import play.api.Play
import play.api.Play.current
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
/**
* Created by topy on 2015/10/22.
*/
object OrderAPI {
/**
* If the order is still in the unpaid state, refresh its payment status
* @param order
* @return
*/
def refreshOrderPayment(order: Order): Future[Order] = {
order.status match {
case "pending" =>
val paymentInfo = Option(order.paymentInfo) map mapAsScalaMap getOrElse scala.collection.mutable.Map()
// Check each concrete payment channel's result, in tail-recursive form
val itr = paymentInfo.iterator
// Check one specific channel
def refreshSinglePayment(): Future[Order] = {
val entry = itr.next()
val result: Future[Order] = entry._1 match {
case s if s == PaymentService.Provider.Alipay.toString =>
AlipayService.instance.refreshPaymentStatus(order)
case s if s == PaymentService.Provider.WeChat.toString =>
WeChatPaymentService.instance.refreshPaymentStatus(order)
case _ => Future(order) // neither WeChat nor Alipay: return the order as-is
}
result flatMap (order => {
if (order.status == "pending") {
// still pending; try refreshing the next channel
if (itr.hasNext) refreshSinglePayment()
else Future(order)
} else {
// no longer in the pending state
Future(order)
}
})
}
if (itr.hasNext) refreshSinglePayment()
else Future(order)
case _ =>
Future(order)
}
}
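// Illustrative note (not in the original source): refreshSinglePayment above
// consumes the channel iterator one entry at a time and recurses only while
// the order stays pending, so each remaining channel is queried at most once.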
/**
* Mark an order as paid
*
* @param orderId  the order id
* @param provider the payment channel
*/
def setPaid(orderId: Long, provider: PaymentService.Provider.Value)(implicit ds: Datastore): Future[Unit] = {
val providerName = provider.toString
// create the activity record
val act = new OrderActivity
act.action = OrderActivity.Action.pay.toString
act.timestamp = DateTime.now().toDate
act.prevStatus = Order.Status.Pending.toString
// update the payment status
val paymentQuery = ds.createQuery(classOf[Order]) field "orderId" equal orderId field
s"paymentInfo.$providerName" notEqual null
val paymentOps = ds.createUpdateOperations(classOf[Order]).set(s"paymentInfo.$providerName.paid", true)
// if the order is still pending, set it to paid and refresh its expireDate
val statusQuery = ds.createQuery(classOf[Order]) field "orderId" equal orderId field
s"paymentInfo.$providerName" notEqual null field "status" equal "pending"
val statusOps = ds.createUpdateOperations(classOf[Order]).set("status", "paid").add("activities", act)
.set("expireDate", DateTime.now().plusDays(1).toDate)
val ret: Future[Seq[UpdateResults]] = Future.sequence(Seq(
Future {
ds.update(paymentQuery, paymentOps)
}, Future {
val updateResult = ds.update(statusQuery, statusOps)
if (updateResult.getUpdatedCount == 1) {
// a genuine pending -> paid transition: fire the onPayOrder event
getOrder(orderId) flatMap (value => {
val order = value.get
val viae = Play.application.injector instanceOf classOf[ViaeGateway]
val orderNode = OrderFormatter.instance.formatJsonNode(order)
viae.sendTask("viae.event.marketplace.onPayOrder", kwargs = Some(Map("order" -> orderNode)))
})
}
updateResult
}
))
ret map (_ => ())
}
/**
* Look up an order by its id
* @param orderId the order id
* @return the order, if any
*/
def getOrder(orderId: Long)(implicit ds: Datastore): Future[Option[Order]] = {
Future {
Option(ds.find(classOf[Order], "orderId", orderId).get)
}
}
/**
* Look up an order by its id. Unlike getOrder, this method throws an exception if no matching record is found
* @param orderId the order id
* @return
*/
def fetchOrder(orderId: Long)(implicit ds: Datastore): Future[Order] = {
getOrder(orderId) map (_ getOrElse {
throw ResourceNotFoundException(s"Cannot find order #$orderId")
})
}
/**
* Look up an order by its id
* @param orderId the order id
* @param fields which fields to retrieve
* @return
*/
def getOrder(orderId: Long, fields: Seq[String])(implicit ds: Datastore): Future[Option[Order]] = {
Future {
Option(ds.find(classOf[Order], "orderId", orderId).retrievedFields(true, fields: _*).get)
}
}
/**
* Fetch only the consumerId and status fields of an order
* @param orderId
* @param ds
* @return
*/
def getOrderOnlyStatus(orderId: Long)(implicit ds: Datastore): Future[Order] = {
Future {
ds.find(classOf[Order], "orderId", orderId).retrievedFields(true, Seq("consumerId", "status"): _*).get
}
}
/**
* Get the order list for a user.
* If the order status is empty, return all of the user's orders;
* otherwise, return only the user's orders in the given status.
* @param userId the user id
* @param status the order status
* @return the order list
*/
def getOrderList(userId: Option[Long], sellerId: Option[Long], status: Option[String], start: Int, count: Int, fields: Seq[String] = Seq())(implicit ds: Datastore): Future[Seq[Order]] = {
Future {
val query = ds.createQuery(classOf[Order])
if (userId.nonEmpty)
query.field("consumerId").equal(userId.get)
if (sellerId.nonEmpty)
query.field("commodity.seller.sellerId").equal(sellerId.get)
query.order("-id").offset(start).limit(count) //生成时间逆序
if (status.nonEmpty)
query.field("status").in(status.get.split(",").toSeq)
if (fields.nonEmpty)
query.retrievedFields(true, fields: _*)
query.asList()
}
}
def getOrderCnt(userId: Option[Long], sellerId: Option[Long], status: Option[String])(implicit ds: Datastore): Future[Long] = {
Future {
val query = ds.createQuery(classOf[Order])
if (userId.nonEmpty)
query.field("consumerId").equal(userId.get)
if (sellerId.nonEmpty)
query.field("commodity.seller.sellerId").equal(sellerId.get)
if (status.nonEmpty) {
val queryList = status.get.split(",").toSeq
query.field("status").in(queryList)
}
query.countAll()
}
}
/**
* Get coupon details
* @param id
* @return
*/
def getCoupon(id: ObjectId)(implicit ds: Datastore): Future[Option[Coupon]] = {
Future {
val query = ds.createQuery(classOf[Coupon]).field("id").equal(id)
Option(query.get)
}
}
/**
* Get the list of coupons currently available to a user
* @param userId
* @param ds
* @return
*/
def getCouponList(userId: Long)(implicit ds: Datastore): Future[Seq[Coupon]] = {
Future {
val query = ds.createQuery(classOf[Coupon]).field("userId").equal(userId).field("available").equal(true)
query.asList
}
}
// def createCouponTemp(userId: Long)(implicit ds: Datastore): Future[Unit] = {
// Future {
// val c = new BasicCoupon()
// c.threshold = 100
// c.desc = "清明节优惠卷"
// c.discount = 1
// c.expire = new Date()
// c.available = true
// c.userId = userId
// ds.save[Coupon](c)
// }
// }
}
|
Lvxingpai/Hanse
|
app/core/api/OrderAPI.scala
|
Scala
|
apache-2.0
| 7,989 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.logical
import org.apache.calcite.plan._
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.convert.ConverterRule
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.logical.LogicalJoin
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rex.RexNode
import org.apache.flink.table.plan.nodes.FlinkConventions
import scala.collection.JavaConverters._
class FlinkLogicalJoin(
cluster: RelOptCluster,
traitSet: RelTraitSet,
left: RelNode,
right: RelNode,
condition: RexNode,
joinType: JoinRelType)
extends Join(cluster, traitSet, left, right, condition, Set.empty[CorrelationId].asJava, joinType)
with FlinkLogicalRel {
override def copy(
traitSet: RelTraitSet,
conditionExpr: RexNode,
left: RelNode,
right: RelNode,
joinType: JoinRelType,
semiJoinDone: Boolean): Join = {
new FlinkLogicalJoin(cluster, traitSet, left, right, conditionExpr, joinType)
}
override def computeSelfCost (planner: RelOptPlanner, metadata: RelMetadataQuery): RelOptCost = {
val leftRowCnt = metadata.getRowCount(getLeft)
val leftRowSize = estimateRowSize(getLeft.getRowType)
val rightRowCnt = metadata.getRowCount(getRight)
val rightRowSize = estimateRowSize(getRight.getRowType)
val ioCost = (leftRowCnt * leftRowSize) + (rightRowCnt * rightRowSize)
val cpuCost = leftRowCnt + rightRowCnt
val rowCnt = leftRowCnt + rightRowCnt
planner.getCostFactory.makeCost(rowCnt, cpuCost, ioCost)
}
}
private class FlinkLogicalJoinConverter
extends ConverterRule(
classOf[LogicalJoin],
Convention.NONE,
FlinkConventions.LOGICAL,
"FlinkLogicalJoinConverter") {
override def matches(call: RelOptRuleCall): Boolean = {
val join: LogicalJoin = call.rel(0).asInstanceOf[LogicalJoin]
val joinInfo = join.analyzeCondition
hasEqualityPredicates(join, joinInfo) || isSingleRowInnerJoin(join)
}
override def convert(rel: RelNode): RelNode = {
val join = rel.asInstanceOf[LogicalJoin]
val traitSet = rel.getTraitSet.replace(FlinkConventions.LOGICAL)
val newLeft = RelOptRule.convert(join.getLeft, FlinkConventions.LOGICAL)
val newRight = RelOptRule.convert(join.getRight, FlinkConventions.LOGICAL)
new FlinkLogicalJoin(
rel.getCluster,
traitSet,
newLeft,
newRight,
join.getCondition,
join.getJoinType)
}
private def hasEqualityPredicates(join: LogicalJoin, joinInfo: JoinInfo): Boolean = {
// joins require an equi-condition or a conjunctive predicate with at least one equi-condition,
// and outer joins with non-equality predicates are disabled (see FLINK-5520)
!joinInfo.pairs().isEmpty && (joinInfo.isEqui || join.getJoinType == JoinRelType.INNER)
}
private def isSingleRowInnerJoin(join: LogicalJoin): Boolean = {
if (join.getJoinType == JoinRelType.INNER) {
isSingleRow(join.getRight) || isSingleRow(join.getLeft)
} else {
false
}
}
/**
* Recursively checks if a [[RelNode]] returns at most a single row.
* Input must be a global aggregation possibly followed by projections or filters.
*/
private def isSingleRow(node: RelNode): Boolean = {
node match {
case ss: RelSubset => isSingleRow(ss.getOriginal)
case lp: Project => isSingleRow(lp.getInput)
case lf: Filter => isSingleRow(lf.getInput)
case lc: Calc => isSingleRow(lc.getInput)
case la: Aggregate => la.getGroupSet.isEmpty
case _ => false
}
}
}
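// Illustrative note (not in the original source): matches() admits a
// LogicalJoin in two cases: it has at least one equi pair and is either a
// purely equi join or an INNER join, or it is an INNER join where one input
// is statically known to produce at most one row (a global aggregate,
// possibly wrapped in Project/Filter/Calc nodes or a RelSubset).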
object FlinkLogicalJoin {
val CONVERTER: ConverterRule = new FlinkLogicalJoinConverter()
}
|
hwstreaming/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/logical/FlinkLogicalJoin.scala
|
Scala
|
apache-2.0
| 4,531 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.spark.sql
import scala.language.implicitConversions
/**
* Makes Spark SQLContext accepts AsterixDB queries.
*/
package object asterix {
implicit def toSparkSQLContextFunctions(sqlContext: SQLContext): SQLContextFunctions =
new SQLContextFunctions(sqlContext)
}
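// Usage sketch (illustrative; assumes a SQLContext value named `sqlContext`):
// importing the package object brings the implicit into scope, so a plain
// SQLContext picks up whatever extension methods SQLContextFunctions defines
// (those method names are not shown in this file):
//   import org.apache.spark.sql.asterix._
//   val enriched: SQLContextFunctions = sqlContext  // applied implicitly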
|
Nullification/asterixdb-spark-connector
|
src/main/scala/org/apache/spark/sql/asterix/package.scala
|
Scala
|
apache-2.0
| 1,098 |
package org.jetbrains.plugins.scala
package annotator
package gutter
import java.util
import javax.swing.Icon
import com.intellij.codeHighlighting.Pass
import com.intellij.codeInsight.daemon.{DaemonCodeAnalyzerSettings, LineMarkerInfo, LineMarkerProvider}
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.editor.colors.{CodeInsightColors, EditorColorsManager}
import com.intellij.openapi.editor.markup.{GutterIconRenderer, SeparatorPlacement}
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.util.TextRange
import com.intellij.psi._
import com.intellij.psi.search.searches.ClassInheritorsSearch
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.util.NullableFunction
import org.jetbrains.plugins.scala.annotator.gutter.GutterIcons._
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScNamedElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTrait, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.search.ScalaOverridingMemberSearcher
import org.jetbrains.plugins.scala.lang.psi.types.Signature
import scala.collection.mutable.ArrayBuffer
import scala.collection.{Seq, mutable}
/**
* User: Alexander Podkhalyuzin
* Date: 31.10.2008
*/
class ScalaLineMarkerProvider(daemonSettings: DaemonCodeAnalyzerSettings, colorsManager: EditorColorsManager)
extends LineMarkerProvider with ScalaSeparatorProvider {
def getLineMarkerInfo(element: PsiElement): LineMarkerInfo[_ <: PsiElement] = {
if (!element.isValid) return null
val gator = getGatorInfo(element)
if(daemonSettings.SHOW_METHOD_SEPARATORS && isSeparatorNeeded(element)) {
if(gator == null) {
addSeparatorInfo(createMarkerInfo(element))
} else {
addSeparatorInfo(gator)
}
} else {
gator
}
}
def createMarkerInfo(element: PsiElement): LineMarkerInfo[PsiElement] = {
val leaf = Option(PsiTreeUtil.firstChild(element)).getOrElse(element)
new LineMarkerInfo[PsiElement](
leaf, leaf.getTextRange, null, Pass.UPDATE_ALL,
NullableFunction.NULL.asInstanceOf[com.intellij.util.Function[PsiElement,String]],
null, GutterIconRenderer.Alignment.RIGHT)
}
def addSeparatorInfo(info: LineMarkerInfo[_ <: PsiElement]): LineMarkerInfo[_ <: PsiElement] = {
info.separatorColor = colorsManager.getGlobalScheme.getColor(CodeInsightColors.METHOD_SEPARATORS_COLOR)
info.separatorPlacement = SeparatorPlacement.TOP
info
}
def getGatorInfo(element: PsiElement): LineMarkerInfo[_ <: PsiElement] = {
if (element.getNode.getElementType == ScalaTokenTypes.tIDENTIFIER) {
val range = element.getTextRange
if (element.getParent.isInstanceOf[ScReferenceElement]) return null // e.g. the `Int` in `type A = Int`
def getParent: PsiElement = {
var e = element
def test(x: PsiElement) = x match {
case _: ScFunction | _: ScValue | _: ScVariable | _: ScTypeDefinition | _: ScTypeAlias => true
case _ => false
}
while (e != null && !test(e)) e = e.getParent
e
}
def marker(element: PsiElement, icon: Icon, typez: ScalaMarkerType): LineMarkerInfo[PsiElement] =
new LineMarkerInfo[PsiElement](element, range, icon, Pass.UPDATE_ALL, typez.fun, typez.handler, GutterIconRenderer.Alignment.LEFT)
val parent = getParent
if (parent == null) return null
def containsNamedElement(holder: ScDeclaredElementsHolder) = holder.declaredElements.exists(_.asInstanceOf[ScNamedElement].nameId == element)
(parent, parent.getParent) match {
case (method: ScFunction, _: ScTemplateBody) if method.nameId == element =>
val signatures: Seq[Signature] = mutable.HashSet[Signature](method.superSignaturesIncludingSelfType: _*).toSeq
val icon = if (GutterUtil.isOverrides(method, signatures)) OVERRIDING_METHOD_ICON else IMPLEMENTING_METHOD_ICON
val typez = ScalaMarkerType.OVERRIDING_MEMBER
if (signatures.nonEmpty) {
return marker(method.nameId, icon, typez)
}
case (x: ScValueOrVariable, _: ScTemplateBody)
if containsNamedElement(x.asInstanceOf[ScDeclaredElementsHolder]) =>
val signatures = new ArrayBuffer[Signature]
val bindings = x match {case v: ScDeclaredElementsHolder => v.declaredElements case _ => return null}
for (z <- bindings) signatures ++= ScalaPsiUtil.superValsSignatures(z, withSelfType = true)
val icon = if (GutterUtil.isOverrides(x, signatures)) OVERRIDING_METHOD_ICON else IMPLEMENTING_METHOD_ICON
val typez = ScalaMarkerType.OVERRIDING_MEMBER
if (signatures.nonEmpty) {
return marker(x.keywordToken, icon, typez)
}
case (x: ScObject, _: ScTemplateBody) if x.nameId == element =>
val signatures = ScalaPsiUtil.superValsSignatures(x, withSelfType = true)
val icon = if (GutterUtil.isOverrides(x, signatures)) OVERRIDING_METHOD_ICON else IMPLEMENTING_METHOD_ICON
val typez = ScalaMarkerType.OVERRIDING_MEMBER
if (signatures.nonEmpty) {
return marker(x.getObjectToken, icon, typez)
}
case (td : ScTypeDefinition, _: ScTemplateBody) if !td.isObject =>
val signature = ScalaPsiUtil.superTypeMembers(td, withSelfType = true)
val icon = IMPLEMENTING_METHOD_ICON
val typez = ScalaMarkerType.OVERRIDING_MEMBER
if (signature.nonEmpty) {
return marker(td.getObjectClassOrTraitToken, icon, typez)
}
case (ta : ScTypeAlias, _: ScTemplateBody) =>
val signature = ScalaPsiUtil.superTypeMembers(ta, withSelfType = true)
val icon = IMPLEMENTING_METHOD_ICON
val typez = ScalaMarkerType.OVERRIDING_MEMBER
if (signature.nonEmpty) {
return marker(ta.getTypeToken, icon, typez)
}
case _ =>
}
parent match {
case method: ScFunctionDefinition if method.nameId == element =>
method.recursionType match {
case RecursionType.OrdinaryRecursion =>
return new LineMarkerInfo[PsiElement](method.nameId, range, RECURSION_ICON, Pass.UPDATE_ALL,
(e: PsiElement) => "Method '%s' is recursive".format(e.getText), null, GutterIconRenderer.Alignment.LEFT)
case RecursionType.TailRecursion =>
return new LineMarkerInfo[PsiElement](method.nameId, range, TAIL_RECURSION_ICON, Pass.UPDATE_ALL,
(e: PsiElement) => "Method '%s' is tail recursive".format(e.getText), null, GutterIconRenderer.Alignment.LEFT)
case RecursionType.NoRecursion => // no markers
}
case _ =>
}
}
null
}
def collectSlowLineMarkers(elements: util.List[PsiElement], result: util.Collection[LineMarkerInfo[_ <: PsiElement]]) {
ApplicationManager.getApplication.assertReadAccessAllowed()
val members = new ArrayBuffer[PsiElement]
val iterator = elements.iterator()
while (iterator.hasNext) {
val element = iterator.next()
ProgressManager.checkCanceled()
element match {
case clazz: ScTypeDefinition =>
GutterUtil.collectInheritingClasses(clazz, result)
case _ =>
}
element match {
case x: ScTypeDefinition if !x.isObject && x.getParent.isInstanceOf[ScTemplateBody] => members += x
case x: PsiMember with PsiNamedElement => members += x
case _: ScValue | _: ScVariable => members += element
case _ =>
}
}
if (members.nonEmpty) {
GutterUtil.collectOverridingMembers(members, result)
}
}
}
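// Illustrative note (not in the original source): getLineMarkerInfo is the
// cheap per-element pass (method separators plus the override/implement and
// recursion gutter icons), while collectSlowLineMarkers batches the expensive
// ClassInheritorsSearch and ScalaOverridingMemberSearcher queries.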
private object GutterUtil {
def collectInheritingClasses(clazz: ScTypeDefinition, result: util.Collection[LineMarkerInfo[_ <: PsiElement]]) {
if ("scala.ScalaObject".equals(clazz.qualifiedName)) return
val inheritor = ClassInheritorsSearch.search(clazz, false).findFirst
if (inheritor != null) {
val range = clazz.nameId.getTextRange
val icon = clazz match {
case _: ScTrait => IMPLEMENTED_INTERFACE_MARKER_RENDERER
case _ => SUBCLASSED_CLASS_MARKER_RENDERER
}
val typez = ScalaMarkerType.SUBCLASSED_CLASS
val info = new LineMarkerInfo[PsiElement](clazz.nameId, range, icon, Pass.LINE_MARKERS, typez.fun, typez.handler,
GutterIconRenderer.Alignment.RIGHT)
result.add(info)
}
}
def collectOverridingMembers(members: ArrayBuffer[PsiElement], result: util.Collection[LineMarkerInfo[_ <: PsiElement]]) {
for (member <- members if !member.isInstanceOf[PsiMethod] || !member.asInstanceOf[PsiMethod].isConstructor) {
ProgressManager.checkCanceled()
val range = new TextRange(member.getTextOffset, member.getTextOffset)
val namedElems: Seq[ScNamedElement] = member match {
case d: ScDeclaredElementsHolder => d.declaredElements.filterBy(classOf[ScNamedElement])
case td: ScTypeDefinition => Seq(td)
case ta: ScTypeAlias => Seq(ta)
case _ => Seq.empty
}
val overrides = new ArrayBuffer[PsiNamedElement]
for (named <- namedElems) overrides ++= ScalaOverridingMemberSearcher.search(named, deep = false, withSelfType = true)
if (overrides.nonEmpty) {
val icon = if (!GutterUtil.isAbstract(member)) OVERRIDEN_METHOD_MARKER_RENDERER else IMPLEMENTED_INTERFACE_MARKER_RENDERER
val typez = ScalaMarkerType.OVERRIDDEN_MEMBER
val leafElement =
namedElems
.headOption
.map(_.nameId)
.getOrElse(PsiTreeUtil.firstChild(member))
val info = new LineMarkerInfo[PsiElement](leafElement, range, icon, Pass.LINE_MARKERS, typez.fun, typez.handler, GutterIconRenderer.Alignment.RIGHT)
result.add(info)
}
}
}
def isOverrides(element: PsiElement, supers: Seq[Signature]): Boolean = {
element match {
case _: ScFunctionDeclaration => true
case _: ScValueDeclaration => true
case _: ScVariableDeclaration => true
case _ =>
val iter = supers.iterator
while (iter.hasNext) {
val s = iter.next()
ScalaPsiUtil.nameContext(s.namedElement) match {
case _: ScFunctionDefinition => return true
case _: ScFunction =>
case method: PsiMethod if !method.hasAbstractModifier => return true
case _: ScVariableDefinition | _: ScPatternDefinition => return true
case f: PsiField if !f.hasAbstractModifier => return true
case _: ScVariableDeclaration =>
case _: ScValueDeclaration =>
case _: ScParameter => return true
case _: ScTypeAliasDefinition => return true
case _: ScTypeAliasDeclaration =>
case _: PsiClass => return true
case _ =>
}
}
false
}
}
def isAbstract(element: PsiElement): Boolean = element match {
case _: ScFunctionDeclaration => true
case _: ScValueDeclaration => true
case _: ScVariableDeclaration => true
case _ => false
}
}
|
triplequote/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/gutter/ScalaLineMarkerProvider.scala
|
Scala
|
apache-2.0
| 11,591 |
/*
*************************************************************************************
* Copyright 2013 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.api
import net.liftweb.common.Box
import net.liftweb.common.Full
import org.joda.time.DateTime
import com.normation.ldap.sdk.LDAPConnectionProvider
import com.normation.rudder.repository.ldap.LDAPEntityMapper
import com.normation.rudder.domain.RudderDit
import com.normation.ldap.sdk.RoLDAPConnection
import com.normation.ldap.sdk.RwLDAPConnection
import com.normation.rudder.services.queries.LDAPFilter
import com.normation.ldap.sdk.BuildFilter
import com.normation.rudder.domain.RudderLDAPConstants
import net.liftweb.common.Loggable
import net.liftweb.common.EmptyBox
import net.liftweb.common.Empty
import net.liftweb.common.Failure
import com.normation.inventory.ldap.core.LDAPConstants
import com.normation.rudder.domain.RudderLDAPConstants.A_API_UUID
import com.normation.rudder.repository.ldap.LDAPDiffMapper
import com.normation.rudder.repository.EventLogRepository
import com.normation.rudder.services.user.PersonIdentService
import com.normation.eventlog.ModificationId
import com.normation.eventlog.EventActor
/**
* A repository to retrieve API Accounts
*/
trait RoApiAccountRepository {
/**
* Retrieve all API Accounts
*/
def getAll(): Box[Seq[ApiAccount]]
def getByToken(token: ApiToken): Box[Option[ApiAccount]]
def getById(id : ApiAccountId) : Box[Option[ApiAccount]]
}
/**
* A Repository to save principals
*/
trait WoApiAccountRepository {
/**
* Save an API account.
* If an account with the same name or the same token already exists,
* the action won't be performed.
*
*/
def save(
principal : ApiAccount
, modId : ModificationId
, actor : EventActor): Box[ApiAccount]
def delete(
id : ApiAccountId
, modId : ModificationId
, actor : EventActor) : Box[ApiAccountId]
}
final class RoLDAPApiAccountRepository(
val rudderDit : RudderDit
, val ldapConnexion: LDAPConnectionProvider[RoLDAPConnection]
, val mapper : LDAPEntityMapper
) extends RoApiAccountRepository with Loggable {
override def getAll(): Box[Seq[ApiAccount]] = {
for {
ldap <- ldapConnexion
} yield {
val entries = ldap.searchOne(rudderDit.API_ACCOUNTS.dn, BuildFilter.IS(RudderLDAPConstants.OC_API_ACCOUNT))
// map to ApiAccount on a best-effort basis
entries.flatMap ( e => mapper.entry2ApiAccount(e) match {
case eb:EmptyBox =>
val error = eb ?~! s"Ignoring API Account with dn ${e.dn} due to mapping error"
logger.debug(error.messageChain)
None
case Full(p) => Some(p)
} )
}
}
override def getByToken(token: ApiToken): Box[Option[ApiAccount]] = {
for {
ldap <- ldapConnexion
// here, be careful with the semantics of get with a filter!
optEntry <- ldap.get(rudderDit.API_ACCOUNTS.dn, BuildFilter.EQ(RudderLDAPConstants.A_API_TOKEN, token.value)) match {
case f:Failure => f
case Empty => Full(None)
case Full(e) => Full(Some(e))
}
optRes <- optEntry match {
case None => Full(None)
case Some(e) => mapper.entry2ApiAccount(e).map( Some(_) )
}
} yield {
optRes
}
}
override def getById(id:ApiAccountId) : Box[Option[ApiAccount]] = {
for {
ldap <- ldapConnexion
optEntry <- ldap.get(rudderDit.API_ACCOUNTS.API_ACCOUNT.dn(id)) match {
case f:Failure => f
case Empty => Full(None)
case Full(e) => Full(Some(e))
}
optRes <- optEntry match {
case None => Full(None)
case Some(e) => mapper.entry2ApiAccount(e).map( Some(_) )
}
} yield {
optRes
}
}
}
final class WoLDAPApiAccountRepository(
rudderDit : RudderDit
, ldapConnexion : LDAPConnectionProvider[RwLDAPConnection]
, mapper : LDAPEntityMapper
, diffMapper : LDAPDiffMapper
, actionLogger : EventLogRepository
, personIdentService : PersonIdentService
) extends WoApiAccountRepository with Loggable {
repo =>
override def save(
principal : ApiAccount
, modId : ModificationId
, actor : EventActor) : Box[ApiAccount] = {
repo.synchronized {
for {
ldap <- ldapConnexion
existing <- ldap.get(rudderDit.API_ACCOUNTS.dn, BuildFilter.EQ(RudderLDAPConstants.A_API_TOKEN, principal.token.value)) match {
case f:Failure => f
case Empty => Full(None)
case Full(e) => if(e(A_API_UUID) == Some(principal.id.value)) {
Full(e)
} else {
Failure("An account with given token but different id already exists")
}
}
name <- ldap.get(rudderDit.API_ACCOUNTS.dn, BuildFilter.EQ(LDAPConstants.A_NAME, principal.name.value)) match {
case f:Failure => f
case Empty => Full(None)
case Full(e) => if(e(A_API_UUID) == Some(principal.id.value)) {
Full(e)
} else {
Failure(s"An account with the same name ${principal.name.value} exists")
}
}
optPrevious <- ldap.get(rudderDit.API_ACCOUNTS.API_ACCOUNT.dn(principal.id)) match {
case f:Failure => f
case Empty => Full(None)
case Full(e) => Full(Some(e))
}
entry = mapper.apiAccount2Entry(principal)
saved <- ldap.save(entry, removeMissingAttributes=true)
loggedAction <- optPrevious match {
// if there is a previous value, then it's an update
case Some(previous) =>
for {
optDiff <- diffMapper.modChangeRecords2ApiAccountDiff(
previous
, saved)
action <- optDiff match {
case Some(diff) => actionLogger.saveModifyApiAccount(modId, principal = actor, modifyDiff = diff, None) ?~! "Error when logging modification of an API Account as an event"
case None => Full("Ok")
}
} yield {
action
}
// if there is no previous value, then it's a creation
case None =>
for {
diff <- diffMapper.addChangeRecords2ApiAccountDiff(
entry.dn
, saved)
action <- actionLogger.saveCreateApiAccount(modId, principal = actor, addDiff = diff, None) ?~! "Error when logging creation of API Account as an event"
} yield {
action
}
}
} yield {
principal
}
}
}
override def delete(
id : ApiAccountId
, modId : ModificationId
, actor : EventActor) : Box[ApiAccountId] = {
for {
ldap <- ldapConnexion
entry <- ldap.get(rudderDit.API_ACCOUNTS.API_ACCOUNT.dn(id)) ?~! "Api Account with ID '%s' is not present".format(id.value)
oldAccount <- mapper.entry2ApiAccount(entry)
deleted <- ldap.delete(rudderDit.API_ACCOUNTS.API_ACCOUNT.dn(id))
diff = DeleteApiAccountDiff(oldAccount)
loggedAction <- actionLogger.saveDeleteApiAccount(modId, principal = actor, deleteDiff = diff, None)
} yield {
id
}
}
}
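// Illustrative note (not in the original source): the two lookups in save()
// guard uniqueness; a hit on the token or name filter only passes if the entry
// carries the principal's own id. optPrevious then decides whether the logged
// event is a modification diff (update) or an addition diff (creation).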
|
Kegeruneku/rudder
|
rudder-core/src/main/scala/com/normation/rudder/api/ApiAccountRepository.scala
|
Scala
|
agpl-3.0
| 9,886 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.partition
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.CarbonMetadata
import org.apache.carbondata.core.metadata.datatype.{DataType, DataTypes}
import org.apache.carbondata.core.metadata.encoder.Encoding
import org.apache.carbondata.core.metadata.schema.partition.PartitionType
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.test.util.QueryTest
class TestDDLForPartitionTable extends QueryTest with BeforeAndAfterAll {
override def beforeAll = {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy-MM-dd HH:mm:ss")
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
dropTable
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
}
test("create partition table: hash partition") {
sql(
"""
| CREATE TABLE default.hashTable (empname String, designation String, doj Timestamp,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (empno int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='HASH','NUM_PARTITIONS'='3')
""".stripMargin)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_hashTable")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
assert(partitionInfo != null)
assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("empno"))
assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataTypes.INT)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 0)
assert(partitionInfo.getPartitionType == PartitionType.HASH)
assert(partitionInfo.getNumPartitions == 3)
}
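// Illustrative note (not in the original source): the DDL tests in this suite
// follow the same shape: issue CREATE TABLE with PARTITION_TYPE tblproperties,
// read the PartitionInfo back through CarbonMetadata, then assert on the
// partition column's schema, encodings, and partition values.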
test("create partition table: range partition") {
sql(
"""
| CREATE TABLE default.rangeTable (empno int, empname String, designation String,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (doj Timestamp)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2017-06-11 00:00:02, 2017-06-13 23:59:59', 'DICTIONARY_INCLUDE'='doj')
""".stripMargin)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_rangeTable")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
assert(partitionInfo != null)
assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("doj"))
assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataTypes.TIMESTAMP)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 3)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(0) == Encoding.DICTIONARY)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(1) == Encoding.DIRECT_DICTIONARY)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(2) == Encoding.INVERTED_INDEX)
assert(partitionInfo.getPartitionType == PartitionType.RANGE)
assert(partitionInfo.getRangeInfo.size == 2)
assert(partitionInfo.getRangeInfo.get(0).equals("2017-06-11 00:00:02"))
assert(partitionInfo.getRangeInfo.get(1).equals("2017-06-13 23:59:59"))
}
test("create partition table: list partition") {
sql(
"""
| CREATE TABLE default.listTable (empno int, empname String, designation String, doj Timestamp,
| workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (workgroupcategory string)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='0, 1, (2, 3)')
""".stripMargin)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_listTable")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
assert(partitionInfo != null)
assert(partitionInfo.getColumnSchemaList.get(0).getColumnName.equalsIgnoreCase("workgroupcategory"))
assert(partitionInfo.getColumnSchemaList.get(0).getDataType == DataTypes.STRING)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.size == 1)
assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(0) == Encoding.INVERTED_INDEX)
assert(partitionInfo.getPartitionType == PartitionType.LIST)
assert(partitionInfo.getListInfo.size == 3)
assert(partitionInfo.getListInfo.get(0).size == 1)
assert(partitionInfo.getListInfo.get(0).get(0).equals("0"))
assert(partitionInfo.getListInfo.get(1).size == 1)
assert(partitionInfo.getListInfo.get(1).get(0).equals("1"))
assert(partitionInfo.getListInfo.get(2).size == 2)
assert(partitionInfo.getListInfo.get(2).get(0).equals("2"))
assert(partitionInfo.getListInfo.get(2).get(1).equals("3"))
}
test("create partition table: list partition with duplicate value") {
intercept[Exception] { sql(
"""
| CREATE TABLE default.listTableError (empno int, empname String, designation String, doj Timestamp,
| workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (workgroupcategory string)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='0, 1, (2, 3, 1)')
""".stripMargin) }
}
test("test exception if partition column is dropped") {
sql("drop table if exists test")
sql(
"create table test(a int, b string) partitioned by (c int) stored by 'carbondata' " +
"tblproperties('PARTITION_TYPE'='LIST','list_info'='0,10,5,20')")
intercept[Exception] { sql("alter table test drop columns(c)") }
}
test("test describe formatted for partition column") {
sql("drop table if exists des")
sql(
"""create table des(a int, b string) partitioned by (c string) stored by 'carbondata'
|tblproperties ('partition_type'='list','list_info'='1,2')""".stripMargin)
checkExistence(sql("describe formatted des"), true, "Partition Columns")
sql("drop table if exists des")
}
test("test exception if hash number is invalid") {
sql("DROP TABLE IF EXISTS test_hash_1")
val exception_test_hash_1: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_hash_1(col1 INT, col2 STRING)
| PARTITIONED BY (col3 INT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='HASH', 'NUM_PARTITIONS'='2.1')
""".stripMargin
)
}
assert(exception_test_hash_1.getMessage.contains("Invalid partition definition"))
sql("DROP TABLE IF EXISTS test_hash_2")
val exception_test_hash_2: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_hash_2(col1 INT, col2 STRING)
| PARTITIONED BY (col3 INT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='HASH', 'NUM_PARTITIONS'='abc')
""".stripMargin
)
}
assert(exception_test_hash_2.getMessage.contains("Invalid partition definition"))
sql("DROP TABLE IF EXISTS test_hash_3")
val exception_test_hash_3: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_hash_3(col1 INT, col2 STRING)
| PARTITIONED BY (col3 INT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='HASH', 'NUM_PARTITIONS'='-2.1')
""".stripMargin
)
}
assert(exception_test_hash_3.getMessage.contains("Invalid partition definition"))
}
test("test exception when values in list_info can not match partition column type") {
sql("DROP TABLE IF EXISTS test_list_int")
val exception_test_list_int: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_list_int(col1 INT, col2 STRING)
| PARTITIONED BY (col3 INT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_list_int.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_list_small")
val exception_test_list_small: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_list_small(col1 INT, col2 STRING)
| PARTITIONED BY (col3 SMALLINT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_list_small.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_list_float")
val exception_test_list_float: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_list_float(col1 INT, col2 STRING)
| PARTITIONED BY (col3 FLOAT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_list_float.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_list_double")
val exception_test_list_double: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_list_double(col1 INT, col2 STRING)
| PARTITIONED BY (col3 DOUBLE) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_list_double.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_list_bigint")
val exception_test_list_bigint: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_list_bigint(col1 INT, col2 STRING)
| PARTITIONED BY (col3 BIGINT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_list_bigint.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_list_date")
val exception_test_list_date: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_list_date(col1 INT, col2 STRING)
| PARTITIONED BY (col3 DATE) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_list_date.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_list_timestamp")
val exception_test_list_timestamp: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_list_timestamp(col1 INT, col2 STRING)
| PARTITIONED BY (col3 TIMESTAMP) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_list_timestamp.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_list_decimal")
val exception_test_list_decimal: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_list_decimal(col1 INT, col2 STRING)
| PARTITIONED BY (col3 DECIMAL(25, 4)) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='23.23111,2.32')
""".stripMargin)
}
assert(exception_test_list_decimal.getMessage.contains("Invalid Partition Values"))
}
test("test exception when values in range_info can not match partition column type") {
sql("DROP TABLE IF EXISTS test_range_int")
val exception_test_range_int: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_int(col1 INT, col2 STRING)
| PARTITIONED BY (col3 INT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_range_int.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_range_smallint")
val exception_test_range_smallint: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_smallint(col1 INT, col2 STRING)
| PARTITIONED BY (col3 SMALLINT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_range_smallint.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_range_float")
val exception_test_range_float: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_float(col1 INT, col2 STRING)
| PARTITIONED BY (col3 FLOAT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_range_float.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_range_double")
val exception_test_range_double: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_double(col1 INT, col2 STRING)
| PARTITIONED BY (col3 DOUBLE) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_range_double.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_range_bigint")
val exception_test_range_bigint: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_bigint(col1 INT, col2 STRING)
| PARTITIONED BY (col3 BIGINT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_range_bigint.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_range_date")
val exception_test_range_date: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_date(col1 INT, col2 STRING)
| PARTITIONED BY (col3 DATE) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_range_date.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_range_timestamp")
val exception_test_range_timestamp: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_timestamp(col1 INT, col2 STRING)
| PARTITIONED BY (col3 TIMESTAMP) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_range_timestamp.getMessage.contains("Invalid Partition Values"))
sql("DROP TABLE IF EXISTS test_range_decimal")
val exception_test_range_decimal: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_decimal(col1 INT, col2 STRING)
| PARTITIONED BY (col3 DECIMAL(25, 4)) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
""".stripMargin)
}
assert(exception_test_range_decimal.getMessage.contains("Invalid Partition Values"))
}
test("Invalid Partition Range") {
val exceptionMessage: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE default.rangeTableInvalid (empno int, empname String, designation String,
| workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
| projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
| utilization int,salary int)
| PARTITIONED BY (doj Timestamp)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2017-06-11 00:00:02')
""".stripMargin)
}
assert(exceptionMessage.getMessage
.contains("Range info must define a valid range.Please check again!"))
}
override def afterAll = {
dropTable
}
def dropTable = {
sql("drop table if exists hashTable")
sql("drop table if exists rangeTable")
sql("drop table if exists listTable")
sql("drop table if exists test")
sql("DROP TABLE IF EXISTS test_hash_1")
sql("DROP TABLE IF EXISTS test_hash_2")
sql("DROP TABLE IF EXISTS test_hash_3")
sql("DROP TABLE IF EXISTS test_list_int")
sql("DROP TABLE IF EXISTS test_list_smallint")
sql("DROP TABLE IF EXISTS test_list_bigint")
sql("DROP TABLE IF EXISTS test_list_float")
sql("DROP TABLE IF EXISTS test_list_double")
sql("DROP TABLE IF EXISTS test_list_date")
sql("DROP TABLE IF EXISTS test_list_timestamp")
sql("DROP TABLE IF EXISTS test_list_decimal")
sql("DROP TABLE IF EXISTS test_range_int")
sql("DROP TABLE IF EXISTS test_range_smallint")
sql("DROP TABLE IF EXISTS test_range_bigint")
sql("DROP TABLE IF EXISTS test_range_float")
sql("DROP TABLE IF EXISTS test_range_double")
sql("DROP TABLE IF EXISTS test_range_date")
sql("DROP TABLE IF EXISTS test_range_timestamp")
sql("DROP TABLE IF EXISTS test_range_decimal")
sql("DROP TABLE IF EXISTS rangeTableInvalid")
}
}
|
HuaweiBigData/carbondata
|
integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
|
Scala
|
apache-2.0
| 18,832 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.loadBalancer
import java.nio.charset.StandardCharsets
import scala.collection.immutable
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success
import org.apache.kafka.clients.producer.RecordMetadata
import akka.actor.{Actor, ActorRef, ActorRefFactory, FSM, Props}
import akka.actor.FSM.CurrentState
import akka.actor.FSM.SubscribeTransitionCallBack
import akka.actor.FSM.Transition
import akka.pattern.pipe
import akka.util.Timeout
import org.apache.openwhisk.common._
import org.apache.openwhisk.core.connector._
import org.apache.openwhisk.core.database.NoDocumentException
import org.apache.openwhisk.core.entity.ActivationId.ActivationIdGenerator
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.entity.types.EntityStore
// Received events
case object GetStatus
case object Tick
// States an Invoker can be in
sealed trait InvokerState {
val asString: String
val isUsable: Boolean
}
object InvokerState {
// Invokers in this state can be used to schedule workload to
sealed trait Usable extends InvokerState { val isUsable = true }
// No workload should be scheduled to invokers in this state
sealed trait Unusable extends InvokerState { val isUsable = false }
// A completely healthy invoker, pings arriving fine, no system errors
case object Healthy extends Usable { val asString = "up" }
// Pings are arriving fine, the invoker returns system errors though
case object Unhealthy extends Unusable { val asString = "unhealthy" }
// Pings are arriving fine, the invoker does not respond with active-acks in the expected time though
case object Unresponsive extends Unusable { val asString = "unresponsive" }
// Pings are not arriving for this invoker
case object Offline extends Unusable { val asString = "down" }
}
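// Illustrative note (not in the original source): only Healthy is Usable;
// Unhealthy, Unresponsive, and Offline all report isUsable = false and differ
// only in which health signal (system errors, active-acks, pings) is failing.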
// Possible answers of an activation
sealed trait InvocationFinishedResult
object InvocationFinishedResult {
// The activation could be successfully executed from the system's point of view. That includes user- and application
// errors
case object Success extends InvocationFinishedResult
// The activation could not be executed because of a system error
case object SystemError extends InvocationFinishedResult
// The active-ack did not arrive before it timed out
case object Timeout extends InvocationFinishedResult
}
case class ActivationRequest(msg: ActivationMessage, invoker: InvokerInstanceId)
case class InvocationFinishedMessage(invokerInstance: InvokerInstanceId, result: InvocationFinishedResult)
// Sent to a monitor if the state changed
case class CurrentInvokerPoolState(newState: IndexedSeq[InvokerHealth])
// Data stored in the Invoker
final case class InvokerInfo(buffer: RingBuffer[InvocationFinishedResult])
/**
* Actor representing a pool of invokers
*
* The InvokerPool manages Invokers through subactors. A new Invoker
* is registered lazily by sending it a Ping event with the name of the
* Invoker. Ping events are also forwarded to the respective
* Invoker for its own state handling.
*
* Note: An Invoker that never sends an initial Ping will not be considered
* by the InvokerPool and thus might not be caught by monitoring.
*/
class InvokerPool(childFactory: (ActorRefFactory, InvokerInstanceId) => ActorRef,
sendActivationToInvoker: (ActivationMessage, InvokerInstanceId) => Future[RecordMetadata],
pingConsumer: MessageConsumer,
monitor: Option[ActorRef])
extends Actor {
import InvokerState._
implicit val transid: TransactionId = TransactionId.invokerHealth
implicit val logging: Logging = new AkkaLogging(context.system.log)
implicit val timeout: Timeout = Timeout(5.seconds)
implicit val ec: ExecutionContext = context.dispatcher
// State of the actor. Mutable vars holding immutable collections prevent closures or messages
// from leaking the state for external mutation
var instanceToRef = immutable.Map.empty[Int, ActorRef]
var refToInstance = immutable.Map.empty[ActorRef, InvokerInstanceId]
var status = IndexedSeq[InvokerHealth]()
def receive: Receive = {
case p: PingMessage =>
val invoker = instanceToRef.getOrElse(p.instance.toInt, registerInvoker(p.instance))
instanceToRef = instanceToRef.updated(p.instance.toInt, invoker)
// Handles the case where the invoker was restarted and received a new display name
val oldHealth = status(p.instance.toInt)
if (oldHealth.id != p.instance) {
status = status.updated(p.instance.toInt, new InvokerHealth(p.instance, oldHealth.status))
refToInstance = refToInstance.updated(invoker, p.instance)
}
invoker.forward(p)
case GetStatus => sender() ! status
case msg: InvocationFinishedMessage =>
// Forward message to invoker, if InvokerActor exists
instanceToRef.get(msg.invokerInstance.toInt).foreach(_.forward(msg))
case CurrentState(invoker, currentState: InvokerState) =>
refToInstance.get(invoker).foreach { instance =>
status = status.updated(instance.toInt, new InvokerHealth(instance, currentState))
}
logStatus()
case Transition(invoker, oldState: InvokerState, newState: InvokerState) =>
refToInstance.get(invoker).foreach { instance =>
status = status.updated(instance.toInt, new InvokerHealth(instance, newState))
}
logStatus()
// this is only used for the internal test action which enables an invoker to become healthy again
case msg: ActivationRequest => sendActivationToInvoker(msg.msg, msg.invoker).pipeTo(sender)
}
def logStatus(): Unit = {
monitor.foreach(_ ! CurrentInvokerPoolState(status))
val pretty = status.map(i => s"${i.id.toInt} -> ${i.status}")
logging.info(this, s"invoker status changed to ${pretty.mkString(", ")}")
}
/** Receive Ping messages from invokers. */
val pingPollDuration: FiniteDuration = 1.second
val invokerPingFeed: ActorRef = context.system.actorOf(Props {
new MessageFeed(
"ping",
logging,
pingConsumer,
pingConsumer.maxPeek,
pingPollDuration,
processInvokerPing,
logHandoff = false)
})
def processInvokerPing(bytes: Array[Byte]): Future[Unit] = Future {
val raw = new String(bytes, StandardCharsets.UTF_8)
PingMessage.parse(raw) match {
case Success(p: PingMessage) =>
self ! p
invokerPingFeed ! MessageFeed.Processed
case Failure(t) =>
invokerPingFeed ! MessageFeed.Processed
logging.error(this, s"failed processing message: $raw with $t")
}
}
/** Pads a list to a given length using the given function to compute entries */
def padToIndexed[A](list: IndexedSeq[A], n: Int, f: (Int) => A): IndexedSeq[A] = list ++ (list.size until n).map(f)
// Register a new invoker
def registerInvoker(instanceId: InvokerInstanceId): ActorRef = {
logging.info(this, s"registered a new invoker: invoker${instanceId.toInt}")(TransactionId.invokerHealth)
// Grow the underlying status sequence to the size needed to contain the incoming ping. Dummy values are created
// to represent invokers whose ping messages haven't arrived yet
status = padToIndexed(
status,
instanceId.toInt + 1,
i => new InvokerHealth(InvokerInstanceId(i, userMemory = instanceId.userMemory), Offline))
status = status.updated(instanceId.toInt, new InvokerHealth(instanceId, Offline))
val ref = childFactory(context, instanceId)
ref ! SubscribeTransitionCallBack(self) // register for state change events
refToInstance = refToInstance.updated(ref, instanceId)
ref
}
}
object InvokerPool {
private def createTestActionForInvokerHealth(db: EntityStore, action: WhiskAction): Future[Unit] = {
implicit val tid: TransactionId = TransactionId.loadbalancer
implicit val ec: ExecutionContext = db.executionContext
implicit val logging: Logging = db.logging
WhiskAction
.get(db, action.docid)
.flatMap { oldAction =>
WhiskAction.put(db, action.revision(oldAction.rev), Some(oldAction))(tid, notifier = None)
}
.recover {
case _: NoDocumentException => WhiskAction.put(db, action, old = None)(tid, notifier = None)
}
.map(_ => {})
.andThen {
case Success(_) => logging.info(this, "test action for invoker health now exists")
case Failure(e) => logging.error(this, s"error creating test action for invoker health: $e")
}
}
/**
 * Prepares everything for the health protocol to work (i.e. creates a test action)
*
* @param controllerInstance instance of the controller we run in
* @param entityStore store to write the action to
* @return throws an exception on failure to prepare
*/
def prepare(controllerInstance: ControllerInstanceId, entityStore: EntityStore): Unit = {
InvokerPool
.healthAction(controllerInstance)
.map {
// Await the creation of the test action; on failure, this will abort the constructor which should
// in turn abort the startup of the controller.
a =>
Await.result(createTestActionForInvokerHealth(entityStore, a), 1.minute)
}
.orElse {
throw new IllegalStateException(
"cannot create test action for invoker health because runtime manifest is not valid")
}
}
def props(f: (ActorRefFactory, InvokerInstanceId) => ActorRef,
p: (ActivationMessage, InvokerInstanceId) => Future[RecordMetadata],
pc: MessageConsumer,
m: Option[ActorRef] = None): Props = {
Props(new InvokerPool(f, p, pc, m))
}
/** A stub identity for invoking the test action. This does not need to be a valid identity. */
val healthActionIdentity: Identity = {
val whiskSystem = "whisk.system"
val uuid = UUID()
Identity(Subject(whiskSystem), Namespace(EntityName(whiskSystem), uuid), BasicAuthenticationAuthKey(uuid, Secret()))
}
/** An action to use for monitoring invoker health. */
def healthAction(i: ControllerInstanceId): Option[WhiskAction] =
ExecManifest.runtimesManifest.resolveDefaultRuntime("nodejs:default").map { manifest =>
new WhiskAction(
namespace = healthActionIdentity.namespace.name.toPath,
name = EntityName(s"invokerHealthTestAction${i.asString}"),
exec = CodeExecAsString(manifest, """function main(params) { return params; }""", None),
limits = ActionLimits(memory = MemoryLimit(MemoryLimit.MIN_MEMORY)))
}
}
/**
* Actor representing an Invoker
*
 * This finite state machine represents an Invoker in its possible
 * states "Healthy", "Unhealthy", "Unresponsive" and "Offline".
*/
class InvokerActor(invokerInstance: InvokerInstanceId, controllerInstance: ControllerInstanceId)
extends FSM[InvokerState, InvokerInfo] {
import InvokerState._
implicit val transid: TransactionId = TransactionId.invokerHealth
implicit val logging: Logging = new AkkaLogging(context.system.log)
val name = s"invoker${invokerInstance.toInt}"
val healthyTimeout: FiniteDuration = 10.seconds
// This is handled here so that it does not intermingle with the state machine,
// especially its timeouts.
def customReceive: Receive = {
case _: RecordMetadata => // The response from publishing test actions to the MessageProducer. Nothing needs to be done with it.
}
override def receive: Receive = customReceive.orElse(super.receive)
/** Always start Unhealthy. Then the invoker receives some test activations and becomes Healthy. */
startWith(Unhealthy, InvokerInfo(new RingBuffer[InvocationFinishedResult](InvokerActor.bufferSize)))
/** An Offline invoker represents an existing but broken invoker. This means that it no longer sends pings. */
when(Offline) {
case Event(_: PingMessage, _) => goto(Unhealthy)
}
// To be used for all states that should send test actions to reverify the invoker
val healthPingingState: StateFunction = {
case Event(_: PingMessage, _) => stay
case Event(StateTimeout, _) => goto(Offline)
case Event(Tick, _) =>
invokeTestAction()
stay
}
/** An Unhealthy invoker represents an invoker that was not able to handle actions successfully. */
when(Unhealthy, stateTimeout = healthyTimeout)(healthPingingState)
/** An Unresponsive invoker represents an invoker that is not responding with active acks in a timely manner */
when(Unresponsive, stateTimeout = healthyTimeout)(healthPingingState)
/**
 * A Healthy invoker is characterized by continuously getting pings. It will go Offline
 * if no ping arrives within the healthy timeout.
*/
when(Healthy, stateTimeout = healthyTimeout) {
case Event(_: PingMessage, _) => stay
case Event(StateTimeout, _) => goto(Offline)
}
/** Handle the completion of an Activation in every state. */
whenUnhandled {
case Event(cm: InvocationFinishedMessage, info) => handleCompletionMessage(cm.result, info.buffer)
}
/** Logging on Transition change */
onTransition {
case _ -> newState if !newState.isUsable =>
transid.mark(
this,
LoggingMarkers.LOADBALANCER_INVOKER_STATUS_CHANGE(newState.asString),
s"$name is ${newState.asString}",
akka.event.Logging.WarningLevel)
case _ -> newState if newState.isUsable => logging.info(this, s"$name is ${newState.asString}")
}
// To be used for all states that should send test actions to reverify the invoker
def healthPingingTransitionHandler(state: InvokerState): TransitionHandler = {
case _ -> `state` =>
invokeTestAction()
setTimer(InvokerActor.timerName, Tick, 1.minute, repeat = true)
case `state` -> _ => cancelTimer(InvokerActor.timerName)
}
onTransition(healthPingingTransitionHandler(Unhealthy))
onTransition(healthPingingTransitionHandler(Unresponsive))
initialize()
/**
 * Handling for active acks. This method saves the result (successful or unsuccessful)
 * into a RingBuffer and checks whether the InvokerActor's state has to change.
*
* @param result: result of Activation
* @param buffer to be used
*/
private def handleCompletionMessage(result: InvocationFinishedResult,
buffer: RingBuffer[InvocationFinishedResult]) = {
buffer.add(result)
// If the action was successful, the invoker is likely Healthy again, so a new test action
// is executed immediately to flush the errors out of the RingBuffer as fast as possible.
// The actions that arrive while the invoker is unhealthy are most likely health actions.
// It is possible they are normal user actions as well. This can happen if such actions were in the
// invoker queue or in progress while the invoker's status flipped to Unhealthy.
if (result == InvocationFinishedResult.Success && stateName == Unhealthy) {
invokeTestAction()
}
// Stay Healthy if the activation was successful.
// Stay Offline even if an active-ack reaches the controller.
if ((stateName == Healthy && result == InvocationFinishedResult.Success) || stateName == Offline) {
stay
} else {
val entries = buffer.toList
// Go to Unhealthy or Unresponsive respectively if the buffer contains more errors than tolerated; otherwise go to Healthy
if (entries.count(_ == InvocationFinishedResult.SystemError) > InvokerActor.bufferErrorTolerance) {
gotoIfNotThere(Unhealthy)
} else if (entries.count(_ == InvocationFinishedResult.Timeout) > InvokerActor.bufferErrorTolerance) {
gotoIfNotThere(Unresponsive)
} else {
gotoIfNotThere(Healthy)
}
}
}
/**
* Creates an activation request with the given action and sends it to the InvokerPool.
* The InvokerPool redirects it to the invoker which is represented by this InvokerActor.
*/
private def invokeTestAction() = {
InvokerPool.healthAction(controllerInstance).map { action =>
val activationMessage = ActivationMessage(
// Use the sid of the InvokerSupervisor as tid
transid = transid,
action = action.fullyQualifiedName(true),
// Use an empty DocRevision to force the invoker to pull the action from the db every time
revision = DocRevision.empty,
user = InvokerPool.healthActionIdentity,
// Create a new Activation ID for this activation
activationId = new ActivationIdGenerator {}.make(),
rootControllerIndex = controllerInstance,
blocking = false,
content = None,
initArgs = Set.empty)
context.parent ! ActivationRequest(activationMessage, invokerInstance)
}
}
/**
* Only change the state if the currentState is not the newState.
*
* @param newState of the InvokerActor
*/
private def gotoIfNotThere(newState: InvokerState) = {
if (stateName == newState) stay() else goto(newState)
}
}
object InvokerActor {
def props(invokerInstance: InvokerInstanceId, controllerInstance: ControllerInstanceId) =
Props(new InvokerActor(invokerInstance, controllerInstance))
val bufferSize = 10
val bufferErrorTolerance = 3
val timerName = "testActionTimer"
}
|
jeremiaswerner/openwhisk
|
core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/InvokerSupervision.scala
|
Scala
|
apache-2.0
| 18,064 |
/*
* Copyright 2009-2010 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ccf.session
trait Message {
def send(s: Session): (Session, Option[SessionResponse])
protected def send(s: Session, request: SessionRequest, channels: Set[ChannelId]): (Session, Option[SessionResponse]) = {
val nextSession = s.next(channels)
val response = s.connection.send(request.transportRequest).map(SessionResponse(_, request))
(nextSession, response)
}
}
object Message {
def apply(request: SessionRequest): Message = MessageFactory.message(request)
case class Join(channelId: ChannelId) extends Message {
def send(s: Session): (Session, Option[SessionResponse]) =
if (!s.channels(channelId)) send(s, JoinRequest(s, channelId), s.channels + channelId) else (s, None)
}
case class Part(channelId: ChannelId) extends Message {
def send(s: Session): (Session, Option[SessionResponse]) =
if (s.channels(channelId)) send(s, PartRequest(s, channelId), s.channels - channelId) else (s, None)
}
case class InChannel(requestType: String, channelId: ChannelId, content: Option[Any]) extends Message {
def send(s: Session): (Session, Option[SessionResponse]) =
if (s.channels(channelId)) send(s, InChannelRequest(s, requestType, channelId, content), s.channels) else (s, None)
}
case class OperationContext(channelId: ChannelId, context: ccf.messaging.OperationContext) extends Message {
def send(s: Session): (Session, Option[SessionResponse]) = {
if (s.channels(channelId)) send(s, OperationContextRequest(s, channelId, context), s.channels) else (s, None)
}
}
case object Shutdown
}
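// Hedged usage sketch (not from the original source): each Message threads a
// Session through `send`, returning the successor session and an optional
// response. The values `session` and `channel` are assumptions for
// illustration.
//
// val (afterJoin, joinResponse) = Message.Join(channel).send(session)
// // Joining an already-joined channel is a no-op and yields (session, None).
// val (afterPart, partResponse) = Message.Part(channel).send(afterJoin)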
|
akisaarinen/ccf
|
ccf/src/main/scala/ccf/session/Message.scala
|
Scala
|
apache-2.0
| 2,200 |
/*
Deduction Tactics
Copyright (C) 2012-2017 Raymond Dodge
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.rayrobdod.boardGame
import scala.collection.immutable.{Seq, Map}
/**
 * A set of spaces whose connections are determined by the indices
 * of space classes in this field
*
* @group Generic
* @constructor
* @tparam SpaceClass the space model
* @tparam Index the key used to specify a space from this field
* @tparam SpaceType the spaces contained in this tiling
 * @param classes the mapping from indices to space classes
* @param generator a function which will generate the field's spaces
*/
final class Field[SpaceClass, Index, SpaceType <: SpaceLike[SpaceClass, SpaceType]](
private val classes:Map[Index, SpaceClass]
)(implicit
private val generator:Field.SpaceGenerator[SpaceClass, Index, SpaceType]
) extends Tiling[SpaceClass, Index, SpaceType] {
override def spaceClass(idx:Index):Option[SpaceClass] = classes.get(idx)
override def mapIndex[A](f:Index => A):Seq[A] = classes.keySet.to[Seq].map(f)
override def foreachIndex(f:Index => Unit):Unit = classes.keySet.foreach(f)
override def space(idx:Index):Option[SpaceType] = this.spaceClass(idx).map{sc => generator.apply(sc, idx, this)}
override def hashCode:Int = this.classes.hashCode
override def equals(other:Any):Boolean = other match {
case other2:Field[_, _, _] => {
other2.classes == this.classes &&
other2.generator == this.generator
}
case _ => false
}
}
/**
* Implicits that either are used by Field, or that add methods to certain types
* of fields
* @group Generic
*/
object Field {
/**
 * A function that generates field spaces of a particular shape that exist in the specified field
*/
trait SpaceGenerator[SpaceClass, Index, SpaceType <: SpaceLike[SpaceClass, SpaceType]] {
def apply(sc:SpaceClass, index:Index, field:Field[SpaceClass, Index, SpaceType]):SpaceType
}
/**
* Extra methods for `Field[_, ElongatedTriangularIndex, ElongatedTriangularSpace[_]]`
*/
implicit final class ElongatedTriangularFieldOps[SpaceClass](
val backing:Field[SpaceClass, ElongatedTriangularIndex, ElongatedTriangularSpace[SpaceClass]]
) extends AnyVal {
/** Gets the space at ((x, y, ElongatedTriangularType.Square)), but with a more specific type */
def squareSpace(x:Int, y:Int):Option[ElongatedTriangularSpace.Square[SpaceClass]] = {
backing.spaceClass(ElongatedTriangularIndex(x, y, ElongatedTriangularType.Square))
.map{sc => new MySquareElongatedTriangularSpace(sc, backing, x, y)}
}
/** Gets the space at ((x, y, ElongatedTriangularType.NorthTri)), but with a more specific type */
def northTriSpace(x:Int, y:Int):Option[ElongatedTriangularSpace.Triangle1[SpaceClass]] = {
backing.spaceClass(ElongatedTriangularIndex(x, y, ElongatedTriangularType.NorthTri))
.map{sc => new MyTriangle1ElongatedTriangularSpace(sc, backing, x, y)}
}
/** Gets the space at ((x, y, ElongatedTriangularType.SouthTri)), but with a more specific type */
def southTriSpace(x:Int, y:Int):Option[ElongatedTriangularSpace.Triangle2[SpaceClass]] = {
backing.spaceClass(ElongatedTriangularIndex(x, y, ElongatedTriangularType.SouthTri))
.map{sc => new MyTriangle2ElongatedTriangularSpace(sc, backing, x, y)}
}
}
implicit def rectangularSpaceGenerator[SpaceClass]:SpaceGenerator[SpaceClass, RectangularIndex, RectangularSpace[SpaceClass]] = new RectangularSpaceGenerator[SpaceClass]
private final class RectangularSpaceGenerator[SpaceClass] extends SpaceGenerator[SpaceClass, RectangularIndex, RectangularSpace[SpaceClass]] {
override def apply(sc:SpaceClass, index:RectangularIndex, field:Field[SpaceClass, RectangularIndex, RectangularSpace[SpaceClass]]):RectangularSpace[SpaceClass] = {
new MyRectangularSpace[SpaceClass](sc, index, field)
}
override def hashCode:Int = 23
override def equals(other:Any):Boolean = other match {
case other2:RectangularSpaceGenerator[_] => true
case _ => false
}
}
private final class MyRectangularSpace[SpaceClass](
sc:SpaceClass,
index:RectangularIndex,
private val field:Field[SpaceClass, RectangularIndex, RectangularSpace[SpaceClass]]
) extends RectangularSpace[SpaceClass] {
private val (x, y) = index
override def typeOfSpace:SpaceClass = sc
override def west:Option[RectangularSpace[SpaceClass]] = field.space((x - 1, y))
override def north:Option[RectangularSpace[SpaceClass]] = field.space((x, y - 1))
override def east:Option[RectangularSpace[SpaceClass]] = field.space((x + 1, y))
override def south:Option[RectangularSpace[SpaceClass]] = field.space((x, y + 1))
override def toString:String = s"RectangularField.Space(typ = $typeOfSpace, x = $x, y = $y, field = $field)"
override def hashCode:Int = x * 31 + y
override def equals(other:Any):Boolean = other match {
case other2:MyRectangularSpace[_] =>
other2.field == this.field &&
other2.x == this.x &&
other2.y == this.y
case _ => false
}
}
implicit def horizontalHexagonalSpaceGenerator[SpaceClass]:SpaceGenerator[SpaceClass, HorizontalHexagonalIndex, HorizontalHexagonalSpace[SpaceClass]] = new HorizontalHexagonalSpaceGenerator[SpaceClass]
private final class HorizontalHexagonalSpaceGenerator[SpaceClass] extends SpaceGenerator[SpaceClass, HorizontalHexagonalIndex, HorizontalHexagonalSpace[SpaceClass]] {
override def apply(
sc:SpaceClass
, index:HorizontalHexagonalIndex
, field:Field[SpaceClass, HorizontalHexagonalIndex, HorizontalHexagonalSpace[SpaceClass]]
):HorizontalHexagonalSpace[SpaceClass] = {
new MyHorizontalHexagonalSpace[SpaceClass](sc, index, field)
}
override def hashCode:Int = 24
override def equals(other:Any):Boolean = other match {
case other2:HorizontalHexagonalSpaceGenerator[_] => true
case _ => false
}
}
private final class MyHorizontalHexagonalSpace[SpaceClass](
sc:SpaceClass,
index:HorizontalHexagonalIndex,
private val field:Field[SpaceClass, HorizontalHexagonalIndex, HorizontalHexagonalSpace[SpaceClass]]
) extends HorizontalHexagonalSpace[SpaceClass] {
private val (ew, nwse) = index
override def typeOfSpace:SpaceClass = sc
override def northwest:Option[HorizontalHexagonalSpace[SpaceClass]] = field.space((ew, nwse - 1))
override def southeast:Option[HorizontalHexagonalSpace[SpaceClass]] = field.space((ew, nwse + 1))
override def west:Option[HorizontalHexagonalSpace[SpaceClass]] = field.space((ew - 1, nwse))
override def east:Option[HorizontalHexagonalSpace[SpaceClass]] = field.space((ew + 1, nwse))
override def northeast:Option[HorizontalHexagonalSpace[SpaceClass]] = field.space((ew + 1, nwse - 1))
override def southwest:Option[HorizontalHexagonalSpace[SpaceClass]] = field.space((ew - 1, nwse + 1))
override def toString:String = s"HorizontalHexagonalField.Space(typ = $typeOfSpace, ew = $ew, nwse = $nwse, field = $field)"
override def hashCode:Int = ew * 31 + nwse
override def equals(other:Any):Boolean = other match {
case other2:MyHorizontalHexagonalSpace[_] =>
other2.field == this.field &&
other2.ew == this.ew &&
other2.nwse == this.nwse
case _ => false
}
}
implicit def elongatedTriangularSpaceGenerator[SpaceClass]:SpaceGenerator[SpaceClass, ElongatedTriangularIndex, ElongatedTriangularSpace[SpaceClass]] = new ElongatedTriangularSpaceGenerator[SpaceClass]
private final class ElongatedTriangularSpaceGenerator[SpaceClass] extends SpaceGenerator[SpaceClass, ElongatedTriangularIndex, ElongatedTriangularSpace[SpaceClass]] {
override def apply(
sc:SpaceClass
, index:ElongatedTriangularIndex
, field:Field[SpaceClass, ElongatedTriangularIndex, ElongatedTriangularSpace[SpaceClass]]
):ElongatedTriangularSpace[SpaceClass] = {
index.typ match {
case ElongatedTriangularType.Square => new MySquareElongatedTriangularSpace(sc, field, index.x, index.y)
case ElongatedTriangularType.NorthTri => new MyTriangle1ElongatedTriangularSpace(sc, field, index.x, index.y)
case ElongatedTriangularType.SouthTri => new MyTriangle2ElongatedTriangularSpace(sc, field, index.x, index.y)
}
}
override def hashCode:Int = 25
override def equals(other:Any):Boolean = other match {
case other2:ElongatedTriangularSpaceGenerator[_] => true
case _ => false
}
}
private final class MySquareElongatedTriangularSpace[SpaceClass](
override val typeOfSpace:SpaceClass,
private val field:Field[SpaceClass, ElongatedTriangularIndex, ElongatedTriangularSpace[SpaceClass]],
private val x:Int,
private val y:Int
) extends ElongatedTriangularSpace.Square[SpaceClass] {
override def north:Option[ElongatedTriangularSpace.Triangle1[SpaceClass]] = field.northTriSpace(x, y)
override def south:Option[ElongatedTriangularSpace.Triangle2[SpaceClass]] = field.southTriSpace(x, y)
override def east:Option[ElongatedTriangularSpace.Square[SpaceClass]] = field.squareSpace(x + 1, y)
override def west:Option[ElongatedTriangularSpace.Square[SpaceClass]] = field.squareSpace(x - 1, y)
override def toString:String = s"ElongatedTriangularField.Square(typ = $typeOfSpace, x = $x, y = $y, field = ...)"
override def hashCode:Int = x * 93 + y * 3
override def equals(other:Any):Boolean = other match {
case other2:MySquareElongatedTriangularSpace[_] =>
other2.field == this.field &&
other2.x == this.x &&
other2.y == this.y
case _ => false
}
}
private final class MyTriangle1ElongatedTriangularSpace[SpaceClass](
override val typeOfSpace:SpaceClass,
private val field:Field[SpaceClass, ElongatedTriangularIndex, ElongatedTriangularSpace[SpaceClass]],
private val x:Int,
private val y:Int
) extends ElongatedTriangularSpace.Triangle1[SpaceClass] {
def south:Option[ElongatedTriangularSpace.Square[SpaceClass]] = field.squareSpace(x, y)
def northEast:Option[ElongatedTriangularSpace.Triangle2[SpaceClass]] = field.southTriSpace(x + (if (y % 2 == 0) {-1} else {0}), y - 1)
def northWest:Option[ElongatedTriangularSpace.Triangle2[SpaceClass]] = field.southTriSpace(x + (if (y % 2 == 0) {0} else {1}), y - 1)
override def toString:String = s"ElongatedTriangularField.Triangle1(typ = $typeOfSpace, x = $x, y = $y, field = ...)"
override def hashCode:Int = x * 93 + y * 3 + 1
override def equals(other:Any):Boolean = other match {
case other2:MyTriangle1ElongatedTriangularSpace[_] =>
other2.field == this.field &&
other2.x == this.x &&
other2.y == this.y
case _ => false
}
}
private final class MyTriangle2ElongatedTriangularSpace[SpaceClass](
override val typeOfSpace:SpaceClass,
private val field:Field[SpaceClass, ElongatedTriangularIndex, ElongatedTriangularSpace[SpaceClass]],
private val x:Int,
private val y:Int
) extends ElongatedTriangularSpace.Triangle2[SpaceClass] {
def north:Option[ElongatedTriangularSpace.Square[SpaceClass]] = field.squareSpace(x, y)
def southWest:Option[ElongatedTriangularSpace.Triangle1[SpaceClass]] = field.northTriSpace(x + (if (y % 2 == 0) {0} else {1}), y + 1)
def southEast:Option[ElongatedTriangularSpace.Triangle1[SpaceClass]] = field.northTriSpace(x + (if (y % 2 == 0) {-1} else {0}), y + 1)
override def toString:String = s"ElongatedTriangularField.Triangle2(typ = $typeOfSpace, x = $x, y = $y, field = ...)"
override def hashCode:Int = x * 93 + y * 3 + 2
override def equals(other:Any):Boolean = other match {
case other2:MyTriangle2ElongatedTriangularSpace[_] =>
other2.field == this.field &&
other2.x == this.x &&
other2.y == this.y
case _ => false
}
}
}
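// Hedged usage sketch (not part of the original file): constructing a small
// rectangular field. The implicit rectangularSpaceGenerator above supplies
// the SpaceGenerator; the String space classes are chosen purely for
// illustration.
//
// val classes: Map[RectangularIndex, String] = Map(
//   (0, 0) -> "grass", (1, 0) -> "water",
//   (0, 1) -> "grass", (1, 1) -> "grass")
// val field = new Field[String, RectangularIndex, RectangularSpace[String]](classes)
// // Spaces are generated on demand and know their neighbours:
// field.space((0, 0)).flatMap(_.east).map(_.typeOfSpace)  // Some("water")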
|
rayrobdod/boardGame
|
Model/src/main/scala/Field.scala
|
Scala
|
gpl-3.0
| 12,169 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.frontend.logicalplan
import slamdata.Predef._
import quasar.common.data.Data
import quasar.std.StdLib._
import matryoshka._
import matryoshka.implicits._
sealed abstract class JoinDir(val name: String) {
import structural.MapProject
val data: Data = Data.Str(name)
def const[T](implicit T: Corecursive.Aux[T, LogicalPlan]): T =
constant[T](data).embed
def projectFrom[T](lp: T)(implicit T: Corecursive.Aux[T, LogicalPlan]): T =
MapProject(lp, const).embed
}
object JoinDir {
final case object Left extends JoinDir("left")
final case object Right extends JoinDir("right")
}
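// Hedged usage sketch (illustrative): projecting one side of a join result
// out of a logical plan `lp`. `projectFrom` wraps the plan in a MapProject
// keyed by the constant Data.Str("left") or Data.Str("right").
//
// def leftSide[T](lp: T)(implicit T: Corecursive.Aux[T, LogicalPlan]): T =
//   JoinDir.Left.projectFrom(lp)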
|
slamdata/slamengine
|
frontend/src/main/scala/quasar/frontend/logicalplan/JoinDir.scala
|
Scala
|
apache-2.0
| 1,220 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.binaryfile
import java.io.{File, IOException}
import java.nio.file.{Files, StandardOpenOption}
import java.sql.Timestamp
import scala.collection.JavaConverters._
import com.google.common.io.{ByteStreams, Closeables}
import org.apache.hadoop.fs.{FileStatus, FileSystem, GlobFilter, Path}
import org.mockito.Mockito.{mock, when}
import org.apache.spark.SparkException
import org.apache.spark.sql.{DataFrame, QueryTest, Row}
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.execution.datasources.PartitionedFile
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.internal.SQLConf.SOURCES_BINARY_FILE_MAX_LENGTH
import org.apache.spark.sql.sources._
import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class BinaryFileFormatSuite extends QueryTest with SharedSparkSession {
import BinaryFileFormat._
private var testDir: String = _
private var fsTestDir: Path = _
private var fs: FileSystem = _
private var file1Status: FileStatus = _
override def beforeAll(): Unit = {
super.beforeAll()
testDir = Utils.createTempDir().getAbsolutePath
fsTestDir = new Path(testDir)
fs = fsTestDir.getFileSystem(sparkContext.hadoopConfiguration)
val year2014Dir = new File(testDir, "year=2014")
year2014Dir.mkdir()
val year2015Dir = new File(testDir, "year=2015")
year2015Dir.mkdir()
val file1 = new File(year2014Dir, "data.txt")
Files.write(
file1.toPath,
Seq("2014-test").asJava,
StandardOpenOption.CREATE, StandardOpenOption.WRITE
)
file1Status = fs.getFileStatus(new Path(file1.getPath))
val file2 = new File(year2014Dir, "data2.bin")
Files.write(
file2.toPath,
"2014-test-bin".getBytes,
StandardOpenOption.CREATE, StandardOpenOption.WRITE
)
val file3 = new File(year2015Dir, "bool.csv")
Files.write(
file3.toPath,
Seq("bool", "True", "False", "true").asJava,
StandardOpenOption.CREATE, StandardOpenOption.WRITE
)
val file4 = new File(year2015Dir, "data.bin")
Files.write(
file4.toPath,
"2015-test".getBytes,
StandardOpenOption.CREATE, StandardOpenOption.WRITE
)
}
test("BinaryFileFormat methods") {
val format = new BinaryFileFormat
assert(format.shortName() === "binaryFile")
assert(format.isSplitable(spark, Map.empty, new Path("any")) === false)
assert(format.inferSchema(spark, Map.empty, Seq.empty) === Some(BinaryFileFormat.schema))
assert(BinaryFileFormat.schema === StructType(Seq(
StructField("path", StringType, false),
StructField("modificationTime", TimestampType, false),
StructField("length", LongType, false),
StructField("content", BinaryType, true))))
}
def testBinaryFileDataSource(pathGlobFilter: String): Unit = {
val dfReader = spark.read.format(BINARY_FILE)
if (pathGlobFilter != null) {
dfReader.option("pathGlobFilter", pathGlobFilter)
}
val resultDF = dfReader.load(testDir).select(
col(PATH),
col(MODIFICATION_TIME),
col(LENGTH),
col(CONTENT),
col("year") // this is a partition column
)
val expectedRowSet = new collection.mutable.HashSet[Row]()
val globFilter = if (pathGlobFilter == null) null else new GlobFilter(pathGlobFilter)
for (partitionDirStatus <- fs.listStatus(fsTestDir)) {
val dirPath = partitionDirStatus.getPath
val partitionName = dirPath.getName.split("=")(1)
val year = partitionName.toInt // partition column "year" value which is `Int` type
for (fileStatus <- fs.listStatus(dirPath)) {
if (globFilter == null || globFilter.accept(fileStatus.getPath)) {
val fpath = fileStatus.getPath.toString
val flen = fileStatus.getLen
val modificationTime = new Timestamp(fileStatus.getModificationTime)
val fcontent = {
val stream = fs.open(fileStatus.getPath)
val content = try {
ByteStreams.toByteArray(stream)
} finally {
Closeables.close(stream, true)
}
content
}
val row = Row(fpath, modificationTime, flen, fcontent, year)
expectedRowSet.add(row)
}
}
}
checkAnswer(resultDF, expectedRowSet.toSeq)
}
test("binary file data source test") {
testBinaryFileDataSource(null)
testBinaryFileDataSource("*.*")
testBinaryFileDataSource("*.bin")
testBinaryFileDataSource("*.txt")
testBinaryFileDataSource("*.{txt,csv}")
testBinaryFileDataSource("*.json")
}
test("binary file data source do not support write operation") {
val df = spark.read.format(BINARY_FILE).load(testDir)
withTempDir { tmpDir =>
val thrown = intercept[UnsupportedOperationException] {
df.write
.format(BINARY_FILE)
.save(tmpDir + "/test_save")
}
assert(thrown.getMessage.contains("Write is not supported for binary file data source"))
}
}
def mockFileStatus(length: Long, modificationTime: Long): FileStatus = {
val status = mock(classOf[FileStatus])
when(status.getLen).thenReturn(length)
when(status.getModificationTime).thenReturn(modificationTime)
when(status.toString).thenReturn(
s"FileStatus($LENGTH=$length, $MODIFICATION_TIME=$modificationTime)")
status
}
def testCreateFilterFunction(
filters: Seq[Filter],
testCases: Seq[(FileStatus, Boolean)]): Unit = {
val funcs = filters.map(BinaryFileFormat.createFilterFunction)
testCases.foreach { case (status, expected) =>
assert(funcs.forall(f => f(status)) === expected,
s"$filters applied to $status should be $expected.")
}
}
test("createFilterFunction") {
// test filter applied on `length` column
val l1 = mockFileStatus(1L, 0L)
val l2 = mockFileStatus(2L, 0L)
val l3 = mockFileStatus(3L, 0L)
testCreateFilterFunction(
Seq(LessThan(LENGTH, 2L)),
Seq((l1, true), (l2, false), (l3, false)))
testCreateFilterFunction(
Seq(LessThanOrEqual(LENGTH, 2L)),
Seq((l1, true), (l2, true), (l3, false)))
testCreateFilterFunction(
Seq(GreaterThan(LENGTH, 2L)),
Seq((l1, false), (l2, false), (l3, true)))
testCreateFilterFunction(
Seq(GreaterThanOrEqual(LENGTH, 2L)),
Seq((l1, false), (l2, true), (l3, true)))
testCreateFilterFunction(
Seq(EqualTo(LENGTH, 2L)),
Seq((l1, false), (l2, true), (l3, false)))
testCreateFilterFunction(
Seq(Not(EqualTo(LENGTH, 2L))),
Seq((l1, true), (l2, false), (l3, true)))
testCreateFilterFunction(
Seq(And(GreaterThan(LENGTH, 1L), LessThan(LENGTH, 3L))),
Seq((l1, false), (l2, true), (l3, false)))
testCreateFilterFunction(
Seq(Or(LessThanOrEqual(LENGTH, 1L), GreaterThanOrEqual(LENGTH, 3L))),
Seq((l1, true), (l2, false), (l3, true)))
// test filter applied on `modificationTime` column
val t1 = mockFileStatus(0L, 1L)
val t2 = mockFileStatus(0L, 2L)
val t3 = mockFileStatus(0L, 3L)
testCreateFilterFunction(
Seq(LessThan(MODIFICATION_TIME, new Timestamp(2L))),
Seq((t1, true), (t2, false), (t3, false)))
testCreateFilterFunction(
Seq(LessThanOrEqual(MODIFICATION_TIME, new Timestamp(2L))),
Seq((t1, true), (t2, true), (t3, false)))
testCreateFilterFunction(
Seq(GreaterThan(MODIFICATION_TIME, new Timestamp(2L))),
Seq((t1, false), (t2, false), (t3, true)))
testCreateFilterFunction(
Seq(GreaterThanOrEqual(MODIFICATION_TIME, new Timestamp(2L))),
Seq((t1, false), (t2, true), (t3, true)))
testCreateFilterFunction(
Seq(EqualTo(MODIFICATION_TIME, new Timestamp(2L))),
Seq((t1, false), (t2, true), (t3, false)))
testCreateFilterFunction(
Seq(Not(EqualTo(MODIFICATION_TIME, new Timestamp(2L)))),
Seq((t1, true), (t2, false), (t3, true)))
testCreateFilterFunction(
Seq(And(GreaterThan(MODIFICATION_TIME, new Timestamp(1L)),
LessThan(MODIFICATION_TIME, new Timestamp(3L)))),
Seq((t1, false), (t2, true), (t3, false)))
testCreateFilterFunction(
Seq(Or(LessThanOrEqual(MODIFICATION_TIME, new Timestamp(1L)),
GreaterThanOrEqual(MODIFICATION_TIME, new Timestamp(3L)))),
Seq((t1, true), (t2, false), (t3, true)))
// test filters applied on both columns
testCreateFilterFunction(
Seq(And(GreaterThan(LENGTH, 2L), LessThan(MODIFICATION_TIME, new Timestamp(2L)))),
Seq((l1, false), (l2, false), (l3, true), (t1, false), (t2, false), (t3, false)))
// test nested filters
testCreateFilterFunction(
// NOT (length > 2 OR modificationTime < 2)
Seq(Not(Or(GreaterThan(LENGTH, 2L), LessThan(MODIFICATION_TIME, new Timestamp(2L))))),
Seq((l1, false), (l2, false), (l3, false), (t1, false), (t2, true), (t3, true)))
}
test("buildReader") {
def testBuildReader(fileStatus: FileStatus, filters: Seq[Filter], expected: Boolean): Unit = {
val format = new BinaryFileFormat
val reader = format.buildReaderWithPartitionValues(
sparkSession = spark,
dataSchema = schema,
partitionSchema = StructType(Nil),
requiredSchema = schema,
filters = filters,
options = Map.empty,
hadoopConf = spark.sessionState.newHadoopConf())
val partitionedFile = mock(classOf[PartitionedFile])
when(partitionedFile.filePath).thenReturn(fileStatus.getPath.toString)
assert(reader(partitionedFile).nonEmpty === expected,
s"Filters $filters applied to $fileStatus should be $expected.")
}
testBuildReader(file1Status, Seq.empty, true)
testBuildReader(file1Status, Seq(LessThan(LENGTH, file1Status.getLen)), false)
testBuildReader(file1Status, Seq(
LessThan(MODIFICATION_TIME, new Timestamp(file1Status.getModificationTime))
), false)
testBuildReader(file1Status, Seq(
EqualTo(LENGTH, file1Status.getLen),
EqualTo(MODIFICATION_TIME, file1Status.getModificationTime)
), true)
}
private def readBinaryFile(file: File, requiredSchema: StructType): Row = {
val format = new BinaryFileFormat
val reader = format.buildReaderWithPartitionValues(
sparkSession = spark,
dataSchema = schema,
partitionSchema = StructType(Nil),
requiredSchema = requiredSchema,
filters = Seq.empty,
options = Map.empty,
hadoopConf = spark.sessionState.newHadoopConf()
)
val partitionedFile = mock(classOf[PartitionedFile])
when(partitionedFile.filePath).thenReturn(file.getPath)
val encoder = RowEncoder(requiredSchema).resolveAndBind()
encoder.fromRow(reader(partitionedFile).next())
}
test("column pruning") {
withTempPath { file =>
val content = "123".getBytes
Files.write(file.toPath, content, StandardOpenOption.CREATE, StandardOpenOption.WRITE)
val actual = readBinaryFile(file, StructType(schema.takeRight(3)))
val expected = Row(new Timestamp(file.lastModified()), content.length, content)
assert(actual === expected)
}
}
test("column pruning - non-readable file") {
withTempPath { file =>
val content = "abc".getBytes
Files.write(file.toPath, content, StandardOpenOption.CREATE, StandardOpenOption.WRITE)
file.setReadable(false)
// If content is selected, it throws an exception because it's not readable.
intercept[IOException] {
readBinaryFile(file, StructType(schema(CONTENT) :: Nil))
}
// Otherwise, it should be able to read.
assert(
readBinaryFile(file, StructType(schema(LENGTH) :: Nil)) === Row(content.length),
"Get length should not read content.")
assert(
spark.read.format(BINARY_FILE).load(file.getPath).count() === 1,
"Count should not read content.")
}
}
test("fail fast and do not attempt to read if a file is too big") {
assert(spark.conf.get(SOURCES_BINARY_FILE_MAX_LENGTH) === Int.MaxValue)
withTempPath { file =>
val path = file.getPath
val content = "123".getBytes
Files.write(file.toPath, content, StandardOpenOption.CREATE, StandardOpenOption.WRITE)
def readContent(): DataFrame = {
spark.read.format(BINARY_FILE)
.load(path)
.select(CONTENT)
}
val expected = Seq(Row(content))
QueryTest.checkAnswer(readContent(), expected)
withSQLConf(SOURCES_BINARY_FILE_MAX_LENGTH.key -> content.length.toString) {
QueryTest.checkAnswer(readContent(), expected)
}
// Disable read. If the implementation attempts to read, the exception would be different.
file.setReadable(false)
val caught = intercept[SparkException] {
withSQLConf(SOURCES_BINARY_FILE_MAX_LENGTH.key -> (content.length - 1).toString) {
QueryTest.checkAnswer(readContent(), expected)
}
}
assert(caught.getMessage.contains("exceeds the max length allowed"))
}
}
test("SPARK-28030: support chars in file names that require URL encoding") {
withTempDir { dir =>
val file = new File(dir, "test space.txt")
val content = "123".getBytes
Files.write(file.toPath, content, StandardOpenOption.CREATE, StandardOpenOption.WRITE)
val df = spark.read.format(BINARY_FILE).load(dir.getPath)
df.select(col(PATH), col(CONTENT)).first() match {
case Row(p: String, c: Array[Byte]) =>
assert(p.endsWith(file.getAbsolutePath), "should support space in file name")
assert(c === content, "should read file with space in file name")
}
}
}
}
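// Hedged usage sketch (not part of the suite): the user-facing read path
// these tests exercise. `spark` is assumed to be an active SparkSession and
// "/data/images" a hypothetical directory.
//
// val df = spark.read.format("binaryFile")
//   .option("pathGlobFilter", "*.png") // optional glob, as tested above
//   .load("/data/images")
// // Selecting everything except `content` avoids reading the file bodies.
// df.select("path", "length", "modificationTime").show()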
|
bdrillard/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/binaryfile/BinaryFileFormatSuite.scala
|
Scala
|
apache-2.0
| 14,561 |
package com.kakao.mango
import scala.language.implicitConversions
/** enables the usage of JSON conversions by importing this package
*
* {{{
* import com.kakao.mango.json._
*
* case class Example(hello: String)
*
* val obj = Example("world")
* println(toJson(obj)) // prints {"hello":"world"}
*
* val json = """{"hello":"world"}"""
* fromJson[Example](json) // returns Example("world")
* parseJson(json) // returns Map("hello" -> "world")
* }}}
*/
package object json extends JsonConverters
|
kakao/mango
|
mango-core/src/main/scala/com/kakao/mango/json/package.scala
|
Scala
|
apache-2.0
| 550 |
package io.questions.model.questionnaire
import io.questions.testdata.ExampleComponents
import io.questions.model.questionnaire.Element.{ NonRepeatingParent, Primitive, RepeatingParent }
import io.questions.model.questionnaire.FieldName.FieldNameStringSyntax
import io.questions.model.questionnaire.NodeMetadata.{ PageTag, SectionTag }
import io.questions.model.questionnaire.PrimitiveAnswer.{ IntAnswer, StringAnswer }
import io.questions.model.questionnaire.QuestionText.QuestionTextSyntax
import io.questions.model.questionnaire.nodekey.{ NodeKey, NodeKeyExtension }
import io.questions.testdata.samples.enumerations.Country
import io.questions.testdata.samples.samplequestionnaire.QuestionTypes.countryQuestion
object TestQuestionnaire {
// a questionnaire
val address: QuestionnaireNode =
QuestionnaireNode(
NodeKey("address"),
"address".fieldName,
"Address".text,
NonRepeatingParent(
QuestionnaireNode(nodekey.NodeKey("line1"), FieldName("line1"), QuestionText("Line 1"), Primitive(StringAnswer(None))),
QuestionnaireNode(nodekey.NodeKey("line2"), FieldName("line2"), QuestionText("Line 2"), Primitive(StringAnswer(None))),
QuestionnaireNode(nodekey.NodeKey("fromDate"), FieldName("fromDate"), QuestionText("From"), Primitive(StringAnswer(None))),
)
)
val firstName: QuestionnaireNode =
QuestionnaireNode(nodekey.NodeKey("firstName"), FieldName("firstName"), QuestionText("First name"), Primitive(StringAnswer(None)))
val lastName: QuestionnaireNode =
QuestionnaireNode(nodekey.NodeKey("lastName"), FieldName("lastName"), QuestionText("Last name"), Primitive(StringAnswer(None)))
val age: QuestionnaireNode =
QuestionnaireNode(nodekey.NodeKey("age"), FieldName("age"), QuestionText("Age"), Primitive(IntAnswer(None)))
val homeAddresses: QuestionnaireNode =
QuestionnaireNode(nodekey.NodeKey("homeAddresses"),
FieldName("homeAddresses"),
QuestionText("Home Addresses"),
RepeatingParent(address))
val countryOfNationality: QuestionnaireNode =
QuestionnaireNode(nodekey.NodeKey("countryOfNationality"),
FieldName("nationality"),
QuestionText("Nationality"),
Primitive(StringAnswer(None)))
val countryOfBirth: QuestionnaireNode = countryQuestion("countryOfBirth".fieldName, "Country of birth".text)
val countriesOfNationality: QuestionnaireNode =
QuestionnaireNode(nodekey.NodeKey("countriesOfNationality"),
FieldName("nationalities"),
QuestionText("Nationalities"),
RepeatingParent(countryOfNationality))
val firstPage: QuestionnaireNode =
QuestionnaireNode(
NodeKey("firstPage"),
FieldName("firstPage"),
QuestionText("First Page"),
NonRepeatingParent(firstName, lastName),
metadata = Seq(PageTag)
)
val secondPage: QuestionnaireNode =
QuestionnaireNode(
NodeKey("secondPage"),
FieldName("secondPage"),
QuestionText("Second Page"),
NonRepeatingParent(homeAddresses),
metadata = Seq(PageTag)
)
val thirdPage: QuestionnaireNode =
QuestionnaireNode(
NodeKey("thirdPage"),
FieldName("thirdPage"),
QuestionText("Third Page"),
NonRepeatingParent(age),
metadata = Seq(PageTag)
)
val fourthPage: QuestionnaireNode =
QuestionnaireNode(
NodeKey("fourthPage"),
FieldName("fourthPage"),
QuestionText("Fourth Page"),
NonRepeatingParent(countryOfBirth, countriesOfNationality),
metadata = Seq(PageTag)
)
val firstSection: QuestionnaireNode =
QuestionnaireNode(
NodeKey("firstSection"),
FieldName("firstSection"),
QuestionText("First Section"),
NonRepeatingParent(firstPage, secondPage),
metadata = Seq(SectionTag)
)
val secondSection: QuestionnaireNode =
QuestionnaireNode(
NodeKey("secondSection"),
FieldName("secondSection"),
QuestionText("Second Section"),
NonRepeatingParent(thirdPage, fourthPage),
metadata = Seq(SectionTag)
)
def questionnaire: QuestionnaireNode =
ExampleComponents
.standard(
NodeKey("personalQuestions"),
"personalQuestions".fieldName,
"Personal Questions".text,
NonRepeatingParent(firstSection, secondSection),
enums = Map(
Country.name → Country.values
)
)
implicit private def stringToFieldName(s: String): FieldName = FieldName(s)
private val homeAddressesKey = (questionnaire /-/ "homeAddresses").key
private val nationalitiesKey = (questionnaire /-/ "nationalities").key
val filledQuestionnaire: QuestionnaireNode = {
for {
q1 ← questionnaire.answerByPath(_ /-/ "firstName", StringAnswer(Some("Channing")))
q2 ← q1.answerByPath(_ /-/ "lastName", StringAnswer(Some("Walton")))
q3 ← q2.answerByPath(_ /-/ "age", IntAnswer(Some(60)))
q4 ← q3.answerByPath(_ /-/ "homeAddresses" / 0 / "line1", StringAnswer(Some("14 Orchid Drive")))
q5 ← q4.answerByPath(_ /-/ "homeAddresses" / 0 / "line2", StringAnswer(Some("Woking")))
q6 ← q5.appendChild(homeAddressesKey, NodeKeyExtension.random)
q7 ← q6.answerByPath(_ /-/ "homeAddresses" / 1 / "line1", StringAnswer(Some("Bloomfield Close")))
q8 ← q7.answerByPath(_ /-/ "homeAddresses" / 1 / "line2", StringAnswer(Some("Woking")))
q9 ← q8.appendChild(homeAddressesKey, NodeKeyExtension.random)
q10 ← q9.answerByPath(_ /-/ "homeAddresses" / 2 / "line1", StringAnswer(Some("Bisley")))
q11 ← q10.answerByPath(_ /-/ "homeAddresses" / 2 / "line2", StringAnswer(Some("Woking")))
q12 ← q11.answerByPath(_ /-/ "nationalities" / 0, StringAnswer(Some("UK")))
q13 ← q12.appendChild(nationalitiesKey, NodeKeyExtension.random)
q14 ← q13.answerByPath(_ /-/ "nationalities" / 1, StringAnswer(Some("USA")))
} yield q14
}.fold(err ⇒ throw new RuntimeException(err), identity)
}
// scalastyle:off
object PrintTestQuestionnaire extends App {
import TestQuestionnaire._
println(QuestionnaireNode.show(filledQuestionnaire))
import QuestionnaireNodePredicate._
import Path._
implicit private def stringToFieldName(s: String): FieldName = FieldName(s)
println(
(root / homeAddresses.keyBase) Ǝ (relative / (filledQuestionnaire /-/ "homeAddresses" / 0 / "line1").keyBase === "14 Orchid Drive") apply filledQuestionnaire
)
println(
(root / homeAddresses.keyBase) ∀ (relative / (filledQuestionnaire /-/ "homeAddresses" / 0 / "line2").keyBase === "Woking") apply filledQuestionnaire
)
println(
(root / homeAddresses.keyBase) Ǝ (root / (filledQuestionnaire /-/ "age").keyBase === 60) apply filledQuestionnaire
)
}
|
channingwalton/qanda
|
questionnaire/src/test/scala/io/questions/model/questionnaire/TestQuestionnaire.scala
|
Scala
|
mit
| 6,900 |
package io.github.mandar2812.dynaml.kernels
import breeze.linalg._
import io.github.mandar2812.dynaml.algebra.{PartitionedMatrix, PartitionedPSDMatrix}
import io.github.mandar2812.dynaml.utils
import org.apache.log4j.Logger
/**
 * Defines a trait that outlines the basic
* functionality requirements of an SVM Kernel
* */
trait SVMKernel[M] extends
CovarianceFunction[DenseVector[Double], Double, M]
with Serializable {
/**
* Builds an approximate nonlinear feature map
* which corresponds to an SVM Kernel. This is
* done using the Nystrom method i.e. approximating
* the eigenvalues and eigenvectors of the Kernel
* matrix of some data set.
*
* For each data point,
* calculate m dimensions of the
* feature map where m is the number
* of eigenvalues/vectors obtained from
* the Eigen Decomposition.
*
* phi_i(x) = (1/sqrt(eigenvalue(i)))*Sum(k, 1, m, K(k, x)*eigenvector(i)(k))
*
* @param decomposition The Eigenvalue decomposition calculated
* from the kernel matrix of the prototype
* subset.
* @param prototypes The prototype subset.
*
* @param data The dataset on which the feature map
* is to be applied.
*
* */
def featureMapping(decomposition: (DenseVector[Double], DenseMatrix[Double]))
(prototypes: List[DenseVector[Double]])
(data: DenseVector[Double])
: DenseVector[Double] = {
val kernel = DenseVector(prototypes.map((p) => this.evaluate(p, data)).toArray)
val buff: Transpose[DenseVector[Double]] = kernel.t * decomposition._2
val lambda: DenseVector[Double] = decomposition._1.map(lam => 1/math.sqrt(lam))
val ans = buff.t
ans *:* lambda
}
}
/**
* Defines a global singleton object [[SVMKernel]]
* having functions which can construct kernel matrices.
*/
object SVMKernel {
private val logger = Logger.getLogger(this.getClass)
/**
* This function constructs an [[SVMKernelMatrix]]
*
* @param mappedData Graphical model
* @param length Number of data points
* @param eval A function which calculates the value of the Kernel
* given two feature vectors.
*
* @return An [[SVMKernelMatrix]] object.
*
* */
def buildSVMKernelMatrix[S <: Seq[T], T](
mappedData: S,
length: Int,
eval: (T, T) => Double):
KernelMatrix[DenseMatrix[Double]] = {
val kernelIndex =
utils.combine(Seq(mappedData.zipWithIndex, mappedData.zipWithIndex))
.filter(s => s.head._2 >= s.last._2)
.map(s => ((s.head._2, s.last._2), eval(s.head._1, s.last._1)))
.toMap
val kernel = DenseMatrix.tabulate[Double](length, length){
(i, j) => if (i >= j) kernelIndex((i,j)) else kernelIndex((j,i))
}
//println(" Dimensions: " + kernel.rows + " x " + kernel.cols)
new SVMKernelMatrix(kernel, length)
}
def crossKernelMatrix[S <: Seq[T], T](data1: S, data2: S,
eval: (T, T) => Double)
: DenseMatrix[Double] = {
val kernelIndex =
utils.combine(Seq(data1.zipWithIndex, data2.zipWithIndex))
.map(s => ((s.head._2, s.last._2), eval(s.head._1, s.last._1)))
.toMap
//println(" Dimensions: " + data1.length + " x " + data2.length)
DenseMatrix.tabulate[Double](data1.length, data2.length){
(i, j) => kernelIndex((i,j))
}
}
def buildKernelGradMatrix[S <: Seq[T], T](
data1: S,
hyper_parameters: Seq[String],
eval: (T, T) => Double,
evalGrad: String => (T, T) => Double):
Map[String, DenseMatrix[Double]] = {
val (rows, cols) = (data1.length, data1.length)
//println("Constructing Kernel/Grad Matrices")
//println(" Dimensions: " + rows + " x " + cols)
val keys = Seq("kernel-matrix") ++ hyper_parameters
utils.combine(Seq(data1.zipWithIndex, data1.zipWithIndex))
.filter(s => s.head._2 >= s.last._2)
.flatMap(s => {
keys.map(k =>
if(k == "kernel-matrix") (k, ((s.head._2, s.last._2), eval(s.head._1, s.last._1)))
else (k, ((s.head._2, s.last._2), evalGrad(k)(s.head._1, s.last._1))))
}).groupBy(_._1).map(cl => {
//if (cl._1 == "kernel-matrix") //println("Constructing Kernel Matrix")
//else //println("Constructing Grad Matrix for: "+cl._1)
val kernelIndex = cl._2.map(_._2).toMap
(
cl._1,
DenseMatrix.tabulate[Double](rows, cols){
(i, j) => if (i >= j) kernelIndex((i,j)) else kernelIndex((j,i))
}
)
})
}
/**
* Returns the kernel matrix along with
* its derivatives for each hyper-parameter.
* */
def buildCrossKernelGradMatrix[S <: Seq[T], T](
data1: S, data2: S,
hyper_parameters: Seq[String],
eval: (T, T) => Double,
evalGrad: (String) => (T, T) => Double):
Map[String, DenseMatrix[Double]] = {
val (rows, cols) = (data1.length, data2.length)
//println("Constructing Kernel/Grad Matrices")
//println(" Dimensions: " + rows + " x " + cols)
val keys = Seq("kernel-matrix") ++ hyper_parameters
utils.combine(Seq(data1.zipWithIndex, data2.zipWithIndex))
.flatMap(s => {
keys.map(k =>
if(k == "kernel-matrix") (k, ((s.head._2, s.last._2), eval(s.head._1, s.last._1)))
else (k, ((s.head._2, s.last._2), evalGrad(k)(s.head._1, s.last._1))))
}).groupBy(_._1).map(cl => {
//if (cl._1 == "kernel-matrix") //println("Constructing Kernel Matrix")
//else //println("Constructing Grad Matrix for: "+cl._1)
val kernelIndex = cl._2.map(_._2).toMap
(
cl._1,
DenseMatrix.tabulate[Double](rows, cols){
(i, j) => kernelIndex((i,j))
}
)
})
}
def buildPartitionedKernelMatrix[S <: Seq[T], T](
data: S,
length: Long,
numElementsPerRowBlock: Int,
numElementsPerColBlock: Int,
eval: (T, T) => Double): PartitionedPSDMatrix = {
val (rows, cols) = (length, length)
//println("Constructing partitioned kernel matrix.")
//println("Dimension: " + rows + " x " + cols)
val (num_R_blocks, num_C_blocks) = (
math.ceil(rows.toDouble/numElementsPerRowBlock).toLong,
math.ceil(cols.toDouble/numElementsPerColBlock).toLong)
//println("Blocks: " + num_R_blocks + " x " + num_C_blocks)
val partitionedData = data.grouped(numElementsPerRowBlock).zipWithIndex.toStream
//println("~~~~~~~~~~~~~~~~~~~~~~~")
//println("Constructing Partitions")
new PartitionedPSDMatrix(
utils.combine(Seq(partitionedData, partitionedData))
.filter(c => c.head._2 >= c.last._2)
.toStream.map(c => {
val partitionIndex = (c.head._2.toLong, c.last._2.toLong)
//println(":- Partition: "+partitionIndex)
val matrix =
if(partitionIndex._1 == partitionIndex._2)
buildSVMKernelMatrix(c.head._1, c.head._1.length, eval).getKernelMatrix()
else crossKernelMatrix(c.head._1, c.last._1, eval)
(partitionIndex, matrix)
})
, rows, cols, num_R_blocks, num_C_blocks)
}
def crossPartitonedKernelMatrix[T, S <: Seq[T]](
data1: S, data2: S,
numElementsPerRowBlock: Int,
numElementsPerColBlock: Int,
eval: (T, T) => Double): PartitionedMatrix = {
val (rows, cols) = (data1.length, data2.length)
//println("Constructing cross partitioned kernel matrix.")
//println("Dimension: " + rows + " x " + cols)
val (num_R_blocks, num_C_blocks) = (
math.ceil(rows.toDouble/numElementsPerRowBlock).toLong,
math.ceil(cols.toDouble/numElementsPerColBlock).toLong)
//println("Blocks: " + num_R_blocks + " x " + num_C_blocks)
//println("~~~~~~~~~~~~~~~~~~~~~~~")
//println("Constructing Partitions")
new PartitionedMatrix(utils.combine(Seq(
data1.grouped(numElementsPerRowBlock).zipWithIndex.toStream,
data2.grouped(numElementsPerColBlock).zipWithIndex.toStream)
).toStream.map(c => {
val partitionIndex = (c.head._2.toLong, c.last._2.toLong)
//println(":- Partition: "+partitionIndex)
val matrix = crossKernelMatrix(c.head._1, c.last._1, eval)
(partitionIndex, matrix)
}), rows, cols, num_R_blocks, num_C_blocks)
}
def buildPartitionedKernelGradMatrix[S <: Seq[T], T](
data: S, length: Long,
numElementsPerRowBlock: Int,
numElementsPerColBlock: Int,
hyper_parameters: Seq[String],
eval: (T, T) => Double,
evalGrad: (String) => (T, T) => Double): Map[String, PartitionedPSDMatrix] = {
val (rows, cols) = (length, length)
//println("Constructing partitioned kernel matrix and its derivatives")
//println("Dimension: " + rows + " x " + cols)
val (num_R_blocks, num_C_blocks) = (
math.ceil(rows.toDouble/numElementsPerRowBlock).toLong,
math.ceil(cols.toDouble/numElementsPerColBlock).toLong)
//println("Blocks: " + num_R_blocks + " x " + num_C_blocks)
val partitionedData = data.grouped(numElementsPerRowBlock).zipWithIndex.toStream
//println("~~~~~~~~~~~~~~~~~~~~~~~")
//println("Constructing Partitions")
//Build the result using flatMap - reduce
utils.combine(Seq(partitionedData, partitionedData))
.filter(c => c.head._2 >= c.last._2)
.toStream.flatMap(c => {
val partitionIndex = (c.head._2.toLong, c.last._2.toLong)
print("\\n")
//println(":- Partition: "+partitionIndex)
if(partitionIndex._1 == partitionIndex._2) {
SVMKernel.buildKernelGradMatrix(
c.head._1,
hyper_parameters,
eval, evalGrad).map(cluster => {
(cluster._1, (partitionIndex, cluster._2))
}).toSeq
} else {
SVMKernel.buildCrossKernelGradMatrix(
c.head._1, c.last._1,
hyper_parameters,
eval, evalGrad).map(cluster => {
(cluster._1, (partitionIndex, cluster._2))
}).toSeq
}
}).groupBy(_._1).map(cluster => {
val hyp = cluster._1
val matData = cluster._2.map(_._2)
(hyp, new PartitionedPSDMatrix(matData, rows, cols, num_R_blocks, num_C_blocks))
})
}
}
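// Hedged usage sketch (illustrative only): building a dense kernel matrix
// from a toy data set with an RBF-style evaluation function. The data points
// and the implied unit bandwidth are assumptions chosen for the example.
//
// val data = List(DenseVector(0.0, 1.0), DenseVector(1.0, 0.0), DenseVector(1.0, 1.0))
// val rbf = (x: DenseVector[Double], y: DenseVector[Double]) =>
//   math.exp(-math.pow(norm(x - y), 2) / 2.0)
// val kMat = SVMKernel.buildSVMKernelMatrix(data, data.length, rbf)
// val k: DenseMatrix[Double] = kMat.getKernelMatrix()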
|
transcendent-ai-labs/DynaML
|
dynaml-core/src/main/scala/io/github/mandar2812/dynaml/kernels/SVMKernel.scala
|
Scala
|
apache-2.0
| 10,174 |
package postgresweb.css
import japgolly.scalajs.react.vdom.TagMod
import scalacss.Defaults._
import scalacss.ScalaCssReact._
/**
* Created by andreaminetti on 22/02/16.
*/
object CommonStyles extends StyleSheet.Inline {
import dsl._
val layout:TagMod = style(addClassNames("mdl-layout","mdl-js-layout","mdl-layout--fixed-drawer","mdl-layout--fixed-header","mdl-layout--fixed-tabs"))
val spacer:TagMod = style(addClassNames("mdl-layout-spacer"))
val navigation:TagMod = style(addClassNames("mdl-navigation"))
val navigationLink:TagMod = style(addClassNames("mdl-navigation__link"))
val title:TagMod = style(addClassNames("mdl-layout-title"))
val row:TagMod = style(
addClassNames("mdl-grid"),
margin(0.px),
padding(0.px)
)
val fullWidth:TagMod = style(
addClassNames("mdl-cell","mdl-cell--12-col"),
margin(0.px),
width(100.%%)
)
val card:TagMod = style(addClassNames("mdl-card","mdl-shadow--2dp"))
val scroll:TagMod = style(overflow.auto)
}
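// Hedged usage sketch (not part of the original file): applying these styles
// in a scalajs-react component. The `prefix_<^` vdom import matches the
// scalajs-react generation this file appears to target; the markup itself is
// illustrative.
object CommonStylesExample {
  import japgolly.scalajs.react.vdom.prefix_<^._

  val card = <.div(CommonStyles.card,
    <.span(CommonStyles.title, "Title"),
    <.div(CommonStyles.scroll, "Scrollable content"))
}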
|
minettiandrea/postgres-web
|
src/main/scala/postgresweb/css/CommonStyles.scala
|
Scala
|
apache-2.0
| 1,001 |
package com.twitter.util.security
import com.twitter.logging.Logger
import com.twitter.util.Try
import com.twitter.util.security.X509CertificateFile._
import java.io.{ByteArrayInputStream, File}
import java.security.cert.{CertificateFactory, X509Certificate}
/**
* A representation of an X.509 Certificate PEM-encoded and stored
* in a file.
*
* @example
* -----BEGIN CERTIFICATE-----
* base64encodedbytes
* -----END CERTIFICATE-----
*/
class X509CertificateFile(file: File) {
private[this] def logException(ex: Throwable): Unit =
log.warning(s"X509Certificate (${file.getName()}) failed to load: ${ex.getMessage()}.")
private[this] def generateX509Certificate(decodedMessage: Array[Byte]): X509Certificate = {
val certFactory = CertificateFactory.getInstance("X.509")
certFactory
.generateCertificate(new ByteArrayInputStream(decodedMessage))
.asInstanceOf[X509Certificate]
}
/**
* Attempts to read the contents of the X.509 Certificate from the file.
*/
def readX509Certificate(): Try[X509Certificate] = {
val pemFile = new PemFile(file)
pemFile
.readMessage(MessageType)
.map(generateX509Certificate)
.onFailure(logException)
}
/**
* Attempts to read the contents of multiple X.509 Certificates from the file.
*/
def readX509Certificates(): Try[Seq[X509Certificate]] = {
val pemFile = new PemFile(file)
pemFile
.readMessages(MessageType)
.map(certBytes => certBytes.map(generateX509Certificate))
.onFailure(logException)
}
}
private object X509CertificateFile {
private val MessageType: String = "CERTIFICATE"
private val log = Logger.get("com.twitter.util.security")
}
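// Hedged usage sketch (not part of the original file): loading a certificate
// and pattern-matching on Twitter's Try. The file path is illustrative.
object X509CertificateFileExample {
  import com.twitter.util.{Return, Throw}

  def main(args: Array[String]): Unit = {
    val certFile = new X509CertificateFile(new File("/tmp/server.crt"))
    certFile.readX509Certificate() match {
      case Return(cert) => println(s"Loaded: ${cert.getSubjectX500Principal.getName}")
      case Throw(ex) => println(s"Failed to load certificate: ${ex.getMessage}")
    }
  }
}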
|
BuoyantIO/twitter-util
|
util-security/src/main/scala/com/twitter/util/security/X509CertificateFile.scala
|
Scala
|
apache-2.0
| 1,701 |
package sri.mobile
import scala.scalajs.js
@js.native
trait ViewPagerAndroidEvent extends js.Object {
  val position: Int = js.native
  val offset: Double = js.native
}
@js.native
trait DrawerLayoutAndroidEvent extends js.Object
@js.native
trait WebViewEvent extends js.Object
@js.native
trait MapViewEvent extends js.Object
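// Hedged sketch (not part of the original file): a handler consuming the
// facade types above; the logging body is illustrative.
object ReactNativeEventsExample {
  def onPageSelected(event: ViewPagerAndroidEvent): Unit =
    println(s"ViewPager moved to page ${event.position} (offset ${event.offset})")
}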
|
chandu0101/sri
|
mobile/src/main/scala/sri/mobile/ReactNativeEvents.scala
|
Scala
|
apache-2.0
| 341 |
package fr.laas.fape.anml.model
import fr.laas.fape.anml.ANMLException
import fr.laas.fape.anml.model.concrete.{InstanceRef, RefCounter}
import fr.laas.fape.anml.parser.{PDisjunctiveType, PSimpleType, PType}
import scala.collection.JavaConverters._
import scala.collection.mutable
class InstanceManager(val refCounter: RefCounter) {
/** Root of the type Hierarchy */
private val NON_NUM_SOURCE_TYPE = "__NON_NUM_SOURCE_TYPE__"
/** Maps every type name to a full type definition */
private val simpleTypes = mutable.Map[String, SimpleType]()
private val disjunctiveTypes = mutable.Map[PDisjunctiveType, UnionType]()
/** Maps an instance name to a (type, GlobalReference) pair */
private val instancesDef = mutable.Map[String, InstanceRef]()
// predefined ANML types and instances
addType(NON_NUM_SOURCE_TYPE,"")
addType("boolean", "")
// addType("integer", "")
addInstance("true", "boolean", refCounter)
addInstance("false", "boolean", refCounter)
addType("typeOfUnknown", "")
addInstance("unknown", "typeOfUnknown", refCounter)
addType("_decompositionID_", "")
for(i <- 0 until 20)
addInstance("decnum:"+i, "_decompositionID_", refCounter)
/** Creates a new instance of a certain type.
*
* @param name Name of the instance.
* @param t Type of the instance.
*/
  def addInstance(name: String, t: String, refCounter: RefCounter): Unit = {
assert(!instancesDef.contains(name), "Instance already declared: " + name)
assert(simpleTypes.contains(t), "Unknown type: " + t)
val newInstance = new InstanceRef(name, asType(t), refCounter)
instancesDef(name) = newInstance
simpleTypes(t).addInstance(newInstance)
}
def addTypes(typeList: List[(String,String)]): Unit = {
val q = mutable.Queue[(String,String)]()
q.enqueue(typeList: _*)
while(q.nonEmpty) {
q.dequeue() match {
case (typ,"") =>
q.enqueue((typ, NON_NUM_SOURCE_TYPE))
case t@(_,parent) if !simpleTypes.contains(parent) =>
q.enqueue(t)
case (typ,parentName) =>
addType(typ, parentName)
}
}
}
def asType(typeName: String) = simpleTypes(typeName)
def asType(typ: PType) : Type = typ match {
case PSimpleType(typeName) => asType(typeName)
case t@PDisjunctiveType(l) =>
if(!disjunctiveTypes.contains(t))
disjunctiveTypes.put(t, UnionType(l.map(st => asType(st).asInstanceOf[SimpleType])))
disjunctiveTypes(t)
}
/** Records a new type.
*
* @param name Name of the type
* @param parent Name of the parent type. If empty (""), no parent is set for this type.
*/
  def addType(name: String, parent: String): Unit = {
    assert(!simpleTypes.contains(name), "Error: type \"" + name + "\" is already recorded.")
    assert(parent.isEmpty || simpleTypes.contains(parent), s"Parent type '$parent' of '$name' is not defined yet.")
simpleTypes(name) = parent match {
case "" => new SimpleType(name, None)
case par => new SimpleType(name, Some(simpleTypes(par)))
}
}
/**
*
* @param instanceName Name of the instance to lookup
* @return True if an instance of name `instanceName` is known
*/
def containsInstance(instanceName:String) = instancesDef.contains(instanceName)
/** Returns true if the type with the given name exists */
def containsType(typeName:String) = simpleTypes.contains(typeName)
/** Returns the type of a given instance */
def typeOf(instanceName:String) = instancesDef(instanceName).getType
  /** Returns all (direct and indirect) subtypes of the given type, including the type itself.
   *
   * @param typeName Name of the type to inspect.
   * @return All subtypes, including the type itself.
   */
  def subTypes(typeName: String): java.util.Set[String] = simpleTypes(typeName).allSubTypes.map(_.name).asJava
/** Returns all parents of this type */
  def parents(typeName: String): java.util.Set[String] =
    simpleTypes(typeName).parents.map(_.toString).asJava
/** Return a collection containing all instances. */
  def allInstances: java.util.Collection[String] = instancesDef.keys.asJavaCollection
def allSimpleTypes : Iterable[SimpleType] = simpleTypes.values
/** Retrieves the variable reference linked to this instance
*
* @param name Name of the instance to lookup
* @return The global variable reference linked to this instance
*/
def referenceOf(name: String) : InstanceRef = instancesDef(name)
  def referenceOf(value: Int): InstanceRef = {
    // Lazily declares an instance for this integer value. Note that addInstance
    // asserts the "integer" type is registered; its declaration above is commented out.
    if (!instancesDef.contains(value.toString))
      addInstance(value.toString, "integer", refCounter)
instancesDef(value.toString)
}
/** Lookup for all instances of this types (including all instances of any of its subtypes). */
  def instancesOfType(tipe: String): java.util.List[String] = instancesOfTypeRec(tipe).asJava
/** Returns all instances of the given type */
private def instancesOfTypeRec(tipe:String) : List[String] = {
assert(tipe != "integer", "Requested instances of type integer.")
assert(simpleTypes.contains(tipe), s"Unknown type: $tipe")
simpleTypes(tipe).instances.toList.map(_.instance)
}
  /**
   * Checks whether the type can accept the given value. This is true if
   *  - the value's type is a subtype of typ
   *  - the value is "unknown" (always an acceptable value)
   *
   * @param value Value to be checked.
   * @param typ Type that should accept the value.
   * @param context Context in which the value is declared (used to retrieve its type).
   * @return True if the value is acceptable.
   */
def isValueAcceptableForType(value:LVarRef, typ:String, context: AbstractContext) : Boolean =
subTypes(typ).contains(value.getType) || value.id == "unknown"
/** Returns all instances of the given type */
def jInstancesOfType(tipe:String) = instancesOfType(tipe)
  /** Returns a fully qualified function definition as its name components,
   * e.g. ["Robot", "location"].
   *
   * @param typeName Base type in which the method is used.
   * @param methodName Name of the method.
   * @return The components of the fully qualified function name.
   */
def getQualifiedFunction(typeName:String, methodName:String) : List[String] = {
assert(simpleTypes.contains(typeName), s"Type $typeName does not seem to exist.")
val ret = simpleTypes(typeName).getQualifiedFunction(methodName)
assert(ret.nonEmpty)
    ret.split("\\.").toList
}
}
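// Hedged usage sketch (not part of the original file): declaring a small type
// hierarchy and an instance. The RefCounter is taken as a parameter because
// its construction lives outside this file; all names are illustrative.
object InstanceManagerExample {
  def demo(refCounter: RefCounter): Unit = {
    val im = new InstanceManager(refCounter)
    im.addTypes(List(("Robot", ""), ("GroundRobot", "Robot")))
    im.addInstance("r1", "GroundRobot", refCounter)
    assert(im.containsInstance("r1"))
    assert(im.subTypes("Robot").contains("GroundRobot"))
  }
}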
|
athy/fape
|
anml-parser/src/main/scala/fr/laas/fape/anml/model/InstanceManager.scala
|
Scala
|
bsd-2-clause
| 6,365 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import java.util
import scala.collection.JavaConverters._
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.connector.catalog.{Table, TableProvider}
import org.apache.spark.sql.connector.expressions.Transform
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources.DataSourceRegister
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
/**
* A base interface for data source v2 implementations of the built-in file-based data sources.
*/
trait FileDataSourceV2 extends TableProvider with DataSourceRegister {
/**
* Returns a V1 [[FileFormat]] class of the same file data source.
* This is a solution for the following cases:
* 1. File datasource V2 implementations cause regression. Users can disable the problematic data
* source via SQL configuration and fall back to FileFormat.
* 2. Catalog support is required, which is still under development for data source V2.
*/
def fallbackFileFormat: Class[_ <: FileFormat]
lazy val sparkSession = SparkSession.active
protected def getPaths(map: CaseInsensitiveStringMap): Seq[String] = {
val objectMapper = new ObjectMapper()
val paths = Option(map.get("paths")).map { pathStr =>
objectMapper.readValue(pathStr, classOf[Array[String]]).toSeq
}.getOrElse(Seq.empty)
paths ++ Option(map.get("path")).toSeq
}
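  // For example (illustrative): options {"paths": "[\"/data/a\",\"/data/b\"]",
  // "path": "/data/c"} yield Seq("/data/a", "/data/b", "/data/c").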
protected def getOptionsWithoutPaths(map: CaseInsensitiveStringMap): CaseInsensitiveStringMap = {
val withoutPath = map.asCaseSensitiveMap().asScala.filterKeys { k =>
!k.equalsIgnoreCase("path") && !k.equalsIgnoreCase("paths")
}
new CaseInsensitiveStringMap(withoutPath.asJava)
}
protected def getTableName(map: CaseInsensitiveStringMap, paths: Seq[String]): String = {
val hadoopConf = sparkSession.sessionState.newHadoopConfWithOptions(
map.asCaseSensitiveMap().asScala.toMap)
val name = shortName() + " " + paths.map(qualifiedPathName(_, hadoopConf)).mkString(",")
Utils.redact(sparkSession.sessionState.conf.stringRedactionPattern, name)
}
private def qualifiedPathName(path: String, hadoopConf: Configuration): String = {
val hdfsPath = new Path(path)
val fs = hdfsPath.getFileSystem(hadoopConf)
hdfsPath.makeQualified(fs.getUri, fs.getWorkingDirectory).toString
}
// TODO: To reduce code diff of SPARK-29665, we create stub implementations for file source v2, so
// that we don't need to touch all the file source v2 classes. We should remove the stub
// implementation and directly implement the TableProvider APIs.
protected def getTable(options: CaseInsensitiveStringMap): Table
protected def getTable(options: CaseInsensitiveStringMap, schema: StructType): Table = {
throw new UnsupportedOperationException("user-specified schema")
}
override def supportsExternalMetadata(): Boolean = true
private var t: Table = null
override def inferSchema(options: CaseInsensitiveStringMap): StructType = {
if (t == null) t = getTable(options)
t.schema()
}
// TODO: implement a light-weight partition inference which only looks at the path of one leaf
// file and return partition column names. For now the partition inference happens in
// `getTable`, because we don't know the user-specified schema here.
override def inferPartitioning(options: CaseInsensitiveStringMap): Array[Transform] = {
Array.empty
}
override def getTable(
schema: StructType,
partitioning: Array[Transform],
properties: util.Map[String, String]): Table = {
// If the table is already loaded during schema inference, return it directly.
if (t != null) {
t
} else {
getTable(new CaseInsensitiveStringMap(properties), schema)
}
}
}
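// Hedged sketch (not part of the original file): the shape of a concrete file
// source built on this trait. The class name and "mytext" short name are
// illustrative; fallbackFileFormat points at the built-in text FileFormat.
class MyTextDataSourceV2 extends FileDataSourceV2 {
  import org.apache.spark.sql.execution.datasources.text.TextFileFormat

  override def fallbackFileFormat: Class[_ <: FileFormat] = classOf[TextFileFormat]

  override def shortName(): String = "mytext"

  override protected def getTable(options: CaseInsensitiveStringMap): Table = {
    // A real implementation would return a FileTable built from
    // getPaths(options) and getOptionsWithoutPaths(options).
    throw new UnsupportedOperationException("illustrative sketch only")
  }
}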
|
rednaxelafx/apache-spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileDataSourceV2.scala
|
Scala
|
apache-2.0
| 4,886 |
import java.io.{File, FileInputStream}
import java.util.Properties
import com.indvd00m.ascii.render.api.{ICanvas, IContextBuilder, IRender}
import com.indvd00m.ascii.render.elements.{Label, Line, Rectangle}
import com.indvd00m.ascii.render.{Point, Render}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import io.amient.affinity.core.config.CfgStruct
import io.amient.affinity.core.storage.{LogEntry, LogStorage, LogStorageConf}
import io.amient.affinity.core.util.{EventTime, TimeRange}
import io.amient.affinity.kafka.KafkaLogStorage
import io.amient.affinity.kafka.KafkaStorage.KafkaStorageConf
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
object TimeLogTool extends Tool {
class TimeLogConf extends CfgStruct[TimeLogConf] {
doc("Utility for analyzing log compaction")
}
object Conf extends TimeLogConf {
override def apply(config: Config) = new TimeLogConf().apply(config)
}
private val logger = LoggerFactory.getLogger(this.getClass)
val width = 180
val height = 41
private var minTimestamp = Long.MaxValue
private var maxTimestamp = Long.MinValue
private var maxPosition = Long.MinValue
private var minPosition = Long.MaxValue
private var numRecords = 0
private val blocks = ListBuffer[(TimeRange, Long, Long)]()
override def apply(config: Config): Unit = ???
override def apply(args: List[String], config: Config): Unit = args match {
case bootstrap :: topic :: partition :: fuzz :: from :: until :: fromOffset :: toOffset :: Nil => apply(bootstrap, topic, partition.toInt, fuzz.toLong, new TimeRange(from, until), fromOffset.toLong -> toOffset.toLong)
case bootstrap :: topic :: partition :: fuzz :: from :: until :: fromOffset :: Nil => apply(bootstrap, topic, partition.toInt, fuzz.toLong, new TimeRange(from, until), fromOffset.toLong -> Long.MaxValue)
case bootstrap :: topic :: partition :: fuzz :: from :: until :: Nil if (from.contains("T")) => apply(bootstrap, topic, partition.toInt, fuzz.toLong, new TimeRange(from, until))
case bootstrap :: topic :: partition :: fuzz :: from :: Nil if (from.contains("T")) => apply(bootstrap, topic, partition.toInt, fuzz.toLong, TimeRange.since(from))
case bootstrap :: topic :: partition :: fuzz :: from :: until :: Nil => apply(bootstrap, topic, partition.toInt, fuzz.toLong, TimeRange.UNBOUNDED, from.toLong -> until.toLong)
case bootstrap :: topic :: partition :: fuzz :: from :: Nil => apply(bootstrap, topic, partition.toInt, fuzz.toLong, TimeRange.UNBOUNDED, from.toLong -> Long.MaxValue)
case bootstrap :: topic :: partition :: fuzz :: Nil => apply(bootstrap, topic, partition.toInt, fuzz.toLong)
case bootstrap :: topic :: partition :: Nil => apply(bootstrap, topic, partition.toInt)
case bootstrap :: topic :: Nil => apply(bootstrap, topic)
case _ => printHelp()
}
def printHelp(): Unit = {
println("Usage: timelog <kafka-bootstrap|consumer-props-file> <topic> [<partition> [<resolution-minutes>] [<from-datetime> [<to-datetime> [<from-offset> [<to-offset>]]]]]\n")
}
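  // Illustrative invocations (arguments are examples, not from the original source):
  //   timelog localhost:9092 my-topic                    lists available partitions
  //   timelog localhost:9092 my-topic 0 5 2019-01-01T00:00:00Z 2019-02-01T00:00:00Z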
def apply(bootstrap: String, topic: String): Unit = {
println("Available partitions: 0 - " + (getKafkaLog(bootstrap, topic).getNumPartitions-1))
}
def apply(bootstrap: String,
topic: String,
partition: Int,
fuzzMinutes: Long = 5,
range: TimeRange = TimeRange.UNBOUNDED,
offsetRange: (Long, Long) = (Long.MinValue, Long.MaxValue)): Unit = {
val log = getKafkaLog(bootstrap, topic)
println(s"calculating compaction stats for range: $range..\n")
log.reset(partition, range)
val (limitOffsetStart, limitOffsetStop) = offsetRange
if (limitOffsetStart> 0) log.reset(partition, limitOffsetStart)
var blockmints = Long.MaxValue
var blockmaxts = Long.MinValue
var startpos = -1L
var endpos = -1L
var lastts = Long.MinValue
def addblock(): Unit = {
val timerange: TimeRange = new TimeRange(blockmints, blockmaxts)
blocks += ((timerange, startpos, endpos))
logger.debug(s"Block $startpos : $endpos -> $timerange")
startpos = -1L
endpos = -1L
blockmaxts = Long.MinValue
blockmints = Long.MaxValue
lastts = Long.MinValue
}
def maybeAddBlock(entry: LogEntry[java.lang.Long]): Unit = {
if (lastts == Long.MinValue) return
if (entry.timestamp > lastts - fuzzMinutes * 60000 && entry.timestamp < lastts + fuzzMinutes * 60000) return
addblock()
}
log.boundedIterator.asScala.takeWhile(_.position < limitOffsetStop).foreach {
entry =>
maybeAddBlock(entry)
if (startpos == -1) startpos = entry.position
minPosition = math.min(minPosition, entry.position)
maxPosition = math.max(maxPosition, entry.position)
endpos = entry.position
lastts = entry.timestamp
blockmints = math.min(blockmints, entry.timestamp)
blockmaxts = math.max(blockmaxts, entry.timestamp)
minTimestamp = math.min(minTimestamp, entry.timestamp)
maxTimestamp = math.max(maxTimestamp, entry.timestamp)
numRecords += 1
}
if (startpos > -1) addblock()
println("number of records: " + numRecords)
println("minimum timestamp: " + pretty(minTimestamp))
println("maximum timestamp: " + pretty(maxTimestamp))
println("minimum offset: " + minPosition)
println("maximum offset: " + maxPosition)
plot(blocks.toList)
}
private def getKafkaLog(bootstrapOrConfigFile: String, topic: String): KafkaLogStorage = {
val configFile = new File(bootstrapOrConfigFile)
val config = ConfigFactory.parseMap(Map(
LogStorage.StorageConf.Class.path -> classOf[KafkaLogStorage].getName(),
KafkaStorageConf.Topic.path -> topic
).asJava)
val conf = new LogStorageConf().apply(if (configFile.exists) {
logger.info(s"initializing $topic from consumer properties file: $bootstrapOrConfigFile")
val consumerProps = new Properties()
consumerProps.load(new FileInputStream(configFile))
config
.withValue(KafkaStorageConf.BootstrapServers.path, ConfigValueFactory.fromAnyRef(consumerProps.getProperty("bootstrap.servers")))
.withFallback(ConfigFactory.parseMap(consumerProps.asScala.asJava)
.withoutPath("bootstrap.servers").atPath(KafkaStorageConf.Consumer.path))
} else {
logger.info(s"initializing $topic from bootstrap: $bootstrapOrConfigFile")
config.withValue(KafkaStorageConf.BootstrapServers.path, ConfigValueFactory.fromAnyRef(bootstrapOrConfigFile))
})
LogStorage.newInstance(conf).asInstanceOf[KafkaLogStorage]
}
private def pretty(unix: Long): String = {
EventTime.local(unix).toString.replace("Z", "").replace("T", " ")
}
private def plot(blocks: List[(TimeRange, Long, Long)]): Unit = {
val render: IRender = new Render
val builder: IContextBuilder = render.newBuilder
builder.width(width).height(height)
val xratio = width.toDouble / (maxTimestamp - minTimestamp)
val yratio = height.toDouble / (maxPosition - minPosition)
blocks.foreach {
case (timerange, startpos, endpos) =>
val x = ((timerange.start - minTimestamp) * xratio).toInt
val y = height - ((endpos - minPosition) * yratio).toInt
val w = math.max(0, ((timerange.end - timerange.start) * xratio).toInt)
val h = math.max(0, ((endpos - startpos) * yratio).toInt)
if (w < 2 || h < 2) {
builder.element(new Line(new Point(x, y), new Point(x + w, y + h)))
} else {
builder.element(new Rectangle(x, y, w, h))
if (w > 20) {
          builder.element(new Label(pretty(timerange.end), x + w - 20, y + 1))
if (h > 3) {
builder.element(new Label(endpos.toString.reverse.padTo(19, ' ').reverse, x + w - 20, y + 2))
}
if (w > 42 || h > 4) {
builder.element(new Label(startpos.toString, x + 1, y + h - 3))
if (h > 1) {
              builder.element(new Label(pretty(timerange.start), x + 1, y + h - 2))
}
} else if (h > 3) {
builder.element(new Label(startpos.toString, x + 1, y + h - 2))
}
}
}
}
val canvas: ICanvas = render.render(builder.build)
println(canvas.getText)
}
}
|
amient/affinity
|
cli/src/main/scala/TimeLogTool.scala
|
Scala
|
apache-2.0
| 8,423 |