code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5–1M)
---|---|---|---|---|---|
object Test {
def add2(x:Long,y:Long): Long = x + y
def add[Long](x: List[Long], y: List[Long]): List[Long] =
if (x.isEmpty || y.isEmpty) Nil
else add2(x.head, y.head) :: add(x.tail, y.tail)
}
| yusuke2255/dotty | tests/untried/neg/typeerror.scala | Scala | bsd-3-clause | 206 |
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Vishnu Gowda Harish, John Miller
* @version 1.3
* @date Thu Dec 22 17:00:46 EST 2016
* @see LICENSE (MIT style license file).
*/
package apps.analytics
import scalation.analytics.BASE_DIR
import scalation.linalgebra.{MatrixD, MatrixKind, RleMatrixD, RleVectorD, VectorD}
//import scalation.linalgebra.MatrixKind
//import scalation.relalgebra.Relation
import scalation.util.{timed, time}
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Census` object is a sample application that performs basic analytics on
* census data.
* FIX: must download the following file.
* @see archive.ics.uci.edu/ml/machine-learning-databases/census1990-mld/
* > run-main apps.analytics.Census
*/
object Census extends App
{
val fName = BASE_DIR + "USCensus1990_data.csv"
// var censusRel = Relation (fName, "census", "I" * 69, -1, ",") // FIX : to check why it is slow
// val censusMat = censusRel.toMatriI (1 to 68)
// val censusRleMat = censusRel.toMatriI (1 to 68, MatrixKind.COMPRESSED).asInstanceOf [RleMatrixI]
print ("Build dense matrix"); val censusMat = time {MatrixD (fName, 1)}
print ("Build rle matrix"); val censusRleMat = time {RleMatrixD (censusMat)}
val jobDense = censusMat.col (4). filterPos (x => x == 1.0)
val jobRle = censusRleMat.col (4). filterPos (x => x == 1.0)
val usBornDense = censusMat.col (5). filterPos (x => x == 0.0)
val usBornRle = censusRleMat.col (5).filterPos (x => x == 0.0)
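// Column indices are positional: column 4 is read here as the employment flag (1.0 taken to mean
// "has a job") and column 5 as the birthplace flag (0.0 taken to mean "US born"), following the
// variable names; the exact coding is defined by the USCensus1990 attribute list referenced above.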
// Print the statistics
println (s" Dense Matrix: Number of US Born = ${usBornDense.size}")
println (s" Rle Matrix: Number of US Born = ${usBornRle.size}")
println (s" Dense Matrix: Number of people who have a job = ${jobDense.size}")
println (s" Rle Matrix: Number of people who have a job = ${jobRle.size}")
println (s" Dense Matrix: Born in the US and have a job = ${usBornDense.intersect(jobDense).size}")
println (s" Rle Matrix: Born in the US and have a job = ${usBornRle.intersect(jobRle).size}")
// Space information before and after RLE compression
val compRatios = (censusRleMat.csize.toDense.toDouble/censusMat.dim1.toDouble).recip
val avgcompRatios = compRatios.mean
println (s" Size of columns in dense matrix: ${censusMat.dim1}")
println (s" Size of columns in Rle: ${censusRleMat.csize}")
println (s" Compression ratio column wise: ${compRatios}")
println (s" Average compression ratio of columns: ${avgcompRatios}")
//Compute the time
val itr = 6
val denseTimeVec = new VectorD (itr)
val rleTimeVec = new RleVectorD (itr)
for (i <- 0 until itr) {
denseTimeVec (i) = timed { val usBornDense = censusMat.col (5). filterPos (x => x == 0.0)
val jobDense = censusMat.col (4). filterPos (x => x == 1.0)
}._2
rleTimeVec (i) = timed { val usBornRle = censusRleMat.col (5). filterPos (x => x == 0.0)
val jobRle = censusRleMat.col (4). filterPos (x => x == 1.0)
}._2
} // for
println (s"Dense: $denseTimeVec")
println (s"Rle: $rleTimeVec")
println ("Average time taken by Dense: "+denseTimeVec.slice (1).mean+" ms")
println ("Average time taken by Rle: "+rleTimeVec.slice (1).mean+" ms")
} // Census object
| NBKlepp/fda | scalation_1.3/scalation_models/src/main/scala/apps/analytics/Census.scala | Scala | mit | 3,677 |
package example.akkawschat.cli
import akka.stream.stage.{ InHandler, GraphStageLogic, GraphStage }
import akka.stream._
import akka.stream.scaladsl.{ GraphDSL, Source, Flow }
import scala.collection.immutable
import scala.concurrent.ExecutionContext
/** Infrastructure for a small DSL that allows writing stateful concurrent console apps of a certain kind */
trait ConsoleDSL[T] {
type State <: AnyRef
def initialState: State
/** Returns a Flow that implements the console logic. */
def consoleHandler(implicit ec: ExecutionContext): Flow[Command, T, Any] = {
val characters = Source.fromGraph(new ConsoleInput)
val graph =
GraphDSL.create() { implicit b ⇒
import GraphDSL.Implicits._
val prompt = b.add(ConsoleStage)
characters ~> prompt.characterInput
FlowShape(prompt.commandIn, prompt.output)
}
Flow.fromGraph(graph)
}
case class ConsoleStageShape(characterInput: Inlet[Char], commandIn: Inlet[Command], output: Outlet[T]) extends Shape {
def inlets = Vector(characterInput, commandIn)
def outlets = Vector(output)
def deepCopy(): Shape = ConsoleStageShape(characterInput.carbonCopy(), commandIn.carbonCopy(), output.carbonCopy())
def copyFromPorts(inlets: immutable.Seq[Inlet[_]], outlets: immutable.Seq[Outlet[_]]): Shape =
ConsoleStageShape(inlets(0).asInstanceOf[Inlet[Char]], inlets(1).asInstanceOf[Inlet[Command]], outlets(0).asInstanceOf[Outlet[T]])
}
object ConsoleStage extends GraphStage[ConsoleStageShape] {
import TTY._
val shape: ConsoleStageShape = ConsoleStageShape(Inlet[Char]("characterInput"), Inlet[Command]("commandIn"), Outlet[T]("output"))
import shape.{ characterInput, commandIn, output }
def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) {
var inputHandler: State ⇒ PartialFunction[Char, Command] = (_ ⇒ PartialFunction.empty)
var promptLine: State ⇒ String = (_ ⇒ "")
var state: State = initialState
setHandler(characterInput, new InHandler {
def onPush(): Unit = {
val input = grab(characterInput)
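// ASCII 4 is EOT, the character Ctrl-D produces on a raw-mode terminal, so treat it as "quit"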
if (input == 4) {
outputLine("Goodbye!")
completeStage()
} else {
val cmd = inputHandler(state).applyOrElse[Char, Command](input, _ ⇒ Command.Empty)
runCommand(cmd)
pull(characterInput)
}
}
})
setHandler(commandIn, new InHandler {
def onPush(): Unit = {
runCommand(grab(commandIn))
pull(commandIn)
}
})
setHandler(output, eagerTerminateOutput)
import Command._
def runCommand(command: Command): Unit = command match {
case Empty ⇒
case Multiple(cmds) ⇒ cmds foreach runCommand
case PrintLine(line) ⇒
outputLine(line)
updatePrompt()
case StatefulPrompt(newPrompt) ⇒
promptLine = newPrompt
updatePrompt()
case SetStatefulInputHandler(newHandler) ⇒ inputHandler = newHandler
case UpdateState(modify) ⇒
state = modify(state)
updatePrompt()
case Emit(element) ⇒ push(output, element)
case Complete ⇒ completeStage()
}
def outputLine(line: String): Unit = print(s"$RESTORE$ERASE_LINE$line\n$SAVE")
def updatePrompt(): Unit = print(s"$RESTORE$ERASE_LINE$SAVE${promptLine(state)}")
override def preStart(): Unit = {
pull(commandIn)
pull(characterInput)
print(SAVE) // to prevent jumping before the current output
}
}
}
sealed trait Command {
def ~(other: Command): Command = Command.Multiple(Seq(this, other))
}
object Command {
val Empty = Multiple(Nil)
case class PrintLine(line: String) extends Command
def SetPrompt(prompt: String): Command = StatefulPrompt(_ ⇒ prompt)
case class StatefulPrompt(prompt: State ⇒ String) extends Command
def SetState(state: State): Command = UpdateState(_ ⇒ state)
case class UpdateState(modify: State ⇒ State) extends Command
def SetInputHandler(handler: PartialFunction[Char, Command]): Command = SetStatefulInputHandler(_ ⇒ handler)
case class SetStatefulInputHandler(handler: State ⇒ PartialFunction[Char, Command]) extends Command
case class Emit(element: T) extends Command
case object Complete extends Command
case class Multiple(commands: Seq[Command]) extends Command {
override def ~(other: Command): Command = Multiple(commands :+ other) // don't nest
}
}
import Command._
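// A small line editor built from the commands above: every printable keystroke re-installs the
// handler with the updated `currentInput` and the stateful prompt echoes the text typed so far;
// Enter ('\r') hands the finished line to `andThen`, and DEL (127) drops the last character.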
def readLineStatefulPrompt(prompt: State ⇒ String, currentInput: String = "")(andThen: String ⇒ Command): Command =
StatefulPrompt(state ⇒ s"${prompt(state)}$currentInput") ~
SetInputHandler {
case '\r' ⇒ andThen(currentInput)
case x if x >= 0x20 && x < 0x7e ⇒ readLineStatefulPrompt(prompt, currentInput + x)(andThen)
case 127 /* backspace */ ⇒ readLineStatefulPrompt(prompt, currentInput.dropRight(1))(andThen)
}
def readLine(prompt: String = "> ")(andThen: String ⇒ Command): Command =
readLineStatefulPrompt(_ ⇒ prompt)(andThen)
}
| yukihirai0505/tutorial-program | programming/scala/websocket/cli/src/main/scala/example/akkawschat/cli/ConsoleDSL.scala | Scala | mit | 5,365 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.scalastyle.file.CheckerTest
import org.scalatest.junit.AssertionsForJUnit
import org.junit.Test
// scalastyle:off magic.number
class ScalaDocCheckerTest extends AssertionsForJUnit with CheckerTest {
val key = "scaladoc"
val classUnderTest = classOf[ScalaDocChecker]
import ScalaDocChecker._ // scalastyle:ignore underscore.import
@Test def noParamsCCTO(): Unit = {
def al(access: String = "", checked: Boolean): Unit = {
val traitSource = s"%s${access}trait Foo"
val classSource = s"%s${access}class Foo"
val caseClassSource = s"%s${access}case class Foo()"
val objectSource = s"%s${access}object Foo"
val doc =
"""
|/**
| * This is the documentation for whatever follows with no params, no tparams, no return, no throws
| */
""".stripMargin
List(traitSource, classSource, caseClassSource, objectSource).foreach { source =>
assertErrors(Nil, source format doc)
assertErrors(if (checked) List(lineError(1, List(Missing))) else Nil, source format "")
}
}
al("", true)
al("private[pkg] ", true)
al("protected[pkg] ", true)
al("protected ", true)
al("private ", false)
}
@Test def classParams(): Unit = {
val classSource = "%sclass Foo(a: Int, b: Int)"
val caseClassSource = "%scase class Foo(a: Int, b: Int)"
val annotatedCaseClassSource = s"%scase class Foo @JpaAbomination() (@Field a: Int, @Field b: Int)"
val annotatedCaseClassSource2 = s"""%scase class Foo @JpaAbomination(me) (@Field(a = 4, b = "foo") a: Int, @Field() b: Int)"""
val missingParamDoc =
"""
|/**
| * This is the documentation for whatever follows
| */
""".stripMargin
val doc =
"""
|/**
| * This is the documentation for whatever follows
| *
| * @param a the value of a
| * @param b the value of b
| */
""".stripMargin
List(classSource, caseClassSource, annotatedCaseClassSource, annotatedCaseClassSource2).foreach { source =>
assertErrors(Nil, source format doc)
assertErrors(List(lineError(1, List(Missing))), source format "")
assertErrors(List(lineError(5, List(missingParam("a"))), lineError(5, List(missingParam("b")))), source format missingParamDoc)
}
}
@Test def typeParamsCCT(): Unit = {
val traitSource = "%strait Foo[A, B]"
val classSource = "%sclass Foo[A, B]"
val caseClassSource = "%scase class Foo[A, B]()"
val malformedDoc =
"""
|/**
| * This is the documentation for whatever follows
| */
""".stripMargin
val doc =
"""
|/**
| * This is the documentation for whatever follows with tparams
| *
| * @tparam A the type A
| * @tparam B the type B
| */
""".stripMargin
List(traitSource, classSource, caseClassSource).foreach { source =>
assertErrors(Nil, source format doc)
assertErrors(List(lineError(1, List(Missing))), source format "")
assertErrors(List(lineError(5, List(MalformedTypeParams))), source format malformedDoc)
}
}
@Test def publicMethodWithEverything(): Unit = {
def al(access: String = "", checked: Boolean): Unit = {
val fun =
s"""
|/**
| * XXX
| */
|trait X {
| %s${access} def foo[A, B, U](a: A, b: B): U = ???
|}
""".stripMargin
val annotatedFun =
s"""
|/**
| * XXX
| */
|trait X {
| %s${access} def foo[@unchecked A, @annotated B, U](@Field() a: A, @Field("b") b: B): U = ???
|}
""".stripMargin
val proc1 =
s"""
|/**
| * XXX
| */
|trait X {
| %s${access} def foo[A, B, U](a: A, b: B): Unit = ()
|}
""".stripMargin
val proc2 =
s"""
|/**
| * XXX
| */
|trait X {
| %s${access} def foo[A, B, U](a: A, b: B) {}
|}
""".stripMargin
def doc(proc: Boolean) =
"""
|/**
| * Does foo
| * @param a the A
| * @param b the B
| * @tparam A the A
| * @tparam B the B
| * @tparam U the U%s
| */
""".stripMargin format (if (proc) "" else "\\n * @return some u")
def missingTypeParamsDoc(proc: Boolean) =
"""
|/**
| * Does foo
| * @param a the A
| * @param b the B
| * @tparam A the A
| * @tparam U the U%s
| */
| """.stripMargin format (if (proc) "" else "\\n * @return some u")
def missingParamsDoc(proc: Boolean) =
"""
|/**
| * Does foo
| * @param a the A
| * @tparam A the A
| * @tparam B the B
| * @tparam U the U%s
| */
| """.stripMargin format (if (proc) "" else "\\n * @return some u")
val missingReturnDoc =
"""
|/**
| * Does foo
| * @param a the A
| * @param b the b
| * @tparam A the A
| * @tparam B the B
| * @tparam U the U
| */
| """.stripMargin
List(fun, annotatedFun).foreach { source =>
assertErrors(Nil, source format doc(false))
assertErrors(if (checked) List(lineError(6, List(Missing))) else Nil, source format "")
assertErrors(if (checked) List(lineError(15, List(missingParam("b")))) else Nil, source format missingParamsDoc(false))
assertErrors(if (checked) List(lineError(15, List(MalformedTypeParams))) else Nil, source format missingTypeParamsDoc(false))
assertErrors(if (checked) List(lineError(15, List(MalformedReturn))) else Nil, source format missingReturnDoc)
}
List(proc1, proc2).foreach { source =>
assertErrors(Nil, source format doc(false))
assertErrors(if (checked) List(lineError(6, List(Missing))) else Nil, source format "")
assertErrors(if (checked) List(lineError(14, List(missingParam("b")))) else Nil, source format missingParamsDoc(true))
assertErrors(if (checked) List(lineError(14, List(MalformedTypeParams))) else Nil, source format missingTypeParamsDoc(true))
}
}
List("", "final ").foreach { modifier =>
al(modifier, true)
al(s"private[xxx] $modifier", true)
al(s"protected[xxx] $modifier", true)
al(s"protected $modifier", true)
al(s"private $modifier", false)
al(s"@tailrec @another(a = b) $modifier", true)
al(s"@tailrec @another(a = b) private[xxx] $modifier", true)
al(s"@tailrec @another(a = b) protected[xxx] $modifier", true)
al(s"@tailrec @another(a = b) protected $modifier", true)
al(s"@tailrec @another(a = b) private $modifier", false)
}
}
@Test def returnAsParamDescription(): Unit = {
val source =
"""
|/**
| * Doc
| */
|object X {
|
| /**
| * Foo does some foos. With a
| *
| * ```
| * code example here
| * ```
| * and something or other else with ``code`` and (link)[to]
| *
| * @param a
| * Some text for parameter A
| * More for A
| * @param b B
| * @param c
| * @return some integer
| */
| def foo(a: Int, b: Int, c: Int): Int = a + b
|}
""".stripMargin
assertErrors(List(lineError(22, List(emptyParam("c")))), source)
}
@Test def valsVarsAndTypes(): Unit = {
def al(what: String = "", checked: Boolean): Unit = {
val tlDoc =
"""
|/**
| * Top-level doc
| */
""".stripMargin
def source(container: String) =
s"""
|$tlDoc
|$container Foo {
| %s${what}
|}
""".stripMargin
val doc =
"""
|/**
| * This is the documentation for whatever follows with no params, no tparams, no return, no throws
| */
""".stripMargin
List(source("class"), source("case class"), source("object ")).foreach { source =>
assertErrors(Nil, source format doc)
assertErrors(if (checked) List(lineError(8, List(Missing))) else Nil, source format "")
}
}
List("val a = 1", "var a = 2", "type X = String").foreach { member =>
al(member, true)
al(s"private[pkg] $member", true)
al(s"protected[pkg] $member", true)
al(s"protected $member", true)
al(s"private $member", false)
}
}
}
| dwango/scalastyle | src/test/scala/org/scalastyle/scalariform/ScalaDocCheckerTest.scala | Scala | apache-2.0 | 9,529 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.immutable.HashSet
import scala.collection.mutable.{ArrayBuffer, Stack}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral}
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.objects.AssertNotNull
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/*
* Optimization rules defined in this file should not affect the structure of the logical plan.
*/
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
* equivalent [[Literal]] values.
*/
object ConstantFolding extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsDown {
// Skip redundant folding of literals. This rule is technically not necessary. Placing this
// here avoids running the next rule for Literal values, which would create a new Literal
// object and running eval unnecessarily.
case l: Literal => l
// Fold expressions that are foldable.
case e if e.foldable => Literal.create(e.eval(EmptyRow), e.dataType)
}
}
}
/**
* Substitutes [[Attribute Attributes]] which can be statically evaluated with their corresponding
* value in conjunctive [[Expression Expressions]]
* eg.
* {{{
* SELECT * FROM table WHERE i = 5 AND j = i + 3
* ==> SELECT * FROM table WHERE i = 5 AND j = 8
* }}}
*
* Approach used:
* - Populate a mapping of attribute => constant value by looking at all the equals predicates
* - Using this mapping, replace occurrences of the attributes with the corresponding constant values
* in the AND node.
*/
object ConstantPropagation extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case f: Filter =>
val (newCondition, _) = traverse(f.condition, replaceChildren = true)
if (newCondition.isDefined) {
f.copy(condition = newCondition.get)
} else {
f
}
}
type EqualityPredicates = Seq[((AttributeReference, Literal), BinaryComparison)]
/**
* Traverse a condition as a tree and replace attributes with constant values.
* - On matching [[And]], recursively traverse each child and get propagated mappings.
* If the current node is not child of another [[And]], replace all occurrences of the
* attributes with the corresponding constant values.
* - If a child of [[And]] is [[EqualTo]] or [[EqualNullSafe]], propagate the mapping
* of attribute => constant.
* - On matching [[Or]] or [[Not]], recursively traverse each child and propagate an empty mapping.
* - Otherwise, stop traversal and propagate empty mapping.
* @param condition condition to be traversed
* @param replaceChildren whether to replace attributes with constant values in children
* @return A tuple including:
* 1. Option[Expression]: optional changed condition after traversal
* 2. EqualityPredicates: propagated mapping of attribute => constant
*/
private def traverse(condition: Expression, replaceChildren: Boolean)
: (Option[Expression], EqualityPredicates) =
condition match {
case e @ EqualTo(left: AttributeReference, right: Literal) => (None, Seq(((left, right), e)))
case e @ EqualTo(left: Literal, right: AttributeReference) => (None, Seq(((right, left), e)))
case e @ EqualNullSafe(left: AttributeReference, right: Literal) =>
(None, Seq(((left, right), e)))
case e @ EqualNullSafe(left: Literal, right: AttributeReference) =>
(None, Seq(((right, left), e)))
case a: And =>
val (newLeft, equalityPredicatesLeft) = traverse(a.left, replaceChildren = false)
val (newRight, equalityPredicatesRight) = traverse(a.right, replaceChildren = false)
val equalityPredicates = equalityPredicatesLeft ++ equalityPredicatesRight
val newSelf = if (equalityPredicates.nonEmpty && replaceChildren) {
Some(And(replaceConstants(newLeft.getOrElse(a.left), equalityPredicates),
replaceConstants(newRight.getOrElse(a.right), equalityPredicates)))
} else {
if (newLeft.isDefined || newRight.isDefined) {
Some(And(newLeft.getOrElse(a.left), newRight.getOrElse(a.right)))
} else {
None
}
}
(newSelf, equalityPredicates)
case o: Or =>
// Ignore the EqualityPredicates from children since they are only propagated through And.
val (newLeft, _) = traverse(o.left, replaceChildren = true)
val (newRight, _) = traverse(o.right, replaceChildren = true)
val newSelf = if (newLeft.isDefined || newRight.isDefined) {
Some(Or(left = newLeft.getOrElse(o.left), right = newRight.getOrElse(o.right)))
} else {
None
}
(newSelf, Seq.empty)
case n: Not =>
// Ignore the EqualityPredicates from children since they are only propagated through And.
val (newChild, _) = traverse(n.child, replaceChildren = true)
(newChild.map(Not), Seq.empty)
case _ => (None, Seq.empty)
}
private def replaceConstants(condition: Expression, equalityPredicates: EqualityPredicates)
: Expression = {
val constantsMap = AttributeMap(equalityPredicates.map(_._1))
val predicates = equalityPredicates.map(_._2).toSet
def replaceConstants0(expression: Expression) = expression transform {
case a: AttributeReference => constantsMap.getOrElse(a, a)
}
condition transform {
case e @ EqualTo(_, _) if !predicates.contains(e) => replaceConstants0(e)
case e @ EqualNullSafe(_, _) if !predicates.contains(e) => replaceConstants0(e)
}
}
}
/**
* Reorder associative integral-type operators and fold all constants into one.
*/
object ReorderAssociativeOperator extends Rule[LogicalPlan] {
private def flattenAdd(
expression: Expression,
groupSet: ExpressionSet): Seq[Expression] = expression match {
case expr @ Add(l, r) if !groupSet.contains(expr) =>
flattenAdd(l, groupSet) ++ flattenAdd(r, groupSet)
case other => other :: Nil
}
private def flattenMultiply(
expression: Expression,
groupSet: ExpressionSet): Seq[Expression] = expression match {
case expr @ Multiply(l, r) if !groupSet.contains(expr) =>
flattenMultiply(l, groupSet) ++ flattenMultiply(r, groupSet)
case other => other :: Nil
}
private def collectGroupingExpressions(plan: LogicalPlan): ExpressionSet = plan match {
case Aggregate(groupingExpressions, aggregateExpressions, child) =>
ExpressionSet.apply(groupingExpressions)
case _ => ExpressionSet(Seq.empty)
}
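// e.g. for deterministic integral operands, (a + 1) + (b + 2) is flattened and refolded into (a + b) + 3;
// sub-expressions that are themselves grouping expressions of an Aggregate are left untouched.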
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan =>
// We have to respect aggregate expressions which exists in grouping expressions when plan
// is an Aggregate operator, otherwise the optimized expression could not be derived from
// grouping expressions.
val groupingExpressionSet = collectGroupingExpressions(q)
q transformExpressionsDown {
case a: Add if a.deterministic && a.dataType.isInstanceOf[IntegralType] =>
val (foldables, others) = flattenAdd(a, groupingExpressionSet).partition(_.foldable)
if (foldables.size > 1) {
val foldableExpr = foldables.reduce((x, y) => Add(x, y))
val c = Literal.create(foldableExpr.eval(EmptyRow), a.dataType)
if (others.isEmpty) c else Add(others.reduce((x, y) => Add(x, y)), c)
} else {
a
}
case m: Multiply if m.deterministic && m.dataType.isInstanceOf[IntegralType] =>
val (foldables, others) = flattenMultiply(m, groupingExpressionSet).partition(_.foldable)
if (foldables.size > 1) {
val foldableExpr = foldables.reduce((x, y) => Multiply(x, y))
val c = Literal.create(foldableExpr.eval(EmptyRow), m.dataType)
if (others.isEmpty) c else Multiply(others.reduce((x, y) => Multiply(x, y)), c)
} else {
m
}
}
}
}
/**
* Optimize IN predicates:
* 1. Converts the predicate to false when the list is empty and
* the value is not nullable.
* 2. Removes literal repetitions.
* 3. Replaces [[In (value, seq[Literal])]] with optimized version
* [[InSet (value, HashSet[Literal])]] which is much faster.
*/
object OptimizeIn extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsDown {
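// e.g. `a IN (1, 1, 2)` is first deduplicated to `a IN (1, 2)`; a single-element list becomes `a = 1`,
// a list longer than the conversion threshold becomes an InSet lookup, and an empty list folds to
// false (or null when `a` is null).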
case In(v, list) if list.isEmpty =>
// When v is not nullable, the following expression will be optimized
// to FalseLiteral which is tested in OptimizeInSuite.scala
If(IsNotNull(v), FalseLiteral, Literal(null, BooleanType))
case expr @ In(v, list) if expr.inSetConvertible =>
val newList = ExpressionSet(list).toSeq
if (newList.length == 1
// TODO: `EqualTo` for structural types are not working. Until SPARK-24443 is addressed,
// TODO: we exclude them in this rule.
&& !v.isInstanceOf[CreateNamedStruct]
&& !newList.head.isInstanceOf[CreateNamedStruct]) {
EqualTo(v, newList.head)
} else if (newList.length > SQLConf.get.optimizerInSetConversionThreshold) {
val hSet = newList.map(e => e.eval(EmptyRow))
InSet(v, HashSet() ++ hSet)
} else if (newList.length < list.length) {
expr.copy(list = newList)
} else { // newList.length == list.length && newList.length > 1
expr
}
}
}
}
/**
* Simplifies boolean expressions:
* 1. Simplifies expressions whose answer can be determined without evaluating both sides.
* 2. Eliminates / extracts common factors.
* 3. Merges identical expressions.
* 4. Removes `Not` operator.
*/
object BooleanSimplification extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
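// Identity/annihilator rules for the boolean literals, e.g. `x AND true` becomes `x` and
// `x OR true` becomes `true`; the null-aware rewrites follow further down.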
case TrueLiteral And e => e
case e And TrueLiteral => e
case FalseLiteral Or e => e
case e Or FalseLiteral => e
case FalseLiteral And _ => FalseLiteral
case _ And FalseLiteral => FalseLiteral
case TrueLiteral Or _ => TrueLiteral
case _ Or TrueLiteral => TrueLiteral
case a And b if Not(a).semanticEquals(b) =>
If(IsNull(a), Literal.create(null, a.dataType), FalseLiteral)
case a And b if a.semanticEquals(Not(b)) =>
If(IsNull(b), Literal.create(null, b.dataType), FalseLiteral)
case a Or b if Not(a).semanticEquals(b) =>
If(IsNull(a), Literal.create(null, a.dataType), TrueLiteral)
case a Or b if a.semanticEquals(Not(b)) =>
If(IsNull(b), Literal.create(null, b.dataType), TrueLiteral)
case a And b if a.semanticEquals(b) => a
case a Or b if a.semanticEquals(b) => a
// The following optimizations are applicable only when the operands are not nullable,
// since the three-value logic of AND and OR are different in NULL handling.
// See the chart:
// +---------+---------+---------+---------+
// | operand | operand | OR | AND |
// +---------+---------+---------+---------+
// | TRUE | TRUE | TRUE | TRUE |
// | TRUE | FALSE | TRUE | FALSE |
// | FALSE | FALSE | FALSE | FALSE |
// | UNKNOWN | TRUE | TRUE | UNKNOWN |
// | UNKNOWN | FALSE | UNKNOWN | FALSE |
// | UNKNOWN | UNKNOWN | UNKNOWN | UNKNOWN |
// +---------+---------+---------+---------+
// (NULL And (NULL Or FALSE)) = NULL, but (NULL And FALSE) = FALSE. Thus, a can't be nullable.
case a And (b Or c) if !a.nullable && Not(a).semanticEquals(b) => And(a, c)
// (NULL And (FALSE Or NULL)) = NULL, but (NULL And FALSE) = FALSE. Thus, a can't be nullable.
case a And (b Or c) if !a.nullable && Not(a).semanticEquals(c) => And(a, b)
// ((NULL Or FALSE) And NULL) = NULL, but (FALSE And NULL) = FALSE. Thus, c can't be nullable.
case (a Or b) And c if !c.nullable && a.semanticEquals(Not(c)) => And(b, c)
// ((FALSE Or NULL) And NULL) = NULL, but (FALSE And NULL) = FALSE. Thus, c can't be nullable.
case (a Or b) And c if !c.nullable && b.semanticEquals(Not(c)) => And(a, c)
// (NULL Or (NULL And TRUE)) = NULL, but (NULL Or TRUE) = TRUE. Thus, a can't be nullable.
case a Or (b And c) if !a.nullable && Not(a).semanticEquals(b) => Or(a, c)
// (NULL Or (TRUE And NULL)) = NULL, but (NULL Or TRUE) = TRUE. Thus, a can't be nullable.
case a Or (b And c) if !a.nullable && Not(a).semanticEquals(c) => Or(a, b)
// ((NULL And TRUE) Or NULL) = NULL, but (TRUE Or NULL) = TRUE. Thus, c can't be nullable.
case (a And b) Or c if !c.nullable && a.semanticEquals(Not(c)) => Or(b, c)
// ((TRUE And NULL) Or NULL) = NULL, but (TRUE Or NULL) = TRUE. Thus, c can't be nullable.
case (a And b) Or c if !c.nullable && b.semanticEquals(Not(c)) => Or(a, c)
// Common factor elimination for conjunction
case and @ (left And right) =>
// 1. Split left and right to get the disjunctive predicates,
// i.e. lhs = (a, b), rhs = (a, c)
// 2. Find the common predicates between lhsSet and rhsSet, i.e. common = (a)
// 3. Remove the common predicates from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c)
// 4. Apply the formula, get the optimized predicate: common || (ldiff && rdiff)
val lhs = splitDisjunctivePredicates(left)
val rhs = splitDisjunctivePredicates(right)
val common = lhs.filter(e => rhs.exists(e.semanticEquals))
if (common.isEmpty) {
// No common factors, return the original predicate
and
} else {
val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals))
val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals))
if (ldiff.isEmpty || rdiff.isEmpty) {
// (a || b || c || ...) && (a || b) => (a || b)
common.reduce(Or)
} else {
// (a || b || c || ...) && (a || b || d || ...) =>
// ((c || ...) && (d || ...)) || a || b
(common :+ And(ldiff.reduce(Or), rdiff.reduce(Or))).reduce(Or)
}
}
// Common factor elimination for disjunction
case or @ (left Or right) =>
// 1. Split left and right to get the conjunctive predicates,
// i.e. lhs = (a, b), rhs = (a, c)
// 2. Find the common predicates between lhsSet and rhsSet, i.e. common = (a)
// 3. Remove the common predicates from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c)
// 4. Apply the formula, get the optimized predicate: common && (ldiff || rdiff)
val lhs = splitConjunctivePredicates(left)
val rhs = splitConjunctivePredicates(right)
val common = lhs.filter(e => rhs.exists(e.semanticEquals))
if (common.isEmpty) {
// No common factors, return the original predicate
or
} else {
val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals))
val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals))
if (ldiff.isEmpty || rdiff.isEmpty) {
// (a && b) || (a && b && c && ...) => a && b
common.reduce(And)
} else {
// (a && b && c && ...) || (a && b && d && ...) =>
// ((c && ...) || (d && ...)) && a && b
(common :+ Or(ldiff.reduce(And), rdiff.reduce(And))).reduce(And)
}
}
case Not(TrueLiteral) => FalseLiteral
case Not(FalseLiteral) => TrueLiteral
case Not(a GreaterThan b) => LessThanOrEqual(a, b)
case Not(a GreaterThanOrEqual b) => LessThan(a, b)
case Not(a LessThan b) => GreaterThanOrEqual(a, b)
case Not(a LessThanOrEqual b) => GreaterThan(a, b)
case Not(a Or b) => And(Not(a), Not(b))
case Not(a And b) => Or(Not(a), Not(b))
case Not(Not(e)) => e
case Not(IsNull(e)) => IsNotNull(e)
case Not(IsNotNull(e)) => IsNull(e)
}
}
}
/**
* Simplifies binary comparisons with semantically-equal expressions:
* 1) Replace '<=>' with 'true' literal.
* 2) Replace '=', '<=', and '>=' with 'true' literal if both operands are non-nullable.
* 3) Replace '<' and '>' with 'false' literal if both operands are non-nullable.
*/
object SimplifyBinaryComparison extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
// True with equality
case a EqualNullSafe b if a.semanticEquals(b) => TrueLiteral
case a EqualTo b if !a.nullable && !b.nullable && a.semanticEquals(b) => TrueLiteral
case a GreaterThanOrEqual b if !a.nullable && !b.nullable && a.semanticEquals(b) =>
TrueLiteral
case a LessThanOrEqual b if !a.nullable && !b.nullable && a.semanticEquals(b) => TrueLiteral
// False with inequality
case a GreaterThan b if !a.nullable && !b.nullable && a.semanticEquals(b) => FalseLiteral
case a LessThan b if !a.nullable && !b.nullable && a.semanticEquals(b) => FalseLiteral
}
}
}
/**
* Simplifies conditional expressions (if / case).
*/
object SimplifyConditionals extends Rule[LogicalPlan] with PredicateHelper {
private def falseOrNullLiteral(e: Expression): Boolean = e match {
case FalseLiteral => true
case Literal(null, _) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case If(TrueLiteral, trueValue, _) => trueValue
case If(FalseLiteral, _, falseValue) => falseValue
case If(Literal(null, _), _, falseValue) => falseValue
case If(cond, trueValue, falseValue)
if cond.deterministic && trueValue.semanticEquals(falseValue) => trueValue
case e @ CaseWhen(branches, elseValue) if branches.exists(x => falseOrNullLiteral(x._1)) =>
// If there are branches that are always false, remove them.
// If there are no more branches left, just use the else value.
// Note that these two are handled together here in a single case statement because
// otherwise we cannot determine the data type for the elseValue if it is None (i.e. null).
val newBranches = branches.filter(x => !falseOrNullLiteral(x._1))
if (newBranches.isEmpty) {
elseValue.getOrElse(Literal.create(null, e.dataType))
} else {
e.copy(branches = newBranches)
}
case CaseWhen(branches, _) if branches.headOption.map(_._1).contains(TrueLiteral) =>
// If the first branch is a true literal, remove the entire CaseWhen and use the value
// from that. Note that CaseWhen.branches should never be empty, and as a result the
// headOption (rather than head) added above is just an extra (and unnecessary) safeguard.
branches.head._2
case CaseWhen(branches, _) if branches.exists(_._1 == TrueLiteral) =>
// a branch with a true condition eliminates all following branches,
// these branches can be pruned away
val (h, t) = branches.span(_._1 != TrueLiteral)
CaseWhen( h :+ t.head, None)
case e @ CaseWhen(branches, Some(elseValue))
if branches.forall(_._2.semanticEquals(elseValue)) =>
// For non-deterministic conditions with side effect, we can not remove it, or change
// the ordering. As a result, we try to remove the deterministic conditions from the tail.
var hitNonDeterministicCond = false
var i = branches.length
while (i > 0 && !hitNonDeterministicCond) {
hitNonDeterministicCond = !branches(i - 1)._1.deterministic
if (!hitNonDeterministicCond) {
i -= 1
}
}
if (i == 0) {
elseValue
} else {
e.copy(branches = branches.take(i).map(branch => (branch._1, elseValue)))
}
}
}
}
/**
* Simplifies LIKE expressions that do not need full regular expressions to evaluate the condition.
* For example, when the expression is just checking to see if a string starts with a given
* pattern.
*/
object LikeSimplification extends Rule[LogicalPlan] {
// if guards below protect from escapes on trailing %.
// Cases like "something\%" are not optimized, but this does not affect correctness.
private val startsWith = "([^_%]+)%".r
private val endsWith = "%([^_%]+)".r
private val startsAndEndsWith = "([^_%]+)%([^_%]+)".r
private val contains = "%([^_%]+)%".r
private val equalTo = "([^_%]*)".r
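// Roughly: "abc%" -> StartsWith, "%abc" -> EndsWith, "abc%xyz" -> StartsWith && EndsWith (with a
// Length guard), "%abc%" -> Contains, a pattern with no wildcards -> plain EqualTo; anything else
// keeps the original Like.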
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case Like(input, Literal(pattern, StringType), escapeChar) =>
if (pattern == null) {
// If pattern is null, return null value directly, since "col like null" == null.
Literal(null, BooleanType)
} else {
val escapeStr = String.valueOf(escapeChar)
pattern.toString match {
case startsWith(prefix) if !prefix.endsWith(escapeStr) =>
StartsWith(input, Literal(prefix))
case endsWith(postfix) =>
EndsWith(input, Literal(postfix))
// 'a%a' pattern is basically same with 'a%' && '%a'.
* However, the additional `Length` condition is required to prevent 'a' from matching 'a%a'.
case startsAndEndsWith(prefix, postfix) if !prefix.endsWith(escapeStr) =>
And(GreaterThanOrEqual(Length(input), Literal(prefix.length + postfix.length)),
And(StartsWith(input, Literal(prefix)), EndsWith(input, Literal(postfix))))
case contains(infix) if !infix.endsWith(escapeStr) =>
Contains(input, Literal(infix))
case equalTo(str) =>
EqualTo(input, Literal(str))
case _ => Like(input, Literal.create(pattern, StringType), escapeChar)
}
}
}
}
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
* equivalent [[Literal]] values. This rule is more specific with
* Null value propagation from bottom to top of the expression tree.
*/
object NullPropagation extends Rule[LogicalPlan] {
private def isNullLiteral(e: Expression): Boolean = e match {
case Literal(null, _) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case e @ WindowExpression(Cast(Literal(0L, _), _, _), _) =>
Cast(Literal(0L), e.dataType, Option(SQLConf.get.sessionLocalTimeZone))
case e @ AggregateExpression(Count(exprs), _, _, _, _) if exprs.forall(isNullLiteral) =>
Cast(Literal(0L), e.dataType, Option(SQLConf.get.sessionLocalTimeZone))
case ae @ AggregateExpression(Count(exprs), _, false, _, _) if !exprs.exists(_.nullable) =>
// This rule should be only triggered when isDistinct field is false.
ae.copy(aggregateFunction = Count(Literal(1)))
case IsNull(c) if !c.nullable => Literal.create(false, BooleanType)
case IsNotNull(c) if !c.nullable => Literal.create(true, BooleanType)
case EqualNullSafe(Literal(null, _), r) => IsNull(r)
case EqualNullSafe(l, Literal(null, _)) => IsNull(l)
case AssertNotNull(c, _) if !c.nullable => c
// For Coalesce, remove null literals.
case e @ Coalesce(children) =>
val newChildren = children.filterNot(isNullLiteral)
if (newChildren.isEmpty) {
Literal.create(null, e.dataType)
} else if (newChildren.length == 1) {
newChildren.head
} else {
Coalesce(newChildren)
}
// If the value expression is NULL then transform the In expression to null literal.
case In(Literal(null, _), _) => Literal.create(null, BooleanType)
case InSubquery(Seq(Literal(null, _)), _) => Literal.create(null, BooleanType)
// Non-leaf NullIntolerant expressions will return null, if at least one of its children is
// a null literal.
case e: NullIntolerant if e.children.exists(isNullLiteral) =>
Literal.create(null, e.dataType)
}
}
}
/**
* Replace attributes with aliases of the original foldable expressions if possible.
* Other optimizations will take advantage of the propagated foldable expressions. For example,
* this rule can optimize
* {{{
* SELECT 1.0 x, 'abc' y, Now() z ORDER BY x, y, 3
* }}}
* to
* {{{
* SELECT 1.0 x, 'abc' y, Now() z ORDER BY 1.0, 'abc', Now()
* }}}
* and other rules can further optimize it and remove the ORDER BY operator.
*/
object FoldablePropagation extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = {
var foldableMap = AttributeMap(plan.flatMap {
case Project(projectList, _) => projectList.collect {
case a: Alias if a.child.foldable => (a.toAttribute, a)
}
case _ => Nil
})
val replaceFoldable: PartialFunction[Expression, Expression] = {
case a: AttributeReference if foldableMap.contains(a) => foldableMap(a)
}
if (foldableMap.isEmpty) {
plan
} else {
CleanupAliases(plan.transformUp {
// We can only propagate foldables for a subset of unary nodes.
case u: UnaryNode if foldableMap.nonEmpty && canPropagateFoldables(u) =>
u.transformExpressions(replaceFoldable)
// Join derives the output attributes from its child while they are actually not the
// same attributes. For example, the output of outer join is not always picked from its
// children, but can also be null. We should exclude these miss-derived attributes when
// propagating the foldable expressions.
// TODO(cloud-fan): It seems more reasonable to use new attributes as the output attributes
// of outer join.
case j @ Join(left, right, joinType, _, _) if foldableMap.nonEmpty =>
val newJoin = j.transformExpressions(replaceFoldable)
val missDerivedAttrsSet: AttributeSet = AttributeSet(joinType match {
case _: InnerLike | LeftExistence(_) => Nil
case LeftOuter => right.output
case RightOuter => left.output
case FullOuter => left.output ++ right.output
})
foldableMap = AttributeMap(foldableMap.baseMap.values.filterNot {
case (attr, _) => missDerivedAttrsSet.contains(attr)
}.toSeq)
newJoin
// We can not replace the attributes in `Expand.output`. If there are other non-leaf
// operators that have the `output` field, we should put them here too.
case expand: Expand if foldableMap.nonEmpty =>
expand.copy(projections = expand.projections.map { projection =>
projection.map(_.transform(replaceFoldable))
})
// For other plans, they are not safe to apply foldable propagation, and they should not
// propagate foldable expressions from children.
case other if foldableMap.nonEmpty =>
val childrenOutputSet = AttributeSet(other.children.flatMap(_.output))
foldableMap = AttributeMap(foldableMap.baseMap.values.filterNot {
case (attr, _) => childrenOutputSet.contains(attr)
}.toSeq)
other
})
}
}
/**
* Whitelist of all [[UnaryNode]]s for which we allow foldable propagation.
*/
private def canPropagateFoldables(u: UnaryNode): Boolean = u match {
case _: Project => true
case _: Filter => true
case _: SubqueryAlias => true
case _: Aggregate => true
case _: Window => true
case _: Sample => true
case _: GlobalLimit => true
case _: LocalLimit => true
case _: Generate => true
case _: Distinct => true
case _: AppendColumns => true
case _: AppendColumnsWithObject => true
case _: RepartitionByExpression => true
case _: Repartition => true
case _: Sort => true
case _: TypedFilter => true
case _ => false
}
}
/**
* Removes [[Cast Casts]] that are unnecessary because the input is already the correct type.
*/
object SimplifyCasts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case Cast(e, dataType, _) if e.dataType == dataType => e
case c @ Cast(e, dataType, _) => (e.dataType, dataType) match {
case (ArrayType(from, false), ArrayType(to, true)) if from == to => e
case (MapType(fromKey, fromValue, false), MapType(toKey, toValue, true))
if fromKey == toKey && fromValue == toValue => e
case _ => c
}
}
}
/**
* Removes nodes that are not necessary.
*/
object RemoveDispensableExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case UnaryPositive(child) => child
}
}
/**
* Removes the inner case conversion expressions that are unnecessary because
* the inner conversion is overwritten by the outer one.
*/
object SimplifyCaseConversionExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsUp {
case Upper(Upper(child)) => Upper(child)
case Upper(Lower(child)) => Upper(child)
case Lower(Upper(child)) => Lower(child)
case Lower(Lower(child)) => Lower(child)
}
}
}
/**
* Combine nested [[Concat]] expressions.
*/
object CombineConcats extends Rule[LogicalPlan] {
private def flattenConcats(concat: Concat): Concat = {
val stack = Stack[Expression](concat)
val flattened = ArrayBuffer.empty[Expression]
while (stack.nonEmpty) {
stack.pop() match {
case Concat(children) =>
stack.pushAll(children.reverse)
// If `spark.sql.function.concatBinaryAsString` is false, nested `Concat` exprs possibly
// have `Concat`s with binary output. Since `TypeCoercion` casts them into strings,
// we need to handle the case to combine all nested `Concat`s.
case c @ Cast(Concat(children), StringType, _) =>
val newChildren = children.map { e => c.copy(child = e) }
stack.pushAll(newChildren.reverse)
case child =>
flattened += child
}
}
Concat(flattened)
}
private def hasNestedConcats(concat: Concat): Boolean = concat.children.exists {
case c: Concat => true
case c @ Cast(Concat(children), StringType, _) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan.transformExpressionsDown {
case concat: Concat if hasNestedConcats(concat) =>
flattenConcats(concat)
}
}
| jkbradley/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala | Scala | apache-2.0 | 31,906 |
package lmxml
package shortcuts.html
trait HtmlShortcuts extends LmxmlParsers {
def docContent = """([^\n]+)""".r ^^ { s => s }
def doctype: Parser[TopLevel] = "!" ~> docContent ^^ {
case content => TextNode("<!DOCTYPE %s>\n".format(content), true, _)
}
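// e.g. an lmxml line of `!html` expands to the text node "<!DOCTYPE html>\n"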
def js: Parser[TopLevel] = "js" ~> inlineParams ^^ {
case attrs =>
LmxmlNode("script", Map("type" -> "text/javascript") ++ attrs, _)
}
def css: Parser[TopLevel] = "css" ~> inlineParams ^^ {
case attrs if attrs.isEmpty =>
LmxmlNode("style", Map.empty, _)
case attrs =>
val defaultAttrs = Map(
"rel" -> "stylesheet",
"type" -> "text/css"
)
LmxmlNode("link", defaultAttrs ++ attrs, _)
}
def div: Parser[TopLevel] = ">" ~> inlineParams ^^ {
case attrs => LmxmlNode("div", attrs, _)
}
override def topLevel = (doctype | js | css | div) | super.topLevel
}
trait DynamicShortcuts extends HtmlShortcuts {
val definition: Seq[Parser[TopLevel]]
def define(key: String, result: String, ats: Attrs = Map()): Parser[TopLevel] =
key ~> inlineParams ^^ { case attrs => LmxmlNode(result, ats ++ attrs, _) }
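// Hypothetical usage sketch: a subclass could supply, say,
// val definition = Seq(define("btn", "button", Map("type" -> "button")))
// so that `btn` parses like the built-in `js`/`css`/`div` shortcuts above.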
override def topLevel = definition.foldRight(super.topLevel)(_ | _)
}
| philcali/lmxml | html/src/main/scala/shortcuts.scala | Scala | mit | 1,220 |
package org.velocity4s.log
import org.apache.velocity.runtime.log.LogChute
import org.jboss.logging.Logger
import org.jboss.logging.Logger.Level
import org.scalatest.FunSpec
import org.scalatest.Matchers._
import org.scalatest.mock.MockitoSugar
import org.mockito.Mockito._
import org.mockito.internal.util.reflection.Whitebox
class JBossLoggingLogChuteSpec extends FunSpec with MockitoSugar {
describe("JBossLoggingLogChute Spec") {
it("debug enabled") {
val mockLogger = mock[Logger]
val jbossLoggingLogChute = new JBossLoggingLogChute
Whitebox.setInternalState(jbossLoggingLogChute, "logger", mockLogger)
when(mockLogger.isEnabled(Level.DEBUG)).thenReturn(true)
when(mockLogger.isEnabled(Level.TRACE)).thenReturn(false)
when(mockLogger.isEnabled(Level.INFO)).thenReturn(false)
when(mockLogger.isEnabled(Level.WARN)).thenReturn(false)
when(mockLogger.isEnabled(Level.ERROR)).thenReturn(false)
jbossLoggingLogChute.isLevelEnabled(LogChute.DEBUG_ID) should be (true)
}
it("trace enabled") {
val mockLogger = mock[Logger]
val jbossLoggingLogChute = new JBossLoggingLogChute
Whitebox.setInternalState(jbossLoggingLogChute, "logger", mockLogger)
when(mockLogger.isEnabled(Level.DEBUG)).thenReturn(false)
when(mockLogger.isEnabled(Level.TRACE)).thenReturn(true)
when(mockLogger.isEnabled(Level.INFO)).thenReturn(false)
when(mockLogger.isEnabled(Level.WARN)).thenReturn(false)
when(mockLogger.isEnabled(Level.ERROR)).thenReturn(false)
jbossLoggingLogChute.isLevelEnabled(LogChute.TRACE_ID) should be (true)
}
it("info enabled") {
val mockLogger = mock[Logger]
val jbossLoggingLogChute = new JBossLoggingLogChute
Whitebox.setInternalState(jbossLoggingLogChute, "logger", mockLogger)
when(mockLogger.isEnabled(Level.DEBUG)).thenReturn(false)
when(mockLogger.isEnabled(Level.TRACE)).thenReturn(false)
when(mockLogger.isEnabled(Level.INFO)).thenReturn(true)
when(mockLogger.isEnabled(Level.WARN)).thenReturn(false)
when(mockLogger.isEnabled(Level.ERROR)).thenReturn(false)
jbossLoggingLogChute.isLevelEnabled(LogChute.INFO_ID) should be (true)
}
it("warn enabled") {
val mockLogger = mock[Logger]
val jbossLoggingLogChute = new JBossLoggingLogChute
Whitebox.setInternalState(jbossLoggingLogChute, "logger", mockLogger)
when(mockLogger.isEnabled(Level.DEBUG)).thenReturn(false)
when(mockLogger.isEnabled(Level.TRACE)).thenReturn(false)
when(mockLogger.isEnabled(Level.INFO)).thenReturn(false)
when(mockLogger.isEnabled(Level.WARN)).thenReturn(true)
when(mockLogger.isEnabled(Level.ERROR)).thenReturn(false)
jbossLoggingLogChute.isLevelEnabled(LogChute.WARN_ID) should be (true)
}
it("error enabled") {
val mockLogger = mock[Logger]
val jbossLoggingLogChute = new JBossLoggingLogChute
Whitebox.setInternalState(jbossLoggingLogChute, "logger", mockLogger)
when(mockLogger.isEnabled(Level.DEBUG)).thenReturn(false)
when(mockLogger.isEnabled(Level.TRACE)).thenReturn(false)
when(mockLogger.isEnabled(Level.INFO)).thenReturn(false)
when(mockLogger.isEnabled(Level.WARN)).thenReturn(false)
when(mockLogger.isEnabled(Level.ERROR)).thenReturn(true)
jbossLoggingLogChute.isLevelEnabled(LogChute.ERROR_ID) should be (true)
}
it("log message") {
val mockLogger = mock[Logger]
val jbossLoggingLogChute = new JBossLoggingLogChute
Whitebox.setInternalState(jbossLoggingLogChute, "logger", mockLogger)
jbossLoggingLogChute.log(LogChute.DEBUG_ID, "debug message")
jbossLoggingLogChute.log(LogChute.TRACE_ID, "trace message")
jbossLoggingLogChute.log(LogChute.INFO_ID, "info message")
jbossLoggingLogChute.log(LogChute.WARN_ID, "warn message")
jbossLoggingLogChute.log(LogChute.ERROR_ID, "error message")
verify(mockLogger).debug("debug message")
verify(mockLogger).trace("trace message")
verify(mockLogger).info("info message")
verify(mockLogger).warn("warn message")
verify(mockLogger).error("error message")
}
it("log message with Throwable") {
val mockLogger = mock[Logger]
val jbossLoggingLogChute = new JBossLoggingLogChute
Whitebox.setInternalState(jbossLoggingLogChute, "logger", mockLogger)
val th = new Throwable
jbossLoggingLogChute.log(LogChute.DEBUG_ID, "debug message", th)
jbossLoggingLogChute.log(LogChute.TRACE_ID, "trace message", th)
jbossLoggingLogChute.log(LogChute.INFO_ID, "info message", th)
jbossLoggingLogChute.log(LogChute.WARN_ID, "warn message", th)
jbossLoggingLogChute.log(LogChute.ERROR_ID, "error message", th)
verify(mockLogger).debug("debug message", th)
verify(mockLogger).trace("trace message", th)
verify(mockLogger).info("info message", th)
verify(mockLogger).warn("warn message", th)
verify(mockLogger).error("error message", th)
}
}
}
| kazuhira-r/velocity4s | velocity4s/src/test/scala/org/velocity4s/log/JBossLoggingLogChuteSpec.scala | Scala | apache-2.0 | 5,072 |
package im.actor.server.push
import scala.concurrent._
import akka.actor.ActorSystem
import com.google.android.gcm.server.Message
import slick.driver.PostgresDriver.api._
import im.actor.api.rpc.peers.Peer
import im.actor.server.{ models, persist }
// FIXME: #perf pinned dispatcher
private[push] class GooglePusher(pushManager: GooglePushManager, db: Database)(implicit system: ActorSystem) extends VendorPush {
implicit val ec: ExecutionContext = system.dispatcher
def deliverGooglePush(creds: models.push.GooglePushCredentials, authId: Long, seq: Int, textOpt: Option[String], originPeerOpt: Option[Peer]): Unit = {
pushManager.getInstance(creds.projectId) match {
case Some(gcmSender) ⇒
system.log.debug("Delivering google push, authId: {}, seq: {}", authId, seq)
val builder = (new Message.Builder)
.collapseKey(authId.toString)
.addData("seq", seq.toString)
val messageAction = textOpt match {
case Some(text) ⇒
persist.AuthId.findUserId(authId) flatMap {
case Some(userId) ⇒
persist.AuthSession.findAppIdByAuthId(authId) flatMap {
case Some(appId) ⇒
val category = models.AuthSession.appCategory(appId)
val paramBase = s"category.${category}.notification"
(originPeerOpt match {
case Some(originPeer) ⇒
getChatNotificationEnabled(userId, paramBase, originPeer)
case None ⇒ DBIO.successful(true)
}) flatMap {
case true ⇒
for {
showText ← getShowText(userId, paramBase)
} yield {
if (showText) {
builder.addData("message", text)
}
builder.build()
}
case false ⇒ DBIO.successful(builder.build())
}
case None ⇒ DBIO.successful(builder.build())
}
case None ⇒ DBIO.successful(builder.build())
}
case None ⇒ DBIO.successful(builder.build())
}
db.run(for {
message ← messageAction
} yield {
system.log.debug("Delivering google push message, authId: {}, message: {}", authId, message.toString)
val resultFuture = Future { blocking { gcmSender.send(message, creds.regId, 3) } }
resultFuture.map { result ⇒
system.log.debug("Google push result messageId: {}, error: {}", result.getMessageId, result.getErrorCodeName)
}.onFailure {
case e ⇒ system.log.error(e, "Failed to deliver google push")
}
})
case None ⇒
system.log.error("Key not found for projectId {}", creds.projectId)
}
}
}
| zillachan/actor-platform | actor-server/actor-cqrs/src/main/scala/im/actor/server/push/GooglePusher.scala | Scala | mit | 2,973 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.transaction
import java.io.PrintStream
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import kafka.internals.generated.{TransactionLogKey, TransactionLogValue}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.protocol.{ByteBufferAccessor, MessageUtil}
import org.apache.kafka.common.record.{CompressionType, Record, RecordBatch}
import org.apache.kafka.common.{MessageFormatter, TopicPartition}
import scala.collection.mutable
import scala.jdk.CollectionConverters._
/**
* Messages stored for the transaction topic represent the producer id and transactional status of the corresponding
* transactional id, which have versions for both the key and value fields. Key and value
* versions are used to evolve the message formats:
*
* key version 0: [transactionalId]
* -> value version 0: [producer_id, producer_epoch, expire_timestamp, status, [topic, [partition] ], timestamp]
*/
object TransactionLog {
// log-level config default values and enforced values
val DefaultNumPartitions: Int = 50
val DefaultSegmentBytes: Int = 100 * 1024 * 1024
val DefaultReplicationFactor: Short = 3.toShort
val DefaultMinInSyncReplicas: Int = 2
val DefaultLoadBufferSize: Int = 5 * 1024 * 1024
// enforce always using
// 1. cleanup policy = compact
// 2. compression = none
// 3. unclean leader election = disabled
// 4. required acks = -1 when writing
val EnforcedCompressionType: CompressionType = CompressionType.NONE
val EnforcedRequiredAcks: Short = (-1).toShort
/**
* Generates the bytes for transaction log message key
*
* @return key bytes
*/
private[transaction] def keyToBytes(transactionalId: String): Array[Byte] = {
MessageUtil.toVersionPrefixedBytes(TransactionLogKey.HIGHEST_SUPPORTED_VERSION,
new TransactionLogKey().setTransactionalId(transactionalId))
}
/**
* Generates the payload bytes for transaction log message value
*
* @return value payload bytes
*/
private[transaction] def valueToBytes(txnMetadata: TxnTransitMetadata): Array[Byte] = {
if (txnMetadata.txnState == Empty && txnMetadata.topicPartitions.nonEmpty)
throw new IllegalStateException(s"Transaction is not expected to have any partitions since its state is ${txnMetadata.txnState}: $txnMetadata")
val transactionPartitions = if (txnMetadata.txnState == Empty) null
else txnMetadata.topicPartitions
.groupBy(_.topic)
.map { case (topic, partitions) =>
new TransactionLogValue.PartitionsSchema()
.setTopic(topic)
.setPartitionIds(partitions.map(tp => Integer.valueOf(tp.partition)).toList.asJava)
}.toList.asJava
MessageUtil.toVersionPrefixedBytes(TransactionLogValue.HIGHEST_SUPPORTED_VERSION,
new TransactionLogValue()
.setProducerId(txnMetadata.producerId)
.setProducerEpoch(txnMetadata.producerEpoch)
.setTransactionTimeoutMs(txnMetadata.txnTimeoutMs)
.setTransactionStatus(txnMetadata.txnState.id)
.setTransactionLastUpdateTimestampMs(txnMetadata.txnLastUpdateTimestamp)
.setTransactionStartTimestampMs(txnMetadata.txnStartTimestamp)
.setTransactionPartitions(transactionPartitions))
}
/**
* Decodes the transaction log messages' key
*
* @return the key
*/
def readTxnRecordKey(buffer: ByteBuffer): TxnKey = {
val version = buffer.getShort
if (version >= TransactionLogKey.LOWEST_SUPPORTED_VERSION && version <= TransactionLogKey.HIGHEST_SUPPORTED_VERSION) {
val value = new TransactionLogKey(new ByteBufferAccessor(buffer), version)
TxnKey(
version = version,
transactionalId = value.transactionalId
)
} else throw new IllegalStateException(s"Unknown version $version from the transaction log message")
}
/**
* Decodes the transaction log messages' payload and retrieves the transaction metadata from it
*
* @return a transaction metadata object from the message
*/
def readTxnRecordValue(transactionalId: String, buffer: ByteBuffer): Option[TransactionMetadata] = {
// tombstone
if (buffer == null) None
else {
val version = buffer.getShort
if (version >= TransactionLogValue.LOWEST_SUPPORTED_VERSION && version <= TransactionLogValue.HIGHEST_SUPPORTED_VERSION) {
val value = new TransactionLogValue(new ByteBufferAccessor(buffer), version)
val transactionMetadata = new TransactionMetadata(
transactionalId = transactionalId,
producerId = value.producerId,
lastProducerId = RecordBatch.NO_PRODUCER_ID,
producerEpoch = value.producerEpoch,
lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH,
txnTimeoutMs = value.transactionTimeoutMs,
state = TransactionState.fromId(value.transactionStatus),
topicPartitions = mutable.Set.empty[TopicPartition],
txnStartTimestamp = value.transactionStartTimestampMs,
txnLastUpdateTimestamp = value.transactionLastUpdateTimestampMs)
if (!transactionMetadata.state.equals(Empty))
value.transactionPartitions.forEach(partitionsSchema =>
transactionMetadata.addPartitions(partitionsSchema.partitionIds
.asScala
.map(partitionId => new TopicPartition(partitionsSchema.topic, partitionId))
.toSet)
)
Some(transactionMetadata)
} else throw new IllegalStateException(s"Unknown version $version from the transaction log message value")
}
}
// Formatter for use with tools to read transaction log messages
class TransactionLogMessageFormatter extends MessageFormatter {
def writeTo(consumerRecord: ConsumerRecord[Array[Byte], Array[Byte]], output: PrintStream): Unit = {
Option(consumerRecord.key).map(key => readTxnRecordKey(ByteBuffer.wrap(key))).foreach { txnKey =>
val transactionalId = txnKey.transactionalId
val value = consumerRecord.value
val producerIdMetadata = if (value == null)
None
else
readTxnRecordValue(transactionalId, ByteBuffer.wrap(value))
output.write(transactionalId.getBytes(StandardCharsets.UTF_8))
output.write("::".getBytes(StandardCharsets.UTF_8))
output.write(producerIdMetadata.getOrElse("NULL").toString.getBytes(StandardCharsets.UTF_8))
output.write("\n".getBytes(StandardCharsets.UTF_8))
}
}
}
/**
* Exposed for printing records using [[kafka.tools.DumpLogSegments]]
*/
def formatRecordKeyAndValue(record: Record): (Option[String], Option[String]) = {
val txnKey = TransactionLog.readTxnRecordKey(record.key)
val keyString = s"transaction_metadata::transactionalId=${txnKey.transactionalId}"
val valueString = TransactionLog.readTxnRecordValue(txnKey.transactionalId, record.value) match {
case None => "<DELETE>"
case Some(txnMetadata) => s"producerId:${txnMetadata.producerId}," +
s"producerEpoch:${txnMetadata.producerEpoch}," +
s"state=${txnMetadata.state}," +
s"partitions=${txnMetadata.topicPartitions.mkString("[", ",", "]")}," +
s"txnLastUpdateTimestamp=${txnMetadata.txnLastUpdateTimestamp}," +
s"txnTimeoutMs=${txnMetadata.txnTimeoutMs}"
}
(Some(keyString), Some(valueString))
}
}
case class TxnKey(version: Short, transactionalId: String) {
override def toString: String = transactionalId
}
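// A minimal round-trip sketch of the log key format using the helpers above; the example object
// is hypothetical and only assumes it is compiled in this package (so the package-private
// keyToBytes is visible).
object TransactionLogKeyRoundTripExample {
  def main(args: Array[String]): Unit = {
    // Encode a transactional id into a version-prefixed key ...
    val keyBytes = TransactionLog.keyToBytes("example-transactional-id")
    // ... and decode it back; the decoded key should carry the same transactional id.
    val txnKey = TransactionLog.readTxnRecordKey(java.nio.ByteBuffer.wrap(keyBytes))
    println(txnKey.transactionalId)
  }
}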
|
guozhangwang/kafka
|
core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala
|
Scala
|
apache-2.0
| 8,323 |
package com.toscaruntime.it.openstack.standalone
import com.toscaruntime.it.AbstractSpec
import com.toscaruntime.it.TestConstant._
import com.toscaruntime.it.steps.AgentsSteps._
import com.toscaruntime.it.steps.CsarsSteps._
import com.toscaruntime.it.steps.DeploymentsSteps._
import com.toscaruntime.it.util.URLChecker._
import org.scalatest.MustMatchers
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
class AlienSpec extends AbstractSpec with MustMatchers {
info("Test deployment of a topology Alien in mode masterless")
feature("Deployment of Alien web app") {
scenario("Standard deployment") {
Given("I download and install all necessary csars for Alien web app deployment")
installNormativeTypesAndProviders()
downloadZipFileAndExtract("https://github.com/alien4cloud/samples/archive/master.zip", tempPath)
assertNoCompilationErrorsDetected(installCsar(tempPath.resolve("samples-master").resolve("jdk")))
assertNoCompilationErrorsDetected(installCsar(tempPath.resolve("samples-master").resolve("elasticsearch")))
assertNoCompilationErrorsDetected(installCsar(tempPath.resolve("samples-master").resolve("alien")))
assertNoCompilationErrorsDetected(installCsar(tempPath.resolve("samples-master").resolve("topology-alien4cloud-cluster")))
And("A deployment image has been created for the alien openstack topology")
createDeploymentImage("alien4cloud", openstackProvider) must be(true)
When("I deploy it")
launchDeployment("alien4cloud")
Then("I should have an output for the alien's public url")
val url = assertDeploymentHasOutput("alien4cloud", "alien_url")
And("A request on the application's url should return a response 200 OK")
checkURL(url, 200, Set.empty, 5 minutes)
And("I should be able to undeploy it without error")
launchUndeployment("alien4cloud")
}
}
}
|
vuminhkh/tosca-runtime
|
test/src/it/scala/com/toscaruntime/it/openstack/standalone/AlienSpec.scala
|
Scala
|
mit
| 1,925 |
package utils
import org.apache.spark.{SparkConf, SparkContext}
/**
* Created by jyothi on 11/12/16.
*/
@deprecated
trait CassandraSparkContext {
  /**
    * Builds a SparkContext configured with the Cassandra connection host.
    *
    * @param host Cassandra host name (optional; the default local host is used if omitted)
    * @return a SparkContext whose configuration points at the given Cassandra host
    */
def getCassandraSparkContext(host: String = "127.0.0.1"): SparkContext = {
val remoteHost = "spark://192.168.0.21:4040"
val conf = new SparkConf(true)
.setMaster("local[*]")
      //.setMaster(remoteHost) // alternatively point Spark jobs at the remote master (kept commented out due to local JVM limitations)
.setAppName("SparkCassandraScalaPlay")
.set("spark.app.id", "SparkCassandraScalaPlay")
.set("spark.driver.allowMultipleContexts", "true")
.set("spark.cassandra.connection.host", host) //we can set cassandra server name here
val sc = new SparkContext(conf)
sc
}
}
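// A minimal usage sketch of the (deprecated) trait; the example object is hypothetical and
// assumes a Cassandra node is reachable on the given host.
object CassandraSparkContextExample extends CassandraSparkContext {
  def main(args: Array[String]): Unit = {
    // Build a local SparkContext whose Cassandra connection host points at localhost.
    val sc = getCassandraSparkContext("127.0.0.1")
    println(s"Started Spark application ${sc.appName}")
    sc.stop()
  }
}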
|
arajajyothibabu/MongoDB-Cassandra-Migration
|
app/utils/CassandraSparkContext.scala
|
Scala
|
mit
| 822 |
package com.cloudray.scalapress.folder.widget
import javax.persistence._
import org.hibernate.annotations._
import com.cloudray.scalapress.widgets.Widget
import javax.persistence.Table
import javax.persistence.Entity
import scala.beans.BeanProperty
import com.cloudray.scalapress.folder.Folder
import com.cloudray.scalapress.util.UrlGenerator
import scala.xml.{Unparsed, Utility, Node}
import scala.collection.mutable.ListBuffer
import com.cloudray.scalapress.framework.{Logging, ScalapressRequest}
/** @author Stephen Samuel */
@Table(name = "categories_boxes")
@Entity
class FoldersWidget extends Widget with Logging {
@BeanProperty var depth: Int = _
@BeanProperty var excludeCurrent: Boolean = _
@ManyToOne(fetch = FetchType.EAGER)
@JoinColumn(name = "root")
@Fetch(FetchMode.JOIN)
@NotFound(action = NotFoundAction.IGNORE)
@BeanProperty
var start: Folder = _
@BeanProperty
var exclusions: String = _
override def backoffice = "/backoffice/plugin/folder/widget/folder/" + id
override def render(sreq: ScalapressRequest): Option[String] = {
val root = Option(start).getOrElse(sreq.folderRoot)
val xml = _renderChildren(root, 1)
val rendered = Option(xml).map(Utility.trim(_).toString())
rendered
}
def _renderChildren(parent: Folder, level: Int): Node = {
_folders(parent) match {
case list if list.isEmpty => null
case list => _renderChildren(list, level)
}
}
def _renderChildren(children: Seq[Folder], level: Int): Node = {
val css = if (level == 1) "widget-folder-plugin" else null
val nodes = new ListBuffer[Node]
for ( child <- children ) {
nodes.append(_renderFolder(child, level))
if (depth > level)
nodes.append(_renderChildren(child, level + 1))
}
<ul class={css}>
{nodes}
</ul>
}
def _renderFolder(folder: Folder, level: Int): Node = {
val css = s"l$level"
val id = s"w${this.id}_f${folder.id}"
val link = Unparsed(UrlGenerator.link(folder))
val xml = <li class={css} id={id}>
{link}
</li>
Utility.trim(xml)
}
def _folders(parent: Folder): Seq[Folder] =
parent.sortedSubfolders.toSeq
.filterNot(_.hidden)
.filterNot(_.name == null)
.filterNot(f => _exclusions.contains(f.id.toString))
      .filterNot(f => _exclusions.contains(f.name.toLowerCase.replaceAll("\\s{2,}", " ").trim))
def _exclusions: Seq[String] =
Option(exclusions)
.map(_.toLowerCase)
      .map(_.split(Array('\n', ',')).map(_.toLowerCase.replaceAll("\\s{2,}", " ").trim))
.getOrElse(Array[String]()).toSeq
}
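// A minimal sketch of how the exclusions string is parsed by _exclusions, without touching
// persistence or rendering; the example object is hypothetical.
object FoldersWidgetExclusionsExample {
  def main(args: Array[String]): Unit = {
    val widget = new FoldersWidget
    // Entries may be separated by commas or newlines; they are lower-cased,
    // whitespace-collapsed and trimmed before matching.
    widget.exclusions = "News,  Archived   Items\nHidden"
    // Should print a sequence like: (news, archived items, hidden)
    println(widget._exclusions.mkString("(", ", ", ")"))
  }
}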
|
vidyacraghav/scalapress
|
src/main/scala/com/cloudray/scalapress/folder/widget/FoldersWidget.scala
|
Scala
|
apache-2.0
| 2,602 |
//
// OrcTuple.scala -- Scala class OrcTuple
// Project OrcScala
//
// $Id: OrcTuple.scala 2933 2011-12-15 16:26:02Z jthywissen $
//
// Created by dkitchin on Jul 10, 2010.
//
// Copyright (c) 2011 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.values
import orc.values.sites.UntypedSite
import orc.values.sites.PartialSite
import orc.error.runtime.ArgumentTypeMismatchException
import orc.error.runtime.ArityMismatchException
import orc.error.runtime.TupleIndexOutOfBoundsException
/**
*
* @author dkitchin
*/
case class OrcTuple(values: List[AnyRef]) extends PartialSite with UntypedSite {
assert(values.size > 1)
def evaluate(args: List[AnyRef]) =
args match {
case List(bi: BigInt) => {
val i: Int = bi.intValue
if (0 <= i && i < values.size) { Some(values(i)) }
else { throw new TupleIndexOutOfBoundsException(i) }
}
case List(a) => throw new ArgumentTypeMismatchException(0, "Integer", if (a != null) a.getClass().toString() else "null")
case _ => throw new ArityMismatchException(1, args.size)
}
override def toOrcSyntax() = "(" + Format.formatSequence(values) + ")"
}
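// A minimal sketch of using an OrcTuple as a site: applying it to an index (a BigInt, matching
// Orc's integer representation) selects the corresponding element. The example object is
// hypothetical.
object OrcTupleExample {
  def main(args: Array[String]): Unit = {
    val tuple = OrcTuple(List("a", "b", "c"))
    // An in-range index yields Some(element); an out-of-range index throws
    // TupleIndexOutOfBoundsException.
    println(tuple.evaluate(List(BigInt(1)))) // expected: Some(b)
  }
}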
|
laurenyew/cOrcS
|
src/orc/values/OrcTuple.scala
|
Scala
|
bsd-3-clause
| 1,383 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.alvsanand.sgc.ftp.secure
import es.alvsanand.sgc.core.connector.{SgcConnector, SgcConnectorFactory}
import es.alvsanand.sgc.ftp.FTPSlot
/**
 * This factory creates instances of es.alvsanand.sgc.core.connector.SgcConnector for
 * integrating with a [[https://en.wikipedia.org/wiki/SSH_File_Transfer_Protocol SFTP server]]. It
 * lists and fetches all the files that are in a configured directory.
*
* Note: every file will be used as a slot.
*
* It has these features:
*
* - The SFTP client will authenticate using the credentials.
* - If the keystore is set, the SFTP client will use the private key to authenticate instead of
* the password.
*/
object SFTPSgcConnectorFactory extends SgcConnectorFactory[FTPSlot, SFTPParameters] {
/**
* Method that returns a new instance of a SgcConnector
* @param parameters The parameters of the SgcConnector
* @return A new instance of a SgcConnector.
*/
override def get(parameters: SFTPParameters): SgcConnector[FTPSlot, SFTPParameters] = {
new SFTPSgcConnector(parameters)
}
}
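// A minimal sketch of the factory's single entry point; SFTPParameters construction is not shown
// here because its fields are defined elsewhere, and the example object is hypothetical.
object SFTPSgcConnectorFactoryExample {
  // Given fully populated parameters, the factory yields a connector that lists and fetches
  // FTPSlot files from the configured directory.
  def connectorFor(parameters: SFTPParameters): SgcConnector[FTPSlot, SFTPParameters] =
    SFTPSgcConnectorFactory.get(parameters)
}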
|
alvsanand/spark-generic-connector
|
sgc-ftp/src/main/scala/es/alvsanand/sgc/ftp/secure/SFTPSgcConnectorFactory.scala
|
Scala
|
apache-2.0
| 1,880 |
package io.getquill.context.async.postgres
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import io.getquill.context.sql.ProductSpec
import io.getquill.context.sql.Id
class ProductPostgresAsyncSpec extends ProductSpec {
val context = testContext
import testContext._
override def beforeAll = {
await(testContext.run(quote(query[Product].delete)))
()
}
"Product" - {
"Insert multiple products" in {
val inserted = await(Future.sequence(productEntries.map(product => testContext.run(productInsert(lift(product))))))
val product = await(testContext.run(productById(lift(inserted(2))))).head
product.description mustEqual productEntries(2).description
product.id mustEqual inserted(2)
}
"Single insert product" in {
val inserted = await(testContext.run(productSingleInsert))
val product = await(testContext.run(productById(lift(inserted)))).head
product.description mustEqual "Window"
product.id mustEqual inserted
}
"Single insert with inlined free variable" in {
val prd = Product(0L, "test1", 1L)
val inserted = await {
testContext.run {
product.insert(_.sku -> lift(prd.sku), _.description -> lift(prd.description)).returning(_.id)
}
}
val returnedProduct = await(testContext.run(productById(lift(inserted)))).head
returnedProduct.description mustEqual "test1"
returnedProduct.sku mustEqual 1L
returnedProduct.id mustEqual inserted
}
"Single insert with free variable and explicit quotation" in {
val prd = Product(0L, "test2", 2L)
val q1 = quote {
product.insert(_.sku -> lift(prd.sku), _.description -> lift(prd.description)).returning(_.id)
}
val inserted = await(testContext.run(q1))
val returnedProduct = await(testContext.run(productById(lift(inserted)))).head
returnedProduct.description mustEqual "test2"
returnedProduct.sku mustEqual 2L
returnedProduct.id mustEqual inserted
}
"Single product insert with a method quotation" in {
val prd = Product(0L, "test3", 3L)
val inserted = await(testContext.run(productInsert(lift(prd))))
val returnedProduct = await(testContext.run(productById(lift(inserted)))).head
returnedProduct.description mustEqual "test3"
returnedProduct.sku mustEqual 3L
returnedProduct.id mustEqual inserted
}
"Single insert with value class" in {
case class Product(id: Id, description: String, sku: Long)
val prd = Product(Id(0L), "test2", 2L)
val q1 = quote {
query[Product].insert(_.sku -> lift(prd.sku), _.description -> lift(prd.description)).returning(_.id)
}
await(testContext.run(q1)) mustBe a[Id]
}
"supports casts from string to number" - {
"toInt" in {
case class Product(id: Long, description: String, sku: Int)
val queried = await {
testContext.run {
query[Product].filter(_.sku == lift("1004").toInt)
}
}.head
queried.sku mustEqual 1004L
}
"toLong" in {
val queried = await {
testContext.run {
query[Product].filter(_.sku == lift("1004").toLong)
}
}.head
queried.sku mustEqual 1004L
}
}
}
}
|
getquill/quill
|
quill-async-postgres/src/test/scala/io/getquill/context/async/postgres/ProductPostgresAsyncSpec.scala
|
Scala
|
apache-2.0
| 3,351 |
package org.jetbrains.plugins.scala.lang.psi.api.expr.xml
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
/**
* @author Alexander Podkhalyuzin
* Date: 21.04.2008
*/
trait ScXmlCDSect extends ScalaPsiElement {
}
|
gtache/intellij-lsp
|
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/api/expr/xml/ScXmlCDSect.scala
|
Scala
|
apache-2.0
| 226 |
package cwe.scala.library.visitor
/**
 * Marks a class as being visitable, signalling that it accepts visitors
*/
trait Visitable {
/**
   * A visitable host should implement this method.
   * It accepts a visitor, runs the visit and returns the visitor's last intention
   * to the caller so that visits can be chained.
*/
def accept(v: Visitor): VisitorCommand
}
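// A minimal sketch of a Visitable host, kept as a comment because the Visitor API is not shown
// in this file; it assumes Visitor exposes a visit method returning a VisitorCommand.
//
//   class Host extends Visitable {
//     def accept(v: Visitor): VisitorCommand = v.visit(this)
//   }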
|
wwwigii-system/research
|
cwe-scala-library/src/cwe/scala/library/visitor/Visitable.scala
|
Scala
|
gpl-3.0
| 387 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.CharArrayWriter
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.runtime.universe.TypeTag
import scala.util.control.NonFatal
import org.apache.commons.lang3.StringUtils
import org.apache.spark.annotation.{DeveloperApi, Experimental, InterfaceStability}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.java.function._
import org.apache.spark.api.python.{PythonRDD, SerDeUtil}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.encoders._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.json.JacksonGenerator
import org.apache.spark.sql.catalyst.optimizer.CombineUnions
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, PartitioningCollection}
import org.apache.spark.sql.catalyst.util.usePrettyExpression
import org.apache.spark.sql.execution.{FileRelation, LogicalRDD, QueryExecution, SQLExecution}
import org.apache.spark.sql.execution.command.{CreateViewCommand, ExplainCommand, GlobalTempView, LocalTempView}
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.spark.sql.execution.python.EvaluatePython
import org.apache.spark.sql.streaming.DataStreamWriter
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.unsafe.types.CalendarInterval
import org.apache.spark.util.Utils
private[sql] object Dataset {
def apply[T: Encoder](sparkSession: SparkSession, logicalPlan: LogicalPlan): Dataset[T] = {
new Dataset(sparkSession, logicalPlan, implicitly[Encoder[T]])
}
def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
val qe = sparkSession.sessionState.executePlan(logicalPlan)
qe.assertAnalyzed()
new Dataset[Row](sparkSession, qe, RowEncoder(qe.analyzed.schema))
}
}
/**
* A Dataset is a strongly typed collection of domain-specific objects that can be transformed
* in parallel using functional or relational operations. Each Dataset also has an untyped view
* called a `DataFrame`, which is a Dataset of [[Row]].
*
* Operations available on Datasets are divided into transformations and actions. Transformations
* are the ones that produce new Datasets, and actions are the ones that trigger computation and
* return results. Example transformations include map, filter, select, and aggregate (`groupBy`).
 * Example actions include count, show, or writing data out to file systems.
*
* Datasets are "lazy", i.e. computations are only triggered when an action is invoked. Internally,
* a Dataset represents a logical plan that describes the computation required to produce the data.
* When an action is invoked, Spark's query optimizer optimizes the logical plan and generates a
* physical plan for efficient execution in a parallel and distributed manner. To explore the
* logical plan as well as optimized physical plan, use the `explain` function.
*
* To efficiently support domain-specific objects, an [[Encoder]] is required. The encoder maps
* the domain specific type `T` to Spark's internal type system. For example, given a class `Person`
* with two fields, `name` (string) and `age` (int), an encoder is used to tell Spark to generate
* code at runtime to serialize the `Person` object into a binary structure. This binary structure
 * often has a much lower memory footprint and is optimized for efficient data processing
* (e.g. in a columnar format). To understand the internal binary representation for data, use the
* `schema` function.
*
* There are typically two ways to create a Dataset. The most common way is by pointing Spark
* to some files on storage systems, using the `read` function available on a `SparkSession`.
* {{{
* val people = spark.read.parquet("...").as[Person] // Scala
* Dataset<Person> people = spark.read().parquet("...").as(Encoders.bean(Person.class)); // Java
* }}}
*
* Datasets can also be created through transformations available on existing Datasets. For example,
* the following creates a new Dataset by applying a filter on the existing one:
* {{{
* val names = people.map(_.name) // in Scala; names is a Dataset[String]
* Dataset<String> names = people.map((Person p) -> p.name, Encoders.STRING)); // in Java 8
* }}}
*
* Dataset operations can also be untyped, through various domain-specific-language (DSL)
* functions defined in: Dataset (this class), [[Column]], and [[functions]]. These operations
* are very similar to the operations available in the data frame abstraction in R or Python.
*
* To select a column from the Dataset, use `apply` method in Scala and `col` in Java.
* {{{
* val ageCol = people("age") // in Scala
* Column ageCol = people.col("age"); // in Java
* }}}
*
* Note that the [[Column]] type can also be manipulated through its various functions.
* {{{
* // The following creates a new column that increases everybody's age by 10.
* people("age") + 10 // in Scala
* people.col("age").plus(10); // in Java
* }}}
*
* A more concrete example in Scala:
* {{{
* // To create Dataset[Row] using SparkSession
* val people = spark.read.parquet("...")
* val department = spark.read.parquet("...")
*
* people.filter("age > 30")
* .join(department, people("deptId") === department("id"))
* .groupBy(department("name"), "gender")
* .agg(avg(people("salary")), max(people("age")))
* }}}
*
* and in Java:
* {{{
* // To create Dataset<Row> using SparkSession
* Dataset<Row> people = spark.read().parquet("...");
* Dataset<Row> department = spark.read().parquet("...");
*
* people.filter("age".gt(30))
* .join(department, people.col("deptId").equalTo(department("id")))
* .groupBy(department.col("name"), "gender")
* .agg(avg(people.col("salary")), max(people.col("age")));
* }}}
*
* @groupname basic Basic Dataset functions
* @groupname action Actions
* @groupname untypedrel Untyped transformations
* @groupname typedrel Typed transformations
*
* @since 1.6.0
*/
@InterfaceStability.Stable
class Dataset[T] private[sql](
@transient val sparkSession: SparkSession,
@DeveloperApi @InterfaceStability.Unstable @transient val queryExecution: QueryExecution,
encoder: Encoder[T])
extends Serializable {
queryExecution.assertAnalyzed()
// Note for Spark contributors: if adding or updating any action in `Dataset`, please make sure
  // you wrap it with `withNewExecutionId` if this action doesn't call other actions.
def this(sparkSession: SparkSession, logicalPlan: LogicalPlan, encoder: Encoder[T]) = {
this(sparkSession, sparkSession.sessionState.executePlan(logicalPlan), encoder)
}
def this(sqlContext: SQLContext, logicalPlan: LogicalPlan, encoder: Encoder[T]) = {
this(sqlContext.sparkSession, logicalPlan, encoder)
}
@transient private[sql] val logicalPlan: LogicalPlan = {
def hasSideEffects(plan: LogicalPlan): Boolean = plan match {
case _: Command |
_: InsertIntoTable => true
case _ => false
}
queryExecution.analyzed match {
// For various commands (like DDL) and queries with side effects, we force query execution
// to happen right away to let these side effects take place eagerly.
case p if hasSideEffects(p) =>
LogicalRDD(queryExecution.analyzed.output, queryExecution.toRdd)(sparkSession)
case Union(children) if children.forall(hasSideEffects) =>
LogicalRDD(queryExecution.analyzed.output, queryExecution.toRdd)(sparkSession)
case _ =>
queryExecution.analyzed
}
}
/**
   * Currently [[ExpressionEncoder]] is the only implementation of [[Encoder]], so here we
   * explicitly convert the passed-in encoder to [[ExpressionEncoder]] and mark it implicit, so
   * that we can use it when constructing new Dataset objects that have the same object type
   * (which will possibly be resolved to a different schema).
*/
private[sql] implicit val exprEnc: ExpressionEncoder[T] = encoderFor(encoder)
/**
* Encoder is used mostly as a container of serde expressions in Dataset. We build logical
   * plans from these serde expressions and execute them within the query framework. However, for
* performance reasons we may want to use encoder as a function to deserialize internal rows to
* custom objects, e.g. collect. Here we resolve and bind the encoder so that we can call its
* `fromRow` method later.
*/
private val boundEnc =
exprEnc.resolveAndBind(logicalPlan.output, sparkSession.sessionState.analyzer)
private implicit def classTag = exprEnc.clsTag
// sqlContext must be val because a stable identifier is expected when you import implicits
@transient lazy val sqlContext: SQLContext = sparkSession.sqlContext
private[sql] def resolve(colName: String): NamedExpression = {
queryExecution.analyzed.resolveQuoted(colName, sparkSession.sessionState.analyzer.resolver)
.getOrElse {
throw new AnalysisException(
s"""Cannot resolve column name "$colName" among (${schema.fieldNames.mkString(", ")})""")
}
}
private[sql] def numericColumns: Seq[Expression] = {
schema.fields.filter(_.dataType.isInstanceOf[NumericType]).map { n =>
queryExecution.analyzed.resolveQuoted(n.name, sparkSession.sessionState.analyzer.resolver).get
}
}
private def aggregatableColumns: Seq[Expression] = {
schema.fields
.filter(f => f.dataType.isInstanceOf[NumericType] || f.dataType.isInstanceOf[StringType])
.map { n =>
queryExecution.analyzed.resolveQuoted(n.name, sparkSession.sessionState.analyzer.resolver)
.get
}
}
/**
* Compose the string representing rows for output
*
* @param _numRows Number of rows to show
* @param truncate If set to more than 0, truncates strings to `truncate` characters and
* all cells will be aligned right.
*/
private[sql] def showString(_numRows: Int, truncate: Int = 20): String = {
val numRows = _numRows.max(0)
val takeResult = toDF().take(numRows + 1)
val hasMoreData = takeResult.length > numRows
val data = takeResult.take(numRows)
// For array values, replace Seq and Array with square brackets
// For cells that are beyond `truncate` characters, replace it with the
// first `truncate-3` and "..."
val rows: Seq[Seq[String]] = schema.fieldNames.toSeq +: data.map { row =>
row.toSeq.map { cell =>
val str = cell match {
case null => "null"
case binary: Array[Byte] => binary.map("%02X".format(_)).mkString("[", " ", "]")
case array: Array[_] => array.mkString("[", ", ", "]")
case seq: Seq[_] => seq.mkString("[", ", ", "]")
case _ => cell.toString
}
if (truncate > 0 && str.length > truncate) {
// do not show ellipses for strings shorter than 4 characters.
if (truncate < 4) str.substring(0, truncate)
else str.substring(0, truncate - 3) + "..."
} else {
str
}
}: Seq[String]
}
val sb = new StringBuilder
val numCols = schema.fieldNames.length
// Initialise the width of each column to a minimum value of '3'
val colWidths = Array.fill(numCols)(3)
// Compute the width of each column
for (row <- rows) {
for ((cell, i) <- row.zipWithIndex) {
colWidths(i) = math.max(colWidths(i), cell.length)
}
}
// Create SeparateLine
    val sep: String = colWidths.map("-" * _).addString(sb, "+", "+", "+\n").toString()
// column names
rows.head.zipWithIndex.map { case (cell, i) =>
if (truncate > 0) {
StringUtils.leftPad(cell, colWidths(i))
} else {
StringUtils.rightPad(cell, colWidths(i))
}
    }.addString(sb, "|", "|", "|\n")
sb.append(sep)
// data
rows.tail.map {
_.zipWithIndex.map { case (cell, i) =>
if (truncate > 0) {
StringUtils.leftPad(cell.toString, colWidths(i))
} else {
StringUtils.rightPad(cell.toString, colWidths(i))
}
      }.addString(sb, "|", "|", "|\n")
}
sb.append(sep)
// For Data that has more than "numRows" records
if (hasMoreData) {
val rowsString = if (numRows == 1) "row" else "rows"
sb.append(s"only showing top $numRows $rowsString\\n")
}
sb.toString()
}
override def toString: String = {
try {
val builder = new StringBuilder
val fields = schema.take(2).map {
case f => s"${f.name}: ${f.dataType.simpleString(2)}"
}
builder.append("[")
builder.append(fields.mkString(", "))
if (schema.length > 2) {
if (schema.length - fields.size == 1) {
builder.append(" ... 1 more field")
} else {
builder.append(" ... " + (schema.length - 2) + " more fields")
}
}
builder.append("]").toString()
} catch {
case NonFatal(e) =>
s"Invalid tree; ${e.getMessage}:\\n$queryExecution"
}
}
/**
   * Converts this strongly typed collection of data to a generic `DataFrame`. In contrast to the
   * strongly typed objects that Dataset operations work on, a `DataFrame` returns generic [[Row]]
   * objects that allow fields to be accessed by ordinal or name.
*
* @group basic
* @since 1.6.0
*/
// This is declared with parentheses to prevent the Scala compiler from treating
// `ds.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
def toDF(): DataFrame = new Dataset[Row](sparkSession, queryExecution, RowEncoder(schema))
/**
* :: Experimental ::
* Returns a new Dataset where each record has been mapped on to the specified type. The
   * method used to map columns depends on the type of `U`:
* - When `U` is a class, fields for the class will be mapped to columns of the same name
* (case sensitivity is determined by `spark.sql.caseSensitive`).
   *  - When `U` is a tuple, the columns will be mapped by ordinal (i.e. the first column will
* be assigned to `_1`).
* - When `U` is a primitive type (i.e. String, Int, etc), then the first column of the
* `DataFrame` will be used.
*
* If the schema of the Dataset does not match the desired `U` type, you can use `select`
* along with `alias` or `as` to rearrange or rename as required.
*
* @group basic
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def as[U : Encoder]: Dataset[U] = Dataset[U](sparkSession, logicalPlan)
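  // A minimal usage sketch: with `spark.implicits._` in scope and a hypothetical case class whose
  // fields match the column names, `as` yields a typed view of an untyped DataFrame, e.g.
  //   case class Person(name: String, age: Int)
  //   val people: Dataset[Person] = spark.read.parquet("...").as[Person]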
/**
* Converts this strongly typed collection of data to generic `DataFrame` with columns renamed.
* This can be quite convenient in conversion from an RDD of tuples into a `DataFrame` with
* meaningful names. For example:
* {{{
* val rdd: RDD[(Int, String)] = ...
* rdd.toDF() // this implicit conversion creates a DataFrame with column name `_1` and `_2`
* rdd.toDF("id", "name") // this creates a DataFrame with column name "id" and "name"
* }}}
*
* @group basic
* @since 2.0.0
*/
@scala.annotation.varargs
def toDF(colNames: String*): DataFrame = {
require(schema.size == colNames.size,
"The number of columns doesn't match.\\n" +
s"Old column names (${schema.size}): " + schema.fields.map(_.name).mkString(", ") + "\\n" +
s"New column names (${colNames.size}): " + colNames.mkString(", "))
val newCols = logicalPlan.output.zip(colNames).map { case (oldAttribute, newName) =>
Column(oldAttribute).as(newName)
}
select(newCols : _*)
}
/**
* Returns the schema of this Dataset.
*
* @group basic
* @since 1.6.0
*/
def schema: StructType = queryExecution.analyzed.schema
/**
* Prints the schema to the console in a nice tree format.
*
* @group basic
* @since 1.6.0
*/
// scalastyle:off println
def printSchema(): Unit = println(schema.treeString)
// scalastyle:on println
/**
* Prints the plans (logical and physical) to the console for debugging purposes.
*
* @group basic
* @since 1.6.0
*/
def explain(extended: Boolean): Unit = {
val explain = ExplainCommand(queryExecution.logical, extended = extended)
sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect().foreach {
// scalastyle:off println
r => println(r.getString(0))
// scalastyle:on println
}
}
/**
* Prints the physical plan to the console for debugging purposes.
*
* @group basic
* @since 1.6.0
*/
def explain(): Unit = explain(extended = false)
/**
* Returns all column names and their data types as an array.
*
* @group basic
* @since 1.6.0
*/
def dtypes: Array[(String, String)] = schema.fields.map { field =>
(field.name, field.dataType.toString)
}
/**
* Returns all column names as an array.
*
* @group basic
* @since 1.6.0
*/
def columns: Array[String] = schema.fields.map(_.name)
/**
* Returns true if the `collect` and `take` methods can be run locally
* (without any Spark executors).
*
* @group basic
* @since 1.6.0
*/
def isLocal: Boolean = logicalPlan.isInstanceOf[LocalRelation]
/**
* Returns true if this Dataset contains one or more sources that continuously
* return data as it arrives. A Dataset that reads data from a streaming source
* must be executed as a `StreamingQuery` using the `start()` method in
* `DataStreamWriter`. Methods that return a single answer, e.g. `count()` or
* `collect()`, will throw an [[AnalysisException]] when there is a streaming
* source present.
*
* @group streaming
* @since 2.0.0
*/
@Experimental
@InterfaceStability.Evolving
def isStreaming: Boolean = logicalPlan.isStreaming
/**
* Eagerly checkpoint a Dataset and return the new Dataset. Checkpointing can be used to truncate
* the logical plan of this Dataset, which is especially useful in iterative algorithms where the
* plan may grow exponentially. It will be saved to files inside the checkpoint
* directory set with `SparkContext#setCheckpointDir`.
*
* @group basic
* @since 2.1.0
*/
@Experimental
@InterfaceStability.Evolving
def checkpoint(): Dataset[T] = checkpoint(eager = true)
/**
* Returns a checkpointed version of this Dataset. Checkpointing can be used to truncate the
* logical plan of this Dataset, which is especially useful in iterative algorithms where the
* plan may grow exponentially. It will be saved to files inside the checkpoint
* directory set with `SparkContext#setCheckpointDir`.
*
* @group basic
* @since 2.1.0
*/
@Experimental
@InterfaceStability.Evolving
def checkpoint(eager: Boolean): Dataset[T] = {
val internalRdd = queryExecution.toRdd.map(_.copy())
internalRdd.checkpoint()
if (eager) {
internalRdd.count()
}
val physicalPlan = queryExecution.executedPlan
// Takes the first leaf partitioning whenever we see a `PartitioningCollection`. Otherwise the
// size of `PartitioningCollection` may grow exponentially for queries involving deep inner
// joins.
def firstLeafPartitioning(partitioning: Partitioning): Partitioning = {
partitioning match {
case p: PartitioningCollection => firstLeafPartitioning(p.partitionings.head)
case p => p
}
}
val outputPartitioning = firstLeafPartitioning(physicalPlan.outputPartitioning)
Dataset.ofRows(
sparkSession,
LogicalRDD(
logicalPlan.output,
internalRdd,
outputPartitioning,
physicalPlan.outputOrdering
)(sparkSession)).as[T]
}
/**
* :: Experimental ::
* Defines an event time watermark for this [[Dataset]]. A watermark tracks a point in time
* before which we assume no more late data is going to arrive.
*
* Spark will use this watermark for several purposes:
* - To know when a given time window aggregation can be finalized and thus can be emitted when
* using output modes that do not allow updates.
* - To minimize the amount of state that we need to keep for on-going aggregations.
*
* The current watermark is computed by looking at the `MAX(eventTime)` seen across
* all of the partitions in the query minus a user specified `delayThreshold`. Due to the cost
* of coordinating this value across partitions, the actual watermark used is only guaranteed
* to be at least `delayThreshold` behind the actual event time. In some cases we may still
* process records that arrive more than `delayThreshold` late.
*
* @param eventTime the name of the column that contains the event time of the row.
   * @param delayThreshold the minimum delay to wait for data to arrive late, relative to the latest
* record that has been processed in the form of an interval
* (e.g. "1 minute" or "5 hours").
*
* @group streaming
* @since 2.1.0
*/
@Experimental
@InterfaceStability.Evolving
  // We only accept an existing column name, not a derived column, here, as a watermark that is
  // defined on a derived column cannot be referenced elsewhere in the plan.
def withWatermark(eventTime: String, delayThreshold: String): Dataset[T] = withTypedPlan {
val parsedDelay =
Option(CalendarInterval.fromString("interval " + delayThreshold))
.getOrElse(throw new AnalysisException(s"Unable to parse time delay '$delayThreshold'"))
EventTimeWatermark(UnresolvedAttribute(eventTime), parsedDelay, logicalPlan)
}
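  // A minimal usage sketch (with `spark.implicits._` in scope): a watermark is typically paired
  // with a windowed aggregation on the same event-time column; `streamingDf` is a hypothetical
  // streaming Dataset, e.g.
  //   import org.apache.spark.sql.functions.window
  //   streamingDf
  //     .withWatermark("eventTime", "10 minutes")
  //     .groupBy(window($"eventTime", "5 minutes"))
  //     .count()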
/**
* Displays the Dataset in a tabular form. Strings more than 20 characters will be truncated,
* and all cells will be aligned right. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
*
* @param numRows Number of rows to show
*
* @group action
* @since 1.6.0
*/
def show(numRows: Int): Unit = show(numRows, truncate = true)
/**
* Displays the top 20 rows of Dataset in a tabular form. Strings more than 20 characters
* will be truncated, and all cells will be aligned right.
*
* @group action
* @since 1.6.0
*/
def show(): Unit = show(20)
/**
* Displays the top 20 rows of Dataset in a tabular form.
*
   * @param truncate Whether to truncate long strings. If true, strings more than 20 characters will
* be truncated and all cells will be aligned right
*
* @group action
* @since 1.6.0
*/
def show(truncate: Boolean): Unit = show(20, truncate)
/**
* Displays the Dataset in a tabular form. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
* @param numRows Number of rows to show
   * @param truncate Whether to truncate long strings. If true, strings more than 20 characters will
* be truncated and all cells will be aligned right
*
* @group action
* @since 1.6.0
*/
// scalastyle:off println
def show(numRows: Int, truncate: Boolean): Unit = if (truncate) {
println(showString(numRows, truncate = 20))
} else {
println(showString(numRows, truncate = 0))
}
// scalastyle:on println
/**
* Displays the Dataset in a tabular form. For example:
* {{{
* year month AVG('Adj Close) MAX('Adj Close)
* 1980 12 0.503218 0.595103
* 1981 01 0.523289 0.570307
* 1982 02 0.436504 0.475256
* 1983 03 0.410516 0.442194
* 1984 04 0.450090 0.483521
* }}}
*
* @param numRows Number of rows to show
* @param truncate If set to more than 0, truncates strings to `truncate` characters and
* all cells will be aligned right.
* @group action
* @since 1.6.0
*/
// scalastyle:off println
def show(numRows: Int, truncate: Int): Unit = println(showString(numRows, truncate))
// scalastyle:on println
/**
* Returns a [[DataFrameNaFunctions]] for working with missing data.
* {{{
* // Dropping rows containing any null values.
* ds.na.drop()
* }}}
*
* @group untypedrel
* @since 1.6.0
*/
def na: DataFrameNaFunctions = new DataFrameNaFunctions(toDF())
/**
   * Returns a [[DataFrameStatFunctions]] for working with statistic functions.
* {{{
* // Finding frequent items in column with name 'a'.
* ds.stat.freqItems(Seq("a"))
* }}}
*
* @group untypedrel
* @since 1.6.0
*/
def stat: DataFrameStatFunctions = new DataFrameStatFunctions(toDF())
/**
* Join with another `DataFrame`.
*
* Behaves as an INNER JOIN and requires a subsequent join predicate.
*
* @param right Right side of the join operation.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_]): DataFrame = withPlan {
Join(logicalPlan, right.logicalPlan, joinType = Inner, None)
}
/**
* Inner equi-join with another `DataFrame` using the given column.
*
* Different from other join functions, the join column will only appear once in the output,
* i.e. similar to SQL's `JOIN USING` syntax.
*
* {{{
* // Joining df1 and df2 using the column "user_id"
* df1.join(df2, "user_id")
* }}}
*
* @param right Right side of the join operation.
* @param usingColumn Name of the column to join on. This column must exist on both sides.
*
* @note If you perform a self-join using this function without aliasing the input
* `DataFrame`s, you will NOT be able to reference any columns after the join, since
* there is no way to disambiguate which side of the join you would like to reference.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], usingColumn: String): DataFrame = {
join(right, Seq(usingColumn))
}
/**
* Inner equi-join with another `DataFrame` using the given columns.
*
* Different from other join functions, the join columns will only appear once in the output,
* i.e. similar to SQL's `JOIN USING` syntax.
*
* {{{
* // Joining df1 and df2 using the columns "user_id" and "user_name"
* df1.join(df2, Seq("user_id", "user_name"))
* }}}
*
* @param right Right side of the join operation.
   * @param usingColumns Names of the columns to join on. These columns must exist on both sides.
*
* @note If you perform a self-join using this function without aliasing the input
* `DataFrame`s, you will NOT be able to reference any columns after the join, since
* there is no way to disambiguate which side of the join you would like to reference.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], usingColumns: Seq[String]): DataFrame = {
join(right, usingColumns, "inner")
}
/**
* Equi-join with another `DataFrame` using the given columns.
*
* Different from other join functions, the join columns will only appear once in the output,
* i.e. similar to SQL's `JOIN USING` syntax.
*
* @param right Right side of the join operation.
   * @param usingColumns Names of the columns to join on. These columns must exist on both sides.
* @param joinType One of: `inner`, `outer`, `left_outer`, `right_outer`, `leftsemi`.
*
* @note If you perform a self-join using this function without aliasing the input
* `DataFrame`s, you will NOT be able to reference any columns after the join, since
* there is no way to disambiguate which side of the join you would like to reference.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], usingColumns: Seq[String], joinType: String): DataFrame = {
// Analyze the self join. The assumption is that the analyzer will disambiguate left vs right
// by creating a new instance for one of the branch.
val joined = sparkSession.sessionState.executePlan(
Join(logicalPlan, right.logicalPlan, joinType = JoinType(joinType), None))
.analyzed.asInstanceOf[Join]
withPlan {
Join(
joined.left,
joined.right,
UsingJoin(JoinType(joinType), usingColumns),
None)
}
}
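  // A minimal usage sketch: joining on shared column names while keeping unmatched rows from the
  // left side (`df1` and `df2` are hypothetical DataFrames), e.g.
  //   df1.join(df2, Seq("user_id", "user_name"), "left_outer")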
/**
* Inner join with another `DataFrame`, using the given join expression.
*
* {{{
* // The following two are equivalent:
* df1.join(df2, $"df1Key" === $"df2Key")
* df1.join(df2).where($"df1Key" === $"df2Key")
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], joinExprs: Column): DataFrame = join(right, joinExprs, "inner")
/**
* Join with another `DataFrame`, using the given join expression. The following performs
* a full outer join between `df1` and `df2`.
*
* {{{
* // Scala:
* import org.apache.spark.sql.functions._
* df1.join(df2, $"df1Key" === $"df2Key", "outer")
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df1.join(df2, col("df1Key").equalTo(col("df2Key")), "outer");
* }}}
*
* @param right Right side of the join.
* @param joinExprs Join expression.
* @param joinType One of: `inner`, `outer`, `left_outer`, `right_outer`, `leftsemi`.
*
* @group untypedrel
* @since 2.0.0
*/
def join(right: Dataset[_], joinExprs: Column, joinType: String): DataFrame = {
// Note that in this function, we introduce a hack in the case of self-join to automatically
// resolve ambiguous join conditions into ones that might make sense [SPARK-6231].
// Consider this case: df.join(df, df("key") === df("key"))
// Since df("key") === df("key") is a trivially true condition, this actually becomes a
// cartesian join. However, most likely users expect to perform a self join using "key".
// With that assumption, this hack turns the trivially true condition into equality on join
// keys that are resolved to both sides.
// Trigger analysis so in the case of self-join, the analyzer will clone the plan.
// After the cloning, left and right side will have distinct expression ids.
val plan = withPlan(
Join(logicalPlan, right.logicalPlan, JoinType(joinType), Some(joinExprs.expr)))
.queryExecution.analyzed.asInstanceOf[Join]
// If auto self join alias is disabled, return the plan.
if (!sparkSession.sessionState.conf.dataFrameSelfJoinAutoResolveAmbiguity) {
return withPlan(plan)
}
// If left/right have no output set intersection, return the plan.
val lanalyzed = withPlan(this.logicalPlan).queryExecution.analyzed
val ranalyzed = withPlan(right.logicalPlan).queryExecution.analyzed
if (lanalyzed.outputSet.intersect(ranalyzed.outputSet).isEmpty) {
return withPlan(plan)
}
    // Otherwise, find the trivially true predicates and automatically resolve them to both sides.
// By the time we get here, since we have already run analysis, all attributes should've been
// resolved and become AttributeReference.
val cond = plan.condition.map { _.transform {
case catalyst.expressions.EqualTo(a: AttributeReference, b: AttributeReference)
if a.sameRef(b) =>
catalyst.expressions.EqualTo(
withPlan(plan.left).resolve(a.name),
withPlan(plan.right).resolve(b.name))
}}
withPlan {
plan.copy(condition = cond)
}
}
/**
* Explicit cartesian join with another `DataFrame`.
*
* @param right Right side of the join operation.
*
* @note Cartesian joins are very expensive without an extra filter that can be pushed down.
*
* @group untypedrel
* @since 2.1.0
*/
def crossJoin(right: Dataset[_]): DataFrame = withPlan {
Join(logicalPlan, right.logicalPlan, joinType = Cross, None)
}
/**
* :: Experimental ::
* Joins this Dataset returning a `Tuple2` for each pair where `condition` evaluates to
* true.
*
* This is similar to the relation `join` function with one important difference in the
* result schema. Since `joinWith` preserves objects present on either side of the join, the
* result schema is similarly nested into a tuple under the column names `_1` and `_2`.
*
* This type of join can be useful both for preserving type-safety with the original object
* types as well as working with relational data where either side of the join has column
* names in common.
*
* @param other Right side of the join.
* @param condition Join expression.
* @param joinType One of: `inner`, `outer`, `left_outer`, `right_outer`, `leftsemi`.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def joinWith[U](other: Dataset[U], condition: Column, joinType: String): Dataset[(T, U)] = {
// Creates a Join node and resolve it first, to get join condition resolved, self-join resolved,
// etc.
val joined = sparkSession.sessionState.executePlan(
Join(
this.logicalPlan,
other.logicalPlan,
JoinType(joinType),
Some(condition.expr))).analyzed.asInstanceOf[Join]
// For both join side, combine all outputs into a single column and alias it with "_1" or "_2",
// to match the schema for the encoder of the join result.
// Note that we do this before joining them, to enable the join operator to return null for one
// side, in cases like outer-join.
val left = {
val combined = if (this.exprEnc.flat) {
assert(joined.left.output.length == 1)
Alias(joined.left.output.head, "_1")()
} else {
Alias(CreateStruct(joined.left.output), "_1")()
}
Project(combined :: Nil, joined.left)
}
val right = {
val combined = if (other.exprEnc.flat) {
assert(joined.right.output.length == 1)
Alias(joined.right.output.head, "_2")()
} else {
Alias(CreateStruct(joined.right.output), "_2")()
}
Project(combined :: Nil, joined.right)
}
// Rewrites the join condition to make the attribute point to correct column/field, after we
// combine the outputs of each join side.
val conditionExpr = joined.condition.get transformUp {
case a: Attribute if joined.left.outputSet.contains(a) =>
if (this.exprEnc.flat) {
left.output.head
} else {
val index = joined.left.output.indexWhere(_.exprId == a.exprId)
GetStructField(left.output.head, index)
}
case a: Attribute if joined.right.outputSet.contains(a) =>
if (other.exprEnc.flat) {
right.output.head
} else {
val index = joined.right.output.indexWhere(_.exprId == a.exprId)
GetStructField(right.output.head, index)
}
}
implicit val tuple2Encoder: Encoder[(T, U)] =
ExpressionEncoder.tuple(this.exprEnc, other.exprEnc)
withTypedPlan(Join(left, right, joined.joinType, Some(conditionExpr)))
}
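  // A minimal usage sketch: unlike `join`, `joinWith` keeps both sides as typed objects, so the
  // result is a Dataset of pairs (`people`, `addresses` and their element types are hypothetical):
  //   val pairs: Dataset[(Person, Address)] =
  //     people.joinWith(addresses, people("id") === addresses("personId"), "inner")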
/**
* :: Experimental ::
* Using inner equi-join to join this Dataset returning a `Tuple2` for each pair
* where `condition` evaluates to true.
*
* @param other Right side of the join.
* @param condition Join expression.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def joinWith[U](other: Dataset[U], condition: Column): Dataset[(T, U)] = {
joinWith(other, condition, "inner")
}
/**
* Returns a new Dataset with each partition sorted by the given expressions.
*
* This is the same operation as "SORT BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sortWithinPartitions(sortCol: String, sortCols: String*): Dataset[T] = {
sortWithinPartitions((sortCol +: sortCols).map(Column(_)) : _*)
}
/**
* Returns a new Dataset with each partition sorted by the given expressions.
*
* This is the same operation as "SORT BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sortWithinPartitions(sortExprs: Column*): Dataset[T] = {
sortInternal(global = false, sortExprs)
}
/**
* Returns a new Dataset sorted by the specified column, all in ascending order.
* {{{
* // The following 3 are equivalent
* ds.sort("sortcol")
* ds.sort($"sortcol")
* ds.sort($"sortcol".asc)
* }}}
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sort(sortCol: String, sortCols: String*): Dataset[T] = {
sort((sortCol +: sortCols).map(apply) : _*)
}
/**
* Returns a new Dataset sorted by the given expressions. For example:
* {{{
* ds.sort($"col1", $"col2".desc)
* }}}
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def sort(sortExprs: Column*): Dataset[T] = {
sortInternal(global = true, sortExprs)
}
/**
* Returns a new Dataset sorted by the given expressions.
* This is an alias of the `sort` function.
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def orderBy(sortCol: String, sortCols: String*): Dataset[T] = sort(sortCol, sortCols : _*)
/**
* Returns a new Dataset sorted by the given expressions.
* This is an alias of the `sort` function.
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def orderBy(sortExprs: Column*): Dataset[T] = sort(sortExprs : _*)
/**
   * Selects a column based on the column name and returns it as a [[Column]].
   *
   * @note The column name can also reference a nested column like `a.b`.
*
* @group untypedrel
* @since 2.0.0
*/
def apply(colName: String): Column = col(colName)
/**
   * Selects a column based on the column name and returns it as a [[Column]].
   *
   * @note The column name can also reference a nested column like `a.b`.
*
* @group untypedrel
* @since 2.0.0
*/
def col(colName: String): Column = colName match {
case "*" =>
Column(ResolvedStar(queryExecution.analyzed.output))
case _ =>
val expr = resolve(colName)
Column(expr)
}
/**
* Returns a new Dataset with an alias set.
*
* @group typedrel
* @since 1.6.0
*/
def as(alias: String): Dataset[T] = withTypedPlan {
SubqueryAlias(alias, logicalPlan, None)
}
/**
* (Scala-specific) Returns a new Dataset with an alias set.
*
* @group typedrel
* @since 2.0.0
*/
def as(alias: Symbol): Dataset[T] = as(alias.name)
/**
* Returns a new Dataset with an alias set. Same as `as`.
*
* @group typedrel
* @since 2.0.0
*/
def alias(alias: String): Dataset[T] = as(alias)
/**
* (Scala-specific) Returns a new Dataset with an alias set. Same as `as`.
*
* @group typedrel
* @since 2.0.0
*/
def alias(alias: Symbol): Dataset[T] = as(alias)
/**
   * Selects a set of column-based expressions.
* {{{
* ds.select($"colA", $"colB" + 1)
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def select(cols: Column*): DataFrame = withPlan {
Project(cols.map(_.named), logicalPlan)
}
/**
* Selects a set of columns. This is a variant of `select` that can only select
* existing columns using column names (i.e. cannot construct expressions).
*
* {{{
* // The following two are equivalent:
* ds.select("colA", "colB")
* ds.select($"colA", $"colB")
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)) : _*)
/**
* Selects a set of SQL expressions. This is a variant of `select` that accepts
* SQL expressions.
*
* {{{
* // The following are equivalent:
* ds.selectExpr("colA", "colB as newName", "abs(colC)")
* ds.select(expr("colA"), expr("colB as newName"), expr("abs(colC)"))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def selectExpr(exprs: String*): DataFrame = {
select(exprs.map { expr =>
Column(sparkSession.sessionState.sqlParser.parseExpression(expr))
}: _*)
}
/**
* :: Experimental ::
* Returns a new Dataset by computing the given [[Column]] expression for each element.
*
* {{{
* val ds = Seq(1, 2, 3).toDS()
* val newDS = ds.select(expr("value + 1").as[Int])
* }}}
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def select[U1](c1: TypedColumn[T, U1]): Dataset[U1] = {
implicit val encoder = c1.encoder
val project = Project(c1.withInputType(exprEnc, logicalPlan.output).named :: Nil,
logicalPlan)
if (encoder.flat) {
new Dataset[U1](sparkSession, project, encoder)
} else {
// Flattens inner fields of U1
new Dataset[Tuple1[U1]](sparkSession, project, ExpressionEncoder.tuple(encoder)).map(_._1)
}
}
/**
* Internal helper function for building typed selects that return tuples. For simplicity and
* code reuse, we do this without the help of the type system and then use helper functions
* that cast appropriately for the user facing interface.
*/
protected def selectUntyped(columns: TypedColumn[_, _]*): Dataset[_] = {
val encoders = columns.map(_.encoder)
val namedColumns =
columns.map(_.withInputType(exprEnc, logicalPlan.output).named)
val execution = new QueryExecution(sparkSession, Project(namedColumns, logicalPlan))
new Dataset(sparkSession, execution, ExpressionEncoder.tuple(encoders))
}
/**
* :: Experimental ::
* Returns a new Dataset by computing the given [[Column]] expressions for each element.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def select[U1, U2](c1: TypedColumn[T, U1], c2: TypedColumn[T, U2]): Dataset[(U1, U2)] =
selectUntyped(c1, c2).asInstanceOf[Dataset[(U1, U2)]]
/**
* :: Experimental ::
* Returns a new Dataset by computing the given [[Column]] expressions for each element.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def select[U1, U2, U3](
c1: TypedColumn[T, U1],
c2: TypedColumn[T, U2],
c3: TypedColumn[T, U3]): Dataset[(U1, U2, U3)] =
selectUntyped(c1, c2, c3).asInstanceOf[Dataset[(U1, U2, U3)]]
/**
* :: Experimental ::
* Returns a new Dataset by computing the given [[Column]] expressions for each element.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def select[U1, U2, U3, U4](
c1: TypedColumn[T, U1],
c2: TypedColumn[T, U2],
c3: TypedColumn[T, U3],
c4: TypedColumn[T, U4]): Dataset[(U1, U2, U3, U4)] =
selectUntyped(c1, c2, c3, c4).asInstanceOf[Dataset[(U1, U2, U3, U4)]]
/**
* :: Experimental ::
* Returns a new Dataset by computing the given [[Column]] expressions for each element.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def select[U1, U2, U3, U4, U5](
c1: TypedColumn[T, U1],
c2: TypedColumn[T, U2],
c3: TypedColumn[T, U3],
c4: TypedColumn[T, U4],
c5: TypedColumn[T, U5]): Dataset[(U1, U2, U3, U4, U5)] =
selectUntyped(c1, c2, c3, c4, c5).asInstanceOf[Dataset[(U1, U2, U3, U4, U5)]]
/**
* Filters rows using the given condition.
* {{{
* // The following are equivalent:
* peopleDs.filter($"age" > 15)
* peopleDs.where($"age" > 15)
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def filter(condition: Column): Dataset[T] = withTypedPlan {
Filter(condition.expr, logicalPlan)
}
/**
* Filters rows using the given SQL expression.
* {{{
* peopleDs.filter("age > 15")
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def filter(conditionExpr: String): Dataset[T] = {
filter(Column(sparkSession.sessionState.sqlParser.parseExpression(conditionExpr)))
}
/**
* Filters rows using the given condition. This is an alias for `filter`.
* {{{
* // The following are equivalent:
* peopleDs.filter($"age" > 15)
* peopleDs.where($"age" > 15)
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def where(condition: Column): Dataset[T] = filter(condition)
/**
* Filters rows using the given SQL expression.
* {{{
* peopleDs.where("age > 15")
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def where(conditionExpr: String): Dataset[T] = {
filter(Column(sparkSession.sessionState.sqlParser.parseExpression(conditionExpr)))
}
/**
* Groups the Dataset using the specified columns, so we can run aggregation on them. See
* [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* {{{
* // Compute the average for all numeric columns grouped by department.
* ds.groupBy($"department").avg()
*
* // Compute the max age and average salary, grouped by department and gender.
* ds.groupBy($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def groupBy(cols: Column*): RelationalGroupedDataset = {
RelationalGroupedDataset(toDF(), cols.map(_.expr), RelationalGroupedDataset.GroupByType)
}
/**
* Create a multi-dimensional rollup for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* {{{
* // Compute the average for all numeric columns rolluped by department and group.
* ds.rollup($"department", $"group").avg()
*
* // Compute the max age and average salary, rolluped by department and gender.
* ds.rollup($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def rollup(cols: Column*): RelationalGroupedDataset = {
RelationalGroupedDataset(toDF(), cols.map(_.expr), RelationalGroupedDataset.RollupType)
}
/**
* Create a multi-dimensional cube for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* {{{
* // Compute the average for all numeric columns cubed by department and group.
* ds.cube($"department", $"group").avg()
*
* // Compute the max age and average salary, cubed by department and gender.
* ds.cube($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def cube(cols: Column*): RelationalGroupedDataset = {
RelationalGroupedDataset(toDF(), cols.map(_.expr), RelationalGroupedDataset.CubeType)
}
/**
* Groups the Dataset using the specified columns, so that we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* This is a variant of groupBy that can only group by existing columns using column names
* (i.e. cannot construct expressions).
*
* {{{
* // Compute the average for all numeric columns grouped by department.
* ds.groupBy("department").avg()
*
* // Compute the max age and average salary, grouped by department and gender.
* ds.groupBy($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def groupBy(col1: String, cols: String*): RelationalGroupedDataset = {
val colNames: Seq[String] = col1 +: cols
RelationalGroupedDataset(
toDF(), colNames.map(colName => resolve(colName)), RelationalGroupedDataset.GroupByType)
}
/**
* :: Experimental ::
* (Scala-specific)
* Reduces the elements of this Dataset using the specified binary function. The given `func`
* must be commutative and associative or the result may be non-deterministic.
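   *
   * A minimal sketch (the small input Dataset is assumed for illustration):
   * {{{
   *   val ds = Seq(1, 2, 3, 4).toDS()
   *   val sum = ds.reduce(_ + _)   // 10
   * }}}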
*
* @group action
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def reduce(func: (T, T) => T): T = rdd.reduce(func)
/**
* :: Experimental ::
* (Java-specific)
* Reduces the elements of this Dataset using the specified binary function. The given `func`
* must be commutative and associative or the result may be non-deterministic.
*
* @group action
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def reduce(func: ReduceFunction[T]): T = reduce(func.call(_, _))
/**
* :: Experimental ::
* (Scala-specific)
* Returns a [[KeyValueGroupedDataset]] where the data is grouped by the given key `func`.
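   *
   * A hypothetical sketch (assumes `ds: Dataset[Person]` with a `department: String` field and
   * `import spark.implicits._` for the key encoder):
   * {{{
   *   val byDept = ds.groupByKey(_.department)
   *   val countsPerDept = byDept.count()
   * }}}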
*
* @group typedrel
* @since 2.0.0
*/
@Experimental
@InterfaceStability.Evolving
def groupByKey[K: Encoder](func: T => K): KeyValueGroupedDataset[K, T] = {
val inputPlan = logicalPlan
val withGroupingKey = AppendColumns(func, inputPlan)
val executed = sparkSession.sessionState.executePlan(withGroupingKey)
new KeyValueGroupedDataset(
encoderFor[K],
encoderFor[T],
executed,
inputPlan.output,
withGroupingKey.newColumns)
}
/**
* :: Experimental ::
* (Java-specific)
* Returns a [[KeyValueGroupedDataset]] where the data is grouped by the given key `func`.
*
* @group typedrel
* @since 2.0.0
*/
@Experimental
@InterfaceStability.Evolving
def groupByKey[K](func: MapFunction[T, K], encoder: Encoder[K]): KeyValueGroupedDataset[K, T] =
groupByKey(func.call(_))(encoder)
/**
* Create a multi-dimensional rollup for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* This is a variant of rollup that can only group by existing columns using column names
* (i.e. cannot construct expressions).
*
* {{{
* // Compute the average for all numeric columns rolluped by department and group.
* ds.rollup("department", "group").avg()
*
* // Compute the max age and average salary, rolluped by department and gender.
* ds.rollup($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def rollup(col1: String, cols: String*): RelationalGroupedDataset = {
val colNames: Seq[String] = col1 +: cols
RelationalGroupedDataset(
toDF(), colNames.map(colName => resolve(colName)), RelationalGroupedDataset.RollupType)
}
/**
* Create a multi-dimensional cube for the current Dataset using the specified columns,
* so we can run aggregation on them.
* See [[RelationalGroupedDataset]] for all the available aggregate functions.
*
* This is a variant of cube that can only group by existing columns using column names
* (i.e. cannot construct expressions).
*
* {{{
* // Compute the average for all numeric columns cubed by department and group.
* ds.cube("department", "group").avg()
*
* // Compute the max age and average salary, cubed by department and gender.
* ds.cube($"department", $"gender").agg(Map(
* "salary" -> "avg",
* "age" -> "max"
* ))
* }}}
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def cube(col1: String, cols: String*): RelationalGroupedDataset = {
val colNames: Seq[String] = col1 +: cols
RelationalGroupedDataset(
toDF(), colNames.map(colName => resolve(colName)), RelationalGroupedDataset.CubeType)
}
/**
* (Scala-specific) Aggregates on the entire Dataset without groups.
* {{{
* // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
* ds.agg("age" -> "max", "salary" -> "avg")
* ds.groupBy().agg("age" -> "max", "salary" -> "avg")
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame = {
groupBy().agg(aggExpr, aggExprs : _*)
}
/**
* (Scala-specific) Aggregates on the entire Dataset without groups.
* {{{
* // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
* ds.agg(Map("age" -> "max", "salary" -> "avg"))
* ds.groupBy().agg(Map("age" -> "max", "salary" -> "avg"))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def agg(exprs: Map[String, String]): DataFrame = groupBy().agg(exprs)
/**
* (Java-specific) Aggregates on the entire Dataset without groups.
* {{{
* // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
* ds.agg(Map("age" -> "max", "salary" -> "avg"))
* ds.groupBy().agg(Map("age" -> "max", "salary" -> "avg"))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
def agg(exprs: java.util.Map[String, String]): DataFrame = groupBy().agg(exprs)
/**
* Aggregates on the entire Dataset without groups.
* {{{
* // ds.agg(...) is a shorthand for ds.groupBy().agg(...)
* ds.agg(max($"age"), avg($"salary"))
* ds.groupBy().agg(max($"age"), avg($"salary"))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs : _*)
/**
* Returns a new Dataset by taking the first `n` rows. The difference between this function
* and `head` is that `head` is an action and returns an array (by triggering query execution)
* while `limit` returns a new Dataset.
*
* @group typedrel
* @since 2.0.0
*/
def limit(n: Int): Dataset[T] = withTypedPlan {
Limit(Literal(n), logicalPlan)
}
/**
* Returns a new Dataset containing union of rows in this Dataset and another Dataset.
* This is equivalent to `UNION ALL` in SQL.
*
* To do a SQL-style set union (that does deduplication of elements), use this function followed
* by a [[distinct]].
*
* @group typedrel
* @since 2.0.0
*/
@deprecated("use union()", "2.0.0")
def unionAll(other: Dataset[T]): Dataset[T] = union(other)
/**
* Returns a new Dataset containing union of rows in this Dataset and another Dataset.
* This is equivalent to `UNION ALL` in SQL.
*
* To do a SQL-style set union (that does deduplication of elements), use this function followed
* by a [[distinct]].
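   *
   * A minimal sketch (`ds1` and `ds2` are assumed to be Datasets of the same type):
   * {{{
   *   val unionAll = ds1.union(ds2)            // keeps duplicates, like UNION ALL
   *   val setUnion = ds1.union(ds2).distinct() // SQL-style UNION
   * }}}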
*
* @group typedrel
* @since 2.0.0
*/
def union(other: Dataset[T]): Dataset[T] = withSetOperator {
// This breaks caching, but it's usually ok because it addresses a very specific use case:
// using union to union many files or partitions.
CombineUnions(Union(logicalPlan, other.logicalPlan))
}
/**
* Returns a new Dataset containing rows only in both this Dataset and another Dataset.
* This is equivalent to `INTERSECT` in SQL.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`.
*
* @group typedrel
* @since 1.6.0
*/
def intersect(other: Dataset[T]): Dataset[T] = withSetOperator {
Intersect(logicalPlan, other.logicalPlan)
}
/**
* Returns a new Dataset containing rows in this Dataset but not in another Dataset.
* This is equivalent to `EXCEPT` in SQL.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`.
*
* @group typedrel
* @since 2.0.0
*/
def except(other: Dataset[T]): Dataset[T] = withSetOperator {
Except(logicalPlan, other.logicalPlan)
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows, using a user-supplied seed.
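   *
   * A minimal sketch (roughly 10% of the rows, sampled without replacement):
   * {{{
   *   val tenPercent = ds.sample(withReplacement = false, fraction = 0.1, seed = 42L)
   * }}}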
*
* @param withReplacement Sample with replacement or not.
* @param fraction Fraction of rows to generate.
* @param seed Seed for sampling.
*
   * @note This is NOT guaranteed to provide exactly the fraction of the total count
   * of the given [[Dataset]].
*
* @group typedrel
* @since 1.6.0
*/
def sample(withReplacement: Boolean, fraction: Double, seed: Long): Dataset[T] = {
require(fraction >= 0,
s"Fraction must be nonnegative, but got ${fraction}")
withTypedPlan {
Sample(0.0, fraction, withReplacement, seed, logicalPlan)()
}
}
/**
* Returns a new [[Dataset]] by sampling a fraction of rows, using a random seed.
*
* @param withReplacement Sample with replacement or not.
* @param fraction Fraction of rows to generate.
*
* @note This is NOT guaranteed to provide exactly the fraction of the total count
* of the given [[Dataset]].
*
* @group typedrel
* @since 1.6.0
*/
def sample(withReplacement: Boolean, fraction: Double): Dataset[T] = {
sample(withReplacement, fraction, Utils.random.nextLong)
}
/**
* Randomly splits this Dataset with the provided weights.
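   *
   * A minimal sketch (an 80/20 train/test split; the weights need not sum to 1):
   * {{{
   *   val Array(train, test) = ds.randomSplit(Array(0.8, 0.2), seed = 42L)
   * }}}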
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @param seed Seed for sampling.
*
* For Java API, use [[randomSplitAsList]].
*
* @group typedrel
* @since 2.0.0
*/
def randomSplit(weights: Array[Double], seed: Long): Array[Dataset[T]] = {
require(weights.forall(_ >= 0),
s"Weights must be nonnegative, but got ${weights.mkString("[", ",", "]")}")
require(weights.sum > 0,
s"Sum of weights must be positive, but got ${weights.mkString("[", ",", "]")}")
    // It is possible that the underlying dataframe doesn't guarantee the ordering of rows in its
    // constituent partitions each time a split is materialized, which could result in
    // overlapping splits. To prevent this, we explicitly sort each input partition to make the
    // ordering deterministic.
// MapType cannot be sorted.
val sorted = Sort(logicalPlan.output.filterNot(_.dataType.isInstanceOf[MapType])
.map(SortOrder(_, Ascending)), global = false, logicalPlan)
val sum = weights.sum
val normalizedCumWeights = weights.map(_ / sum).scanLeft(0.0d)(_ + _)
normalizedCumWeights.sliding(2).map { x =>
new Dataset[T](
sparkSession, Sample(x(0), x(1), withReplacement = false, seed, sorted)(), encoder)
}.toArray
}
/**
   * Returns a Java list that contains the randomly split Datasets with the provided weights.
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @param seed Seed for sampling.
*
* @group typedrel
* @since 2.0.0
*/
def randomSplitAsList(weights: Array[Double], seed: Long): java.util.List[Dataset[T]] = {
val values = randomSplit(weights, seed)
java.util.Arrays.asList(values : _*)
}
/**
* Randomly splits this Dataset with the provided weights.
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @group typedrel
* @since 2.0.0
*/
def randomSplit(weights: Array[Double]): Array[Dataset[T]] = {
randomSplit(weights, Utils.random.nextLong)
}
/**
   * Randomly splits this Dataset with the provided weights. Provided for the Python API.
*
* @param weights weights for splits, will be normalized if they don't sum to 1.
* @param seed Seed for sampling.
*/
private[spark] def randomSplit(weights: List[Double], seed: Long): Array[Dataset[T]] = {
randomSplit(weights.toArray, seed)
}
/**
* (Scala-specific) Returns a new Dataset where each row has been expanded to zero or more
* rows by the provided function. This is similar to a `LATERAL VIEW` in HiveQL. The columns of
* the input row are implicitly joined with each row that is output by the function.
*
* Given that this is deprecated, as an alternative, you can explode columns either using
* `functions.explode()` or `flatMap()`. The following example uses these alternatives to count
* the number of books that contain a given word:
*
* {{{
* case class Book(title: String, words: String)
* val ds: Dataset[Book]
*
* val allWords = ds.select('title, explode(split('words, " ")).as("word"))
*
* val bookCountPerWord = allWords.groupBy("word").agg(countDistinct("title"))
* }}}
*
* Using `flatMap()` this can similarly be exploded as:
*
* {{{
* ds.flatMap(_.words.split(" "))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@deprecated("use flatMap() or select() with functions.explode() instead", "2.0.0")
def explode[A <: Product : TypeTag](input: Column*)(f: Row => TraversableOnce[A]): DataFrame = {
val elementSchema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val convert = CatalystTypeConverters.createToCatalystConverter(elementSchema)
val rowFunction =
f.andThen(_.map(convert(_).asInstanceOf[InternalRow]))
val generator = UserDefinedGenerator(elementSchema, rowFunction, input.map(_.expr))
withPlan {
Generate(generator, join = true, outer = false,
qualifier = None, generatorOutput = Nil, logicalPlan)
}
}
/**
* (Scala-specific) Returns a new Dataset where a single column has been expanded to zero
* or more rows by the provided function. This is similar to a `LATERAL VIEW` in HiveQL. All
* columns of the input row are implicitly joined with each value that is output by the function.
*
* Given that this is deprecated, as an alternative, you can explode columns either using
* `functions.explode()`:
*
* {{{
* ds.select(explode(split('words, " ")).as("word"))
* }}}
*
* or `flatMap()`:
*
* {{{
* ds.flatMap(_.words.split(" "))
* }}}
*
* @group untypedrel
* @since 2.0.0
*/
@deprecated("use flatMap() or select() with functions.explode() instead", "2.0.0")
def explode[A, B : TypeTag](inputColumn: String, outputColumn: String)(f: A => TraversableOnce[B])
: DataFrame = {
val dataType = ScalaReflection.schemaFor[B].dataType
val attributes = AttributeReference(outputColumn, dataType)() :: Nil
// TODO handle the metadata?
val elementSchema = attributes.toStructType
def rowFunction(row: Row): TraversableOnce[InternalRow] = {
val convert = CatalystTypeConverters.createToCatalystConverter(dataType)
f(row(0).asInstanceOf[A]).map(o => InternalRow(convert(o)))
}
val generator = UserDefinedGenerator(elementSchema, rowFunction, apply(inputColumn).expr :: Nil)
withPlan {
Generate(generator, join = true, outer = false,
qualifier = None, generatorOutput = Nil, logicalPlan)
}
}
/**
* Returns a new Dataset by adding a column or replacing the existing column that has
* the same name.
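   *
   * A hypothetical sketch (the column names are illustrative only):
   * {{{
   *   val withTotal = df.withColumn("total", $"price" * $"quantity")
   * }}}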
*
* @group untypedrel
* @since 2.0.0
*/
def withColumn(colName: String, col: Column): DataFrame = {
val resolver = sparkSession.sessionState.analyzer.resolver
val output = queryExecution.analyzed.output
val shouldReplace = output.exists(f => resolver(f.name, colName))
if (shouldReplace) {
val columns = output.map { field =>
if (resolver(field.name, colName)) {
col.as(colName)
} else {
Column(field)
}
}
select(columns : _*)
} else {
select(Column("*"), col.as(colName))
}
}
/**
* Returns a new Dataset by adding a column with metadata.
*/
private[spark] def withColumn(colName: String, col: Column, metadata: Metadata): DataFrame = {
val resolver = sparkSession.sessionState.analyzer.resolver
val output = queryExecution.analyzed.output
val shouldReplace = output.exists(f => resolver(f.name, colName))
if (shouldReplace) {
val columns = output.map { field =>
if (resolver(field.name, colName)) {
col.as(colName, metadata)
} else {
Column(field)
}
}
select(columns : _*)
} else {
select(Column("*"), col.as(colName, metadata))
}
}
/**
* Returns a new Dataset with a column renamed.
* This is a no-op if schema doesn't contain existingName.
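   *
   * A minimal sketch (the column names are illustrative only):
   * {{{
   *   val renamed = df.withColumnRenamed("dept", "department")
   * }}}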
*
* @group untypedrel
* @since 2.0.0
*/
def withColumnRenamed(existingName: String, newName: String): DataFrame = {
val resolver = sparkSession.sessionState.analyzer.resolver
val output = queryExecution.analyzed.output
val shouldRename = output.exists(f => resolver(f.name, existingName))
if (shouldRename) {
val columns = output.map { col =>
if (resolver(col.name, existingName)) {
Column(col).as(newName)
} else {
Column(col)
}
}
select(columns : _*)
} else {
toDF()
}
}
/**
* Returns a new Dataset with a column dropped. This is a no-op if schema doesn't contain
* column name.
*
   * This method can only be used to drop top-level columns. The colName string is treated
   * literally without further interpretation.
*
* @group untypedrel
* @since 2.0.0
*/
def drop(colName: String): DataFrame = {
drop(Seq(colName) : _*)
}
/**
* Returns a new Dataset with columns dropped.
* This is a no-op if schema doesn't contain column name(s).
*
   * This method can only be used to drop top-level columns. The colName string is treated
   * literally without further interpretation.
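   *
   * A minimal sketch (the column names are illustrative; names that don't resolve are ignored):
   * {{{
   *   val trimmed = df.drop("tmp_id", "debug_flag")
   * }}}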
*
* @group untypedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def drop(colNames: String*): DataFrame = {
val resolver = sparkSession.sessionState.analyzer.resolver
val allColumns = queryExecution.analyzed.output
val remainingCols = allColumns.filter { attribute =>
colNames.forall(n => !resolver(attribute.name, n))
}.map(attribute => Column(attribute))
if (remainingCols.size == allColumns.size) {
toDF()
} else {
this.select(remainingCols: _*)
}
}
/**
* Returns a new Dataset with a column dropped.
* This version of drop accepts a [[Column]] rather than a name.
* This is a no-op if the Dataset doesn't have a column
* with an equivalent expression.
*
* @group untypedrel
* @since 2.0.0
*/
def drop(col: Column): DataFrame = {
val expression = col match {
case Column(u: UnresolvedAttribute) =>
queryExecution.analyzed.resolveQuoted(
u.name, sparkSession.sessionState.analyzer.resolver).getOrElse(u)
case Column(expr: Expression) => expr
}
val attrs = this.logicalPlan.output
val colsAfterDrop = attrs.filter { attr =>
attr != expression
}.map(attr => Column(attr))
select(colsAfterDrop : _*)
}
/**
* Returns a new Dataset that contains only the unique rows from this Dataset.
* This is an alias for `distinct`.
*
* @group typedrel
* @since 2.0.0
*/
def dropDuplicates(): Dataset[T] = dropDuplicates(this.columns)
/**
* (Scala-specific) Returns a new Dataset with duplicate rows removed, considering only
* the subset of columns.
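   *
   * A hypothetical sketch (deduplicating on a subset of columns; the names are illustrative):
   * {{{
   *   val deduped = ds.dropDuplicates(Seq("firstName", "lastName"))
   * }}}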
*
* @group typedrel
* @since 2.0.0
*/
def dropDuplicates(colNames: Seq[String]): Dataset[T] = withTypedPlan {
val resolver = sparkSession.sessionState.analyzer.resolver
val allColumns = queryExecution.analyzed.output
val groupCols = colNames.flatMap { colName =>
      // It is possible that there are multiple columns with the same name,
      // so we call filter instead of find.
val cols = allColumns.filter(col => resolver(col.name, colName))
if (cols.isEmpty) {
throw new AnalysisException(
s"""Cannot resolve column name "$colName" among (${schema.fieldNames.mkString(", ")})""")
}
cols
}
val groupColExprIds = groupCols.map(_.exprId)
val aggCols = logicalPlan.output.map { attr =>
if (groupColExprIds.contains(attr.exprId)) {
attr
} else {
        // Removing duplicate rows should not change output attributes. We should keep
        // the original exprId of the attribute. Otherwise, selecting a column from the original
        // dataset will cause an analysis exception due to an unresolved attribute.
Alias(new First(attr).toAggregateExpression(), attr.name)(exprId = attr.exprId)
}
}
Aggregate(groupCols, aggCols, logicalPlan)
}
/**
* Returns a new Dataset with duplicate rows removed, considering only
* the subset of columns.
*
* @group typedrel
* @since 2.0.0
*/
def dropDuplicates(colNames: Array[String]): Dataset[T] = dropDuplicates(colNames.toSeq)
/**
* Returns a new [[Dataset]] with duplicate rows removed, considering only
* the subset of columns.
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def dropDuplicates(col1: String, cols: String*): Dataset[T] = {
val colNames: Seq[String] = col1 +: cols
dropDuplicates(colNames)
}
/**
* Computes statistics for numeric and string columns, including count, mean, stddev, min, and
* max. If no columns are given, this function computes statistics for all numerical or string
* columns.
*
* This function is meant for exploratory data analysis, as we make no guarantee about the
* backward compatibility of the schema of the resulting Dataset. If you want to
* programmatically compute summary statistics, use the `agg` function instead.
*
* {{{
* ds.describe("age", "height").show()
*
* // output:
* // summary age height
* // count 10.0 10.0
* // mean 53.3 178.05
* // stddev 11.6 15.7
* // min 18.0 163.0
* // max 92.0 192.0
* }}}
*
* @group action
* @since 1.6.0
*/
@scala.annotation.varargs
def describe(cols: String*): DataFrame = withPlan {
// The list of summary statistics to compute, in the form of expressions.
val statistics = List[(String, Expression => Expression)](
"count" -> ((child: Expression) => Count(child).toAggregateExpression()),
"mean" -> ((child: Expression) => Average(child).toAggregateExpression()),
"stddev" -> ((child: Expression) => StddevSamp(child).toAggregateExpression()),
"min" -> ((child: Expression) => Min(child).toAggregateExpression()),
"max" -> ((child: Expression) => Max(child).toAggregateExpression()))
val outputCols =
(if (cols.isEmpty) aggregatableColumns.map(usePrettyExpression(_).sql) else cols).toList
val ret: Seq[Row] = if (outputCols.nonEmpty) {
val aggExprs = statistics.flatMap { case (_, colToAgg) =>
outputCols.map(c => Column(Cast(colToAgg(Column(c).expr), StringType)).as(c))
}
val row = groupBy().agg(aggExprs.head, aggExprs.tail: _*).head().toSeq
// Pivot the data so each summary is one row
row.grouped(outputCols.size).toSeq.zip(statistics).map { case (aggregation, (statistic, _)) =>
Row(statistic :: aggregation.toList: _*)
}
} else {
// If there are no output columns, just output a single column that contains the stats.
statistics.map { case (name, _) => Row(name) }
}
// All columns are string type
val schema = StructType(
StructField("summary", StringType) :: outputCols.map(StructField(_, StringType))).toAttributes
// `toArray` forces materialization to make the seq serializable
LocalRelation.fromExternalRows(schema, ret.toArray.toSeq)
}
/**
* Returns the first `n` rows.
*
* @note this method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @group action
* @since 1.6.0
*/
def head(n: Int): Array[T] = withTypedCallback("head", limit(n)) { df =>
df.collect(needCallback = false)
}
/**
* Returns the first row.
* @group action
* @since 1.6.0
*/
def head(): T = head(1).head
/**
* Returns the first row. Alias for head().
* @group action
* @since 1.6.0
*/
def first(): T = head()
/**
* Concise syntax for chaining custom transformations.
* {{{
* def featurize(ds: Dataset[T]): Dataset[U] = ...
*
* ds
* .transform(featurize)
* .transform(...)
* }}}
*
* @group typedrel
* @since 1.6.0
*/
def transform[U](t: Dataset[T] => Dataset[U]): Dataset[U] = t(this)
/**
* :: Experimental ::
* (Scala-specific)
* Returns a new Dataset that only contains elements where `func` returns `true`.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def filter(func: T => Boolean): Dataset[T] = {
withTypedPlan(TypedFilter(func, logicalPlan))
}
/**
* :: Experimental ::
* (Java-specific)
* Returns a new Dataset that only contains elements where `func` returns `true`.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def filter(func: FilterFunction[T]): Dataset[T] = {
withTypedPlan(TypedFilter(func, logicalPlan))
}
/**
* :: Experimental ::
* (Scala-specific)
* Returns a new Dataset that contains the result of applying `func` to each element.
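   *
   * A minimal sketch (assumes `import spark.implicits._` for the result encoder):
   * {{{
   *   val lengths: Dataset[Int] = Seq("a", "bb", "ccc").toDS().map(_.length)
   * }}}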
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def map[U : Encoder](func: T => U): Dataset[U] = withTypedPlan {
MapElements[T, U](func, logicalPlan)
}
/**
* :: Experimental ::
* (Java-specific)
* Returns a new Dataset that contains the result of applying `func` to each element.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def map[U](func: MapFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
implicit val uEnc = encoder
withTypedPlan(MapElements[T, U](func, logicalPlan))
}
/**
* :: Experimental ::
* (Scala-specific)
* Returns a new Dataset that contains the result of applying `func` to each partition.
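   *
   * A minimal sketch (per-partition setup can be amortized across all elements of a partition;
   * assumes `import spark.implicits._` for the result encoder):
   * {{{
   *   val rendered = ds.mapPartitions { it => it.map(_.toString) }
   * }}}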
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def mapPartitions[U : Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
new Dataset[U](
sparkSession,
MapPartitions[T, U](func, logicalPlan),
implicitly[Encoder[U]])
}
/**
* :: Experimental ::
* (Java-specific)
* Returns a new Dataset that contains the result of applying `f` to each partition.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def mapPartitions[U](f: MapPartitionsFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
val func: (Iterator[T]) => Iterator[U] = x => f.call(x.asJava).asScala
mapPartitions(func)(encoder)
}
/**
* Returns a new `DataFrame` that contains the result of applying a serialized R function
* `func` to each partition.
*/
private[sql] def mapPartitionsInR(
func: Array[Byte],
packageNames: Array[Byte],
broadcastVars: Array[Broadcast[Object]],
schema: StructType): DataFrame = {
val rowEncoder = encoder.asInstanceOf[ExpressionEncoder[Row]]
Dataset.ofRows(
sparkSession,
MapPartitionsInR(func, packageNames, broadcastVars, schema, rowEncoder, logicalPlan))
}
/**
* :: Experimental ::
* (Scala-specific)
* Returns a new Dataset by first applying a function to all elements of this Dataset,
* and then flattening the results.
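   *
   * A minimal sketch (splitting lines into words; assumes `import spark.implicits._`):
   * {{{
   *   val words = Seq("hello world").toDS().flatMap(_.split(" "))
   * }}}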
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def flatMap[U : Encoder](func: T => TraversableOnce[U]): Dataset[U] =
mapPartitions(_.flatMap(func))
/**
* :: Experimental ::
* (Java-specific)
* Returns a new Dataset by first applying a function to all elements of this Dataset,
* and then flattening the results.
*
* @group typedrel
* @since 1.6.0
*/
@Experimental
@InterfaceStability.Evolving
def flatMap[U](f: FlatMapFunction[T, U], encoder: Encoder[U]): Dataset[U] = {
val func: (T) => Iterator[U] = x => f.call(x).asScala
flatMap(func)(encoder)
}
/**
* Applies a function `f` to all rows.
*
* @group action
* @since 1.6.0
*/
def foreach(f: T => Unit): Unit = withNewExecutionId {
rdd.foreach(f)
}
/**
* (Java-specific)
* Runs `func` on each element of this Dataset.
*
* @group action
* @since 1.6.0
*/
def foreach(func: ForeachFunction[T]): Unit = foreach(func.call(_))
/**
* Applies a function `f` to each partition of this Dataset.
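   *
   * A hypothetical sketch (one connection per partition; `createConnection` and `send` are
   * illustrative placeholders, not real APIs):
   * {{{
   *   ds.foreachPartition { it =>
   *     val conn = createConnection()
   *     try it.foreach(row => conn.send(row)) finally conn.close()
   *   }
   * }}}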
*
* @group action
* @since 1.6.0
*/
def foreachPartition(f: Iterator[T] => Unit): Unit = withNewExecutionId {
rdd.foreachPartition(f)
}
/**
* (Java-specific)
* Runs `func` on each partition of this Dataset.
*
* @group action
* @since 1.6.0
*/
def foreachPartition(func: ForeachPartitionFunction[T]): Unit =
foreachPartition(it => func.call(it.asJava))
/**
* Returns the first `n` rows in the Dataset.
*
* Running take requires moving data into the application's driver process, and doing so with
* a very large `n` can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 1.6.0
*/
def take(n: Int): Array[T] = head(n)
/**
* Returns the first `n` rows in the Dataset as a list.
*
* Running take requires moving data into the application's driver process, and doing so with
* a very large `n` can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 1.6.0
*/
def takeAsList(n: Int): java.util.List[T] = java.util.Arrays.asList(take(n) : _*)
/**
   * Returns an array that contains all of the [[Row]]s in this Dataset.
*
* Running collect requires moving all the data into the application's driver process, and
* doing so on a very large dataset can crash the driver process with OutOfMemoryError.
*
* For Java API, use [[collectAsList]].
*
* @group action
* @since 1.6.0
*/
def collect(): Array[T] = collect(needCallback = true)
/**
   * Returns a Java list that contains all of the [[Row]]s in this Dataset.
*
* Running collect requires moving all the data into the application's driver process, and
* doing so on a very large dataset can crash the driver process with OutOfMemoryError.
*
* @group action
* @since 1.6.0
*/
def collectAsList(): java.util.List[T] = withCallback("collectAsList", toDF()) { _ =>
withNewExecutionId {
val values = queryExecution.executedPlan.executeCollect().map(boundEnc.fromRow)
java.util.Arrays.asList(values : _*)
}
}
private def collect(needCallback: Boolean): Array[T] = {
def execute(): Array[T] = withNewExecutionId {
queryExecution.executedPlan.executeCollect().map(boundEnc.fromRow)
}
if (needCallback) {
withCallback("collect", toDF())(_ => execute())
} else {
execute()
}
}
/**
   * Returns an iterator that contains all of the [[Row]]s in this Dataset.
*
* The iterator will consume as much memory as the largest partition in this Dataset.
*
   * @note This results in multiple Spark jobs, and if the input Dataset is the result
   * of a wide transformation (e.g. a join with different partitioners), the input Dataset
   * should be cached first to avoid recomputing it.
*
* @group action
* @since 2.0.0
*/
def toLocalIterator(): java.util.Iterator[T] = withCallback("toLocalIterator", toDF()) { _ =>
withNewExecutionId {
queryExecution.executedPlan.executeToIterator().map(boundEnc.fromRow).asJava
}
}
/**
* Returns the number of rows in the Dataset.
* @group action
* @since 1.6.0
*/
def count(): Long = withCallback("count", groupBy().count()) { df =>
df.collect(needCallback = false).head.getLong(0)
}
/**
* Returns a new Dataset that has exactly `numPartitions` partitions.
*
* @group typedrel
* @since 1.6.0
*/
def repartition(numPartitions: Int): Dataset[T] = withTypedPlan {
Repartition(numPartitions, shuffle = true, logicalPlan)
}
/**
* Returns a new Dataset partitioned by the given partitioning expressions into
* `numPartitions`. The resulting Dataset is hash partitioned.
*
* This is the same operation as "DISTRIBUTE BY" in SQL (Hive QL).
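   *
   * A minimal sketch (hash-partitioning into 200 partitions by an assumed `userId` column):
   * {{{
   *   val byUser = ds.repartition(200, $"userId")
   * }}}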
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def repartition(numPartitions: Int, partitionExprs: Column*): Dataset[T] = withTypedPlan {
RepartitionByExpression(partitionExprs.map(_.expr), logicalPlan, Some(numPartitions))
}
/**
* Returns a new Dataset partitioned by the given partitioning expressions, using
* `spark.sql.shuffle.partitions` as number of partitions.
* The resulting Dataset is hash partitioned.
*
* This is the same operation as "DISTRIBUTE BY" in SQL (Hive QL).
*
* @group typedrel
* @since 2.0.0
*/
@scala.annotation.varargs
def repartition(partitionExprs: Column*): Dataset[T] = withTypedPlan {
RepartitionByExpression(partitionExprs.map(_.expr), logicalPlan, numPartitions = None)
}
/**
* Returns a new Dataset that has exactly `numPartitions` partitions.
   * Similar to coalesce defined on an `RDD`, this operation results in a narrow dependency, e.g.
   * if you go from 1000 partitions to 100 partitions, there will not be a shuffle; instead, each of
   * the 100 new partitions will claim 10 of the current partitions.
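   *
   * A minimal sketch (shrinking to 10 partitions without triggering a shuffle):
   * {{{
   *   val fewer = ds.coalesce(10)
   * }}}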
*
* @group typedrel
* @since 1.6.0
*/
def coalesce(numPartitions: Int): Dataset[T] = withTypedPlan {
Repartition(numPartitions, shuffle = false, logicalPlan)
}
/**
* Returns a new Dataset that contains only the unique rows from this Dataset.
* This is an alias for `dropDuplicates`.
*
* @note Equality checking is performed directly on the encoded representation of the data
* and thus is not affected by a custom `equals` function defined on `T`.
*
* @group typedrel
* @since 2.0.0
*/
def distinct(): Dataset[T] = dropDuplicates()
/**
* Persist this Dataset with the default storage level (`MEMORY_AND_DISK`).
*
* @group basic
* @since 1.6.0
*/
def persist(): this.type = {
sparkSession.sharedState.cacheManager.cacheQuery(this)
this
}
/**
* Persist this Dataset with the default storage level (`MEMORY_AND_DISK`).
*
* @group basic
* @since 1.6.0
*/
def cache(): this.type = persist()
/**
* Persist this Dataset with the given storage level.
* @param newLevel One of: `MEMORY_ONLY`, `MEMORY_AND_DISK`, `MEMORY_ONLY_SER`,
* `MEMORY_AND_DISK_SER`, `DISK_ONLY`, `MEMORY_ONLY_2`,
* `MEMORY_AND_DISK_2`, etc.
*
* @group basic
* @since 1.6.0
*/
def persist(newLevel: StorageLevel): this.type = {
sparkSession.sharedState.cacheManager.cacheQuery(this, None, newLevel)
this
}
/**
* Get the Dataset's current storage level, or StorageLevel.NONE if not persisted.
*
* @group basic
* @since 2.1.0
*/
def storageLevel: StorageLevel = {
sparkSession.sharedState.cacheManager.lookupCachedData(this).map { cachedData =>
cachedData.cachedRepresentation.storageLevel
}.getOrElse(StorageLevel.NONE)
}
/**
* Mark the Dataset as non-persistent, and remove all blocks for it from memory and disk.
*
* @param blocking Whether to block until all blocks are deleted.
*
* @group basic
* @since 1.6.0
*/
def unpersist(blocking: Boolean): this.type = {
sparkSession.sharedState.cacheManager.uncacheQuery(this, blocking)
this
}
/**
* Mark the Dataset as non-persistent, and remove all blocks for it from memory and disk.
*
* @group basic
* @since 1.6.0
*/
def unpersist(): this.type = unpersist(blocking = false)
/**
* Represents the content of the Dataset as an `RDD` of [[T]].
*
* @group basic
* @since 1.6.0
*/
lazy val rdd: RDD[T] = {
val objectType = exprEnc.deserializer.dataType
val deserialized = CatalystSerde.deserialize[T](logicalPlan)
sparkSession.sessionState.executePlan(deserialized).toRdd.mapPartitions { rows =>
rows.map(_.get(0, objectType).asInstanceOf[T])
}
}
/**
* Returns the content of the Dataset as a `JavaRDD` of [[T]]s.
* @group basic
* @since 1.6.0
*/
def toJavaRDD: JavaRDD[T] = rdd.toJavaRDD()
/**
* Returns the content of the Dataset as a `JavaRDD` of [[T]]s.
* @group basic
* @since 1.6.0
*/
def javaRDD: JavaRDD[T] = toJavaRDD
/**
* Registers this Dataset as a temporary table using the given name. The lifetime of this
* temporary table is tied to the [[SparkSession]] that was used to create this Dataset.
*
* @group basic
* @since 1.6.0
*/
@deprecated("Use createOrReplaceTempView(viewName) instead.", "2.0.0")
def registerTempTable(tableName: String): Unit = {
createOrReplaceTempView(tableName)
}
/**
* Creates a local temporary view using the given name. The lifetime of this
* temporary view is tied to the [[SparkSession]] that was used to create this Dataset.
*
* Local temporary view is session-scoped. Its lifetime is the lifetime of the session that
* created it, i.e. it will be automatically dropped when the session terminates. It's not
* tied to any databases, i.e. we can't use `db1.view1` to reference a local temporary view.
*
* @throws AnalysisException if the view name already exists
*
* @group basic
* @since 2.0.0
*/
@throws[AnalysisException]
def createTempView(viewName: String): Unit = withPlan {
createTempViewCommand(viewName, replace = false, global = false)
}
/**
* Creates a local temporary view using the given name. The lifetime of this
* temporary view is tied to the [[SparkSession]] that was used to create this Dataset.
*
* @group basic
* @since 2.0.0
*/
def createOrReplaceTempView(viewName: String): Unit = withPlan {
createTempViewCommand(viewName, replace = true, global = false)
}
/**
* Creates a global temporary view using the given name. The lifetime of this
* temporary view is tied to this Spark application.
*
* Global temporary view is cross-session. Its lifetime is the lifetime of the Spark application,
* i.e. it will be automatically dropped when the application terminates. It's tied to a system
   * preserved database `_global_temp`, and we must use the qualified name to refer to a global temp
   * view, e.g. `SELECT * FROM _global_temp.view1`.
*
* @throws AnalysisException if the view name already exists
*
* @group basic
* @since 2.1.0
*/
@throws[AnalysisException]
def createGlobalTempView(viewName: String): Unit = withPlan {
createTempViewCommand(viewName, replace = false, global = true)
}
private def createTempViewCommand(
viewName: String,
replace: Boolean,
global: Boolean): CreateViewCommand = {
val viewType = if (global) GlobalTempView else LocalTempView
CreateViewCommand(
name = sparkSession.sessionState.sqlParser.parseTableIdentifier(viewName),
userSpecifiedColumns = Nil,
comment = None,
properties = Map.empty,
originalText = None,
child = logicalPlan,
allowExisting = false,
replace = replace,
viewType = viewType)
}
/**
* Interface for saving the content of the non-streaming Dataset out into external storage.
*
* @group basic
* @since 1.6.0
*/
def write: DataFrameWriter[T] = {
if (isStreaming) {
logicalPlan.failAnalysis(
"'write' can not be called on streaming Dataset/DataFrame")
}
new DataFrameWriter[T](this)
}
/**
* :: Experimental ::
* Interface for saving the content of the streaming Dataset out into external storage.
*
* @group basic
* @since 2.0.0
*/
@Experimental
@InterfaceStability.Evolving
def writeStream: DataStreamWriter[T] = {
if (!isStreaming) {
logicalPlan.failAnalysis(
"'writeStream' can be called only on streaming Dataset/DataFrame")
}
new DataStreamWriter[T](this)
}
/**
* Returns the content of the Dataset as a Dataset of JSON strings.
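   *
   * A minimal sketch:
   * {{{
   *   val jsonStrings: Dataset[String] = ds.toJSON
   * }}}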
* @since 2.0.0
*/
def toJSON: Dataset[String] = {
val rowSchema = this.schema
val rdd: RDD[String] = queryExecution.toRdd.mapPartitions { iter =>
val writer = new CharArrayWriter()
      // Create the generator without a separator inserted between records
val gen = new JacksonGenerator(rowSchema, writer)
new Iterator[String] {
override def hasNext: Boolean = iter.hasNext
override def next(): String = {
gen.write(iter.next())
gen.flush()
val json = writer.toString
if (hasNext) {
writer.reset()
} else {
gen.close()
}
json
}
}
}
import sparkSession.implicits.newStringEncoder
sparkSession.createDataset(rdd)
}
/**
* Returns a best-effort snapshot of the files that compose this Dataset. This method simply
* asks each constituent BaseRelation for its respective files and takes the union of all results.
* Depending on the source relations, this may not find all input files. Duplicates are removed.
*
* @group basic
* @since 2.0.0
*/
def inputFiles: Array[String] = {
val files: Seq[String] = queryExecution.optimizedPlan.collect {
case LogicalRelation(fsBasedRelation: FileRelation, _, _) =>
fsBasedRelation.inputFiles
case fr: FileRelation =>
fr.inputFiles
}.flatten
files.toSet.toArray
}
////////////////////////////////////////////////////////////////////////////
// For Python API
////////////////////////////////////////////////////////////////////////////
/**
* Converts a JavaRDD to a PythonRDD.
*/
private[sql] def javaToPython: JavaRDD[Array[Byte]] = {
val structType = schema // capture it for closure
val rdd = queryExecution.toRdd.map(EvaluatePython.toJava(_, structType))
EvaluatePython.javaToPython(rdd)
}
private[sql] def collectToPython(): Int = {
EvaluatePython.registerPicklers()
withNewExecutionId {
val toJava: (Any) => Any = EvaluatePython.toJava(_, schema)
val iter = new SerDeUtil.AutoBatchedPickler(
queryExecution.executedPlan.executeCollect().iterator.map(toJava))
PythonRDD.serveIterator(iter, "serve-DataFrame")
}
}
private[sql] def toPythonIterator(): Int = {
withNewExecutionId {
PythonRDD.toLocalIteratorAndServe(javaToPython.rdd)
}
}
////////////////////////////////////////////////////////////////////////////
// Private Helpers
////////////////////////////////////////////////////////////////////////////
/**
* Wrap a Dataset action to track all Spark jobs in the body so that we can connect them with
* an execution.
*/
private[sql] def withNewExecutionId[U](body: => U): U = {
SQLExecution.withNewExecutionId(sparkSession, queryExecution)(body)
}
/**
* Wrap a Dataset action to track the QueryExecution and time cost, then report to the
* user-registered callback functions.
*/
private def withCallback[U](name: String, df: DataFrame)(action: DataFrame => U) = {
try {
df.queryExecution.executedPlan.foreach { plan =>
plan.resetMetrics()
}
val start = System.nanoTime()
val result = action(df)
val end = System.nanoTime()
sparkSession.listenerManager.onSuccess(name, df.queryExecution, end - start)
result
} catch {
case e: Exception =>
sparkSession.listenerManager.onFailure(name, df.queryExecution, e)
throw e
}
}
private def withTypedCallback[A, B](name: String, ds: Dataset[A])(action: Dataset[A] => B) = {
try {
ds.queryExecution.executedPlan.foreach { plan =>
plan.resetMetrics()
}
val start = System.nanoTime()
val result = action(ds)
val end = System.nanoTime()
sparkSession.listenerManager.onSuccess(name, ds.queryExecution, end - start)
result
} catch {
case e: Exception =>
sparkSession.listenerManager.onFailure(name, ds.queryExecution, e)
throw e
}
}
private def sortInternal(global: Boolean, sortExprs: Seq[Column]): Dataset[T] = {
val sortOrder: Seq[SortOrder] = sortExprs.map { col =>
col.expr match {
case expr: SortOrder =>
expr
case expr: Expression =>
SortOrder(expr, Ascending)
}
}
withTypedPlan {
Sort(sortOrder, global = global, logicalPlan)
}
}
/** A convenient function to wrap a logical plan and produce a DataFrame. */
@inline private def withPlan(logicalPlan: => LogicalPlan): DataFrame = {
Dataset.ofRows(sparkSession, logicalPlan)
}
/** A convenient function to wrap a logical plan and produce a Dataset. */
@inline private def withTypedPlan[U : Encoder](logicalPlan: => LogicalPlan): Dataset[U] = {
Dataset(sparkSession, logicalPlan)
}
/** A convenient function to wrap a set based logical plan and produce a Dataset. */
@inline private def withSetOperator[U : Encoder](logicalPlan: => LogicalPlan): Dataset[U] = {
if (classTag.runtimeClass.isAssignableFrom(classOf[Row])) {
// Set operators widen types (change the schema), so we cannot reuse the row encoder.
Dataset.ofRows(sparkSession, logicalPlan).asInstanceOf[Dataset[U]]
} else {
Dataset(sparkSession, logicalPlan)
}
}
}
|
ZxlAaron/mypros
|
sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
|
Scala
|
apache-2.0
| 94,807 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.tutorial
import cc.factorie.app.nlp._
/** A simple example of running an NLP document processor. */
object NLP1 extends App {
val doc = new Document("Mr. Jones took a job at Google in New York. He and his Australian wife moved from New South Wales on 4/1/12.")
println(doc.string.length)
segment.DeterministicNormalizingTokenizer.process(doc)
  println(doc.tokens.map(_.string).mkString("\n"))
}
|
strubell/factorie
|
src/main/scala/cc/factorie/tutorial/NLP1.scala
|
Scala
|
apache-2.0
| 1,172 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.events.Event
object StubReporter extends Reporter {
def apply(event: Event) {
}
}
|
travisbrown/scalatest
|
src/test/scala/org/scalatest/StubReporter.scala
|
Scala
|
apache-2.0
| 732 |
package com.avsystem.commons
package annotation
/**
* Marker trait for annotations which don't want to be inherited by subtypes
* of a sealed trait or class that has this annotation applied. Intended for annotations that should apply
* only to the sealed trait itself.
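 *
 * A hypothetical sketch (the annotation and hierarchy names are illustrative only):
 * {{{
 *   class onlyOnBase extends StaticAnnotation with NotInheritedFromSealedTypes
 *
 *   @onlyOnBase sealed trait Base
 *   final case class Impl(i: Int) extends Base // does not pick up @onlyOnBase
 * }}}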
*/
trait NotInheritedFromSealedTypes extends StaticAnnotation
|
AVSystem/scala-commons
|
commons-core/src/main/scala/com/avsystem/commons/annotation/NotInheritedFromSealedTypes.scala
|
Scala
|
mit
| 340 |
import sbt.{ State => _, Configuration => _, Show => _, File => _, _ }
import Keys._
import scalaz._
import Scalaz.{ state => _, _}
import sbt.complete.DefaultParsers._
import sbt.complete._
import edu.gemini.osgi.tools.idea.{ IdeaModule, IdeaProject, IdeaProjectTask }
import edu.gemini.osgi.tools.app.{ Application, Configuration, AppBuilder }
import xml.PrettyPrinter
import java.io.File
trait OcsBundleSettings { this: OcsKey =>
lazy val osgiEmbeddedJars = TaskKey[Seq[File]]("osgi-embedded-jars")
lazy val osgiExplodedJars = TaskKey[Seq[File]]("osgi-exploded-jars")
// Bundle Settings
lazy val ocsBundleSettings = Seq(
// // Blow up jarfiles in lib/
// resourceGenerators in Compile += Def.task[Seq[java.io.File]] {
// val n = name.value
// val s = state.value
// val out = (resourceManaged in Compile).value
// val ff = FileFunction.cached(target.value / "blowup-cache", FilesInfo.lastModified, FilesInfo.exists) { (fs: Set[File]) =>
// fs.filter(_.getName.endsWith(".jar")).flatMap { j =>
// s.log.info(s"$n: exploding ${j.getName}")
// IO.unzip(j, out, (_: String) != "META-INF/MANIFEST.MF")
// }
// }
// val jars = Option(unmanagedBase.value.listFiles).map(_.toList.filter(_.getName.endsWith(".jar"))).getOrElse(Nil).toSet
// ff(jars).toList
// }.taskValue,
osgiExplodedJars ++= {
Option(unmanagedBase.value.listFiles).map(_.toList.filter(_.getName.endsWith(".jar"))).getOrElse(Nil)
},
ocsProjectDependencies := thisProject.value.dependencies.collect {
case ResolvedClasspathDependency(r @ ProjectRef(_, _), _) => r
},
ocsUsers := {
val s = state.value
val extracted = Project.extract(s)
val ref = thisProjectRef.value
ocsAllBundleProjects.value.filter(p => extracted.get(ocsDependencies in p).contains(ref))
},
ocsProjectAggregate := thisProject.value.aggregate,
ocsDependencies := ocsProjectDependencies.value ++ ocsProjectAggregate.value,
ocsBundleIdeaModuleName := s"${name.value}-${version.value}",
ocsBundleIdeaModuleAbstractPath := baseDirectory.value / (ocsBundleIdeaModuleName.value + ".iml"),
ocsBundleIdeaModule := {
val iml = ocsBundleIdeaModuleAbstractPath.value
val modules: Seq[String] = {
val s = state.value
val extracted = Project.extract(s)
ocsDependencies.value.map(p => extracted.get(ocsBundleIdeaModuleName in p))
}
val classpath = ((managedClasspath in Compile).value ++ (unmanagedJars in Compile).value).map(_.data)
val testClasspath= ((managedClasspath in Test).value ++ (unmanagedJars in Test).value).map(_.data) filterNot (classpath.contains)
IO.createDirectory(iml.getParentFile)
val mod = new IdeaModule(iml.getParentFile, modules, classpath, testClasspath)
IO.writeLines(iml, List(new PrettyPrinter(132, 2).format(mod.module)), IO.utf8)
streams.value.log.info("IDEA module: " + iml)
iml
},
ocsClosure := computeOnce(baseDirectory.value + " closure") {
val s = state.value
val extracted = Project.extract(s)
val ds = ocsDependencies.value
(ds ++ ds.flatMap(p => extracted.runTask(ocsClosure in p, s)._2)).distinct
},
ocsBundleDependencies := {
val extracted = Project.extract(state.value)
val tree = Tree.unfoldTree(thisProjectRef.value) { p =>
(extracted.get(name in p), () => extracted.get(ocsDependencies in p).toStream)
}
printTree(tree)
},
ocsBundleDependencies0 := {
val extracted = Project.extract(state.value)
val tree = Tree.node(name.value, ocsDependencies.value.toStream.map { p =>
Tree.node(extracted.get(name in p), Stream.empty)
})
printTree(tree)
},
ocsBundleUsers := {
val s = state.value
val extracted = Project.extract(s)
val tree = Tree.unfoldTree(thisProjectRef.value) { p =>
(extracted.get(name in p), () => extracted.runTask(ocsUsers in p, s)._2.toStream)
}
printTree(tree)
},
ocsBundleUsers0 := {
val extracted = Project.extract(state.value)
val tree = Tree.node(name.value, ocsUsers.value.toStream.map { p =>
Tree.node(extracted.get(name in p), Stream.empty)
})
printTree(tree)
},
ocsBundleInfo <<= mkBundleInfo //,
// incOptions := incOptions.value.withNameHashing(true)
)
// A terrible thing, but necessary for some statically-computable properties like the dependency
// closure that cannot be computed as settings because sbt is terrible
protected var computeOnceCache: Map[String, Any] = Map.empty // sigh
protected def computeOnce[A](key: String)(a: => A): A =
computeOnceCache.get(key) match {
case Some(x) => x.asInstanceOf[A]
case None =>
val x = a
synchronized { computeOnceCache += (key -> x) }
x
}
case class OcsBundleInfo(
embeddedJars: List[File],
libraryBundleJars: List[File],
unmooredJars: List[File], // the presence of anything here means the bundle can't be packaged
bundleProjectRefs: List[ProjectRef])
val mkBundleInfo: Def.Initialize[Task[OcsBundleInfo]] = {
Def.task {
computeOnce(baseDirectory.value + " info") {
// Setup
val s = state.value
val log = streams.value.log
val ex = Project.extract(s)
s.log.info("Computing bundle info for " + name.value)
// We need to know how to do this
def bundleName(jar: File): Option[String] =
Option(new java.util.jar.JarFile(jar).getManifest.getMainAttributes.getValue("Bundle-SymbolicName"))
// All class dirs and jars in classpath
val full: List[File] =
((managedClasspath in Compile).value ++ (externalDependencyClasspath in Compile).value).toList.map(_.data.getCanonicalFile).distinct.sortBy(_.toString)
// (fullClasspath in Compile).value.toList.map(_.data.getCanonicalFile).distinct.sortBy(_.toString)
// All class dirs and embedded jars from dependent projects
val proj: Set[File] = ocsDependencies.value.flatMap { p =>
ex.get(classDirectory in (p, Compile)) +: ex.runTask(osgiEmbeddedJars in p, s)._2
}.map(_.getCanonicalFile).distinct.toSet
// Dependencies other than bundle projects. These are library bundles or normal library jars
val external = full.filterNot(proj)
val externalJars = external.filter(_.isFile)
val withName = externalJars.fproduct(bundleName)
val (bundleJars, libJars) = withName.partition(_._2.isDefined).umap(_.map(_._1))
val projectRefs = ocsClosure.value.distinct.sorted.toList
// Done
OcsBundleInfo(osgiEmbeddedJars.value.toList, bundleJars, libJars, projectRefs)
}
}
}
// val showBundleInfo = {
// lazy val osgiEmbeddedJars = TaskKey[Seq[File]]("osgi-embedded-jars")
// Def.task {
// val s = state.value
// val log = streams.value.log
// val ex = Project.extract(s)
// def bundleName(jar: File): Option[String] =
// Option(new java.util.jar.JarFile(jar).getManifest.getMainAttributes.getValue("Bundle-SymbolicName"))
// // All class dirs and jars in classpath
// val full: List[File] =
// (fullClasspath in Compile).value.toList.map(_.data.getCanonicalFile).distinct.sortBy(_.toString)
// // All class dirs and embedded jars from dependent projects
// val proj: Set[File] = ocsDependencies.value.flatMap { p =>
// ex.get(classDirectory in (p, Compile)) +: ex.runTask(osgiEmbeddedJars, s)._2
// }.map(_.getCanonicalFile).distinct.toSet
// // Dependencies other than bundle projects. These are library bundles or normal library jars
// val external = full.filterNot(proj)
// // None of these should be dirs
// // (external.filter(_.isDirectory)) ...
// val externalJars = external.filter(_.isFile)
// val withName = externalJars.fproduct(bundleName)
// val (bundleJars, libJars) = withName.partition(_._2.isDefined)
// val projectRefs = ocsClosure.value.map(p => ex.get(name in p)).distinct.sorted
// log.info("")
// log.info(s"Bundle Info for ${name.value}")
// if (osgiEmbeddedJars.value.nonEmpty) {
// log.info("")
// log.info(" Embedded Jars:")
// osgiEmbeddedJars.value.sorted.foreach(f => log.info(s" $f"))
// }
// if (projectRefs.nonEmpty) {
// log.info("")
// log.info(" Project References:")
// projectRefs.map(name => log.info(s" $name"))
// }
// if (bundleJars.nonEmpty) {
// log.info("")
// log.info(" Referenced Library Bundles:")
// bundleJars.collect { case (f, Some(n)) => n } .distinct.sorted.foreach(n => log.info(s" $n"))
// }
// if (libJars.nonEmpty) {
// log.info("")
// log.info(" Referenced Library Jars: (MUST be embedded or wrapped)")
// libJars.map(_._1).foreach { f =>
// log.info(s" $f")
// }
// }
// log.info("")
// }
// }
// Tree from a closure, given a root.
def treeFrom[A](a: A, m: Map[A, Stream[A]], full: Boolean): Tree[A] =
Tree.node(a, unfoldForest(~m.get(a))(p => (p, () => if (full) ~m.get(p) else Stream.empty)))
  // Print a tree in a slightly prettier form than default, using toString for elements
def printTree[A](t: Tree[A]): Unit =
t.draw(Show.showA).zipWithIndex.filter(_._2 % 2 == 0).map(_._1).foreach(println)
}
|
arturog8m/ocs
|
project/OcsBundleSettings.scala
|
Scala
|
bsd-3-clause
| 9,638 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.classpath
import scala.reflect.io.AbstractFile
import scala.tools.nsc.util.ClassRepresentation
case class ClassPathEntries(packages: scala.collection.Seq[PackageEntry], classesAndSources: scala.collection.Seq[ClassRepresentation])
object ClassPathEntries {
import scala.language.implicitConversions
  // to have a working unzip method
implicit def entry2Tuple(entry: ClassPathEntries): (scala.collection.Seq[PackageEntry], scala.collection.Seq[ClassRepresentation]) = (entry.packages, entry.classesAndSources)
val empty = ClassPathEntries(Seq.empty, Seq.empty)
}
trait ClassFileEntry extends ClassRepresentation {
def file: AbstractFile
}
trait SourceFileEntry extends ClassRepresentation {
def file: AbstractFile
}
case class PackageName(dottedString: String) {
def isRoot: Boolean = dottedString.isEmpty
val dirPathTrailingSlash: String = FileUtils.dirPath(dottedString) + "/"
def entryName(entry: String): String = {
if (isRoot) entry else {
val builder = new java.lang.StringBuilder(dottedString.length + 1 + entry.length)
builder.append(dottedString)
builder.append('.')
builder.append(entry)
builder.toString
}
}
}
trait PackageEntry {
def name: String
}
private[nsc] case class ClassFileEntryImpl(file: AbstractFile) extends ClassFileEntry {
override val name = FileUtils.stripClassExtension(file.name) // class name
override def binary: Option[AbstractFile] = Some(file)
override def source: Option[AbstractFile] = None
}
private[nsc] case class SourceFileEntryImpl(file: AbstractFile) extends SourceFileEntry {
override val name = FileUtils.stripSourceExtension(file.name)
override def binary: Option[AbstractFile] = None
override def source: Option[AbstractFile] = Some(file)
}
private[nsc] case class ClassAndSourceFilesEntry(classFile: AbstractFile, srcFile: AbstractFile) extends ClassRepresentation {
override val name = FileUtils.stripClassExtension(classFile.name)
override def binary: Option[AbstractFile] = Some(classFile)
override def source: Option[AbstractFile] = Some(srcFile)
}
private[nsc] case class PackageEntryImpl(name: String) extends PackageEntry
private[nsc] trait NoSourcePaths {
final def asSourcePathString: String = ""
final private[nsc] def sources(inPackage: PackageName): Seq[SourceFileEntry] = Seq.empty
}
private[nsc] trait NoClassPaths {
final def findClassFile(className: String): Option[AbstractFile] = None
private[nsc] final def classes(inPackage: PackageName): Seq[ClassFileEntry] = Seq.empty
}
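// Hypothetical usage sketch (not part of the original file), assumed to live in the same
// package: PackageName derives entry and directory names, and the implicit entry2Tuple
// conversion is what makes unzip work on a sequence of ClassPathEntries.
object ClassPathEntriesSketch {
  val pkg = PackageName("scala.tools.nsc")
  val entry: String = pkg.entryName("Global") // "scala.tools.nsc.Global"
  val dir: String = pkg.dirPathTrailingSlash  // directory form of the package, with a trailing slash

  // unzip resolves the implicit entry2Tuple conversion from the companion object.
  val (packages, classesAndSources) =
    Seq(ClassPathEntries.empty, ClassPathEntries.empty).unzip
}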
|
martijnhoekstra/scala
|
src/compiler/scala/tools/nsc/classpath/ClassPath.scala
|
Scala
|
apache-2.0
| 2,854 |
/*
* Copyright 2021 LINE Corporation
*
* LINE Corporation licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License
*/
package com.linecorp.armeria.server.sangria
import com.linecorp.armeria.client.{WebClient, WebClientBuilder}
import com.linecorp.armeria.client.logging.LoggingClient
import com.linecorp.armeria.internal.testing.ServerRuleDelegate
import com.linecorp.armeria.server.ServerBuilder
import munit.Suite
trait ServerSuite {
self: Suite =>
// TODO(ikhoon): This code is copied from :scala_2.13. Make this reusable by introducing a common-testing
// module for Scala.
private var delegate: ServerRuleDelegate = _
protected def configureServer: ServerBuilder => Unit
protected def configureWebClient: WebClientBuilder => Unit = _ => ()
protected def server: ServerRuleDelegate = delegate
/**
* Returns whether this extension should run around each test method instead of the entire test class.
* Implementations should override this method to return `true` to run around each test method.
*/
protected def runServerForEachTest = false
override def beforeAll(): Unit = {
delegate = new ServerRuleDelegate(false) {
override def configure(sb: ServerBuilder): Unit = configureServer(sb)
override def configureWebClient(wcb: WebClientBuilder): Unit = self.configureWebClient(wcb)
}
if (!runServerForEachTest) {
server.start()
}
}
override def afterAll(): Unit = {
if (!runServerForEachTest) {
server.stop()
}
}
override def beforeEach(context: BeforeEach): Unit = {
if (runServerForEachTest) {
server.start()
}
}
override def afterEach(context: AfterEach): Unit = {
if (runServerForEachTest) {
server.stop()
}
}
}
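// Hypothetical usage sketch (not part of the original file): a munit suite that starts one
// server for the whole class and calls it. The /echo endpoint, the "pong" payload and the
// webClient() accessor on ServerRuleDelegate are assumptions made for illustration.
import com.linecorp.armeria.common.{HttpRequest, HttpResponse}
import com.linecorp.armeria.server.{HttpService, ServiceRequestContext}
import munit.FunSuite

class EchoServiceSuite extends FunSuite with ServerSuite {
  override protected def configureServer: ServerBuilder => Unit = { sb =>
    sb.service("/echo", new HttpService {
      override def serve(ctx: ServiceRequestContext, req: HttpRequest): HttpResponse =
        HttpResponse.of("pong")
    })
  }

  test("echo responds") {
    val res = server.webClient().get("/echo").aggregate().join()
    assertEquals(res.contentUtf8(), "pong")
  }
}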
|
line/armeria
|
sangria/sangria_2.13/src/test/scala/com/linecorp/armeria/server/sangria/ServerSuite.scala
|
Scala
|
apache-2.0
| 2,280 |
package com.sksamuel.elastic4s.indexes.admin
import com.sksamuel.exts.OptionImplicits._
trait IndexRecoveryApi {
def recoverIndex(first: String, rest: String*): IndexRecoveryRequest = recoverIndex(first +: rest)
def recoverIndex(indexes: Iterable[String]): IndexRecoveryRequest = IndexRecoveryRequest(indexes.toSeq)
}
case class IndexRecoveryRequest(indices: Seq[String],
activeOnly: Option[Boolean] = None,
detailed: Option[Boolean] = None) {
def activeOnly(boolean: Boolean): IndexRecoveryRequest = copy(activeOnly = boolean.some)
def detailed(boolean: Boolean): IndexRecoveryRequest = copy(detailed = boolean.some)
}
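// Hypothetical usage sketch (not part of the original file), assuming the definitions
// above are in scope; the index names are made up for illustration.
object IndexRecoverySketch extends IndexRecoveryApi {
  // Recover two indexes, reporting only active recoveries with detailed output.
  val request: IndexRecoveryRequest =
    recoverIndex("orders", "customers").activeOnly(true).detailed(true)
}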
|
Tecsisa/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/indexes/admin/IndexRecoveryApi.scala
|
Scala
|
apache-2.0
| 700 |
package stasiak.karol.fimpp
sealed trait Expr {
def eval(context:Context):RuntimeValue
}
case object EmptyExpr extends Expr {
def eval(context:Context) = RuntimeNull
}
case class NumberValue(value: Long) extends Expr {
def eval(context:Context) = RuntimeNumber(value)
}
case class StringValue(value: String) extends Expr {
def eval(context:Context) = RuntimeString(value)
}
case class BooleanValue(b: Boolean) extends Expr {
def eval(context:Context) = RuntimeBoolean(b)
}
case class Concatenation(exprs: List[Expr]) extends Expr {
def eval(context:Context) = RuntimeString(exprs.map(_.eval(context).toString).mkString(""))
}
case class ListExpression(elems: List[Expr]) extends Expr {
def eval(context:Context) = RuntimeList(elems.map(_.eval(context)))
}
case class VariableValue(ident: String) extends Expr {
def eval(context: Context) = context.get(ident)
}
case object NullValue extends Expr {
def eval(context: Context) = RuntimeNull
}
case class FunctionCall(function: String, args: List[Expr]) extends Expr {
def eval(context: Context) = {
context.get(function) match {
case RuntimeFunction(f) => f.call(context, args map (_ eval context))
case RuntimeBuiltin(f) => f(args map (_ eval context))
case z => throw new FimException("Cannot call "+z)
}
}
}
case class FunctionCallEach(function: String, args: Expr) extends Expr {
def eval(context: Context) = {
context.get(function) match {
case RuntimeFunction(f) =>
val e = args.eval(context)
e match {
case RuntimeList(es) => f.call(context, es)
case RuntimeArray(es) => f.call(context, es.toList)
case _ => throw new FimException("This are not multiple arguments")
}
case RuntimeBuiltin(f) =>
val e = args.eval(context)
e match {
case RuntimeList(es) => f(es)
case RuntimeArray(es) => f(es.toList)
case _ => throw new FimException("This are not multiple arguments")
}
case z => throw new FimException("Cannot call "+z)
}
}
}
sealed trait Condition {
def eval(context:Context):Boolean
}
case class Conjunction(conds: List[Condition])extends Condition {
def eval(context:Context) = conds.forall(_.eval(context))
}
case class Alternative(conds: List[Condition])extends Condition {
def eval(context:Context) = conds.exists(_.eval(context))
}
case class Relational(left: Expr, op: String, right: Expr)extends Condition {
def helper(e1:RuntimeValue, op:String, e2:RuntimeValue) :Boolean = {
(e1,op,e2) match {
case (RuntimeNumber(x),">",RuntimeNumber(y)) => x>y
case (RuntimeNumber(x),"<",RuntimeNumber(y)) => x<y
case (RuntimeNumber(x),">=",RuntimeNumber(y)) => x>=y
case (RuntimeNumber(x),"<=",RuntimeNumber(y)) => x<=y
case (x,"==",y) => x==y
case (x,"=",y) => x==y
case (x,"!=",y) => x!=y
case (RuntimeList(xs),"all>",y) => xs.forall(x=>helper(x,">",y))
case (RuntimeList(xs),"all<",y) => xs.forall(x=>helper(x,"<",y))
case (RuntimeList(xs),"any>",y) => xs.exists(x=>helper(x,">",y))
case (RuntimeList(xs),"any<",y) => xs.exists(x=>helper(x,"<",y))
case _ => throw new FimException("Unsupported comparison between "+e1+" and "+e1+" using β"+op+"β")
}
}
def eval(context:Context) = {
val e1 = left.eval(context)
val e2 = right.eval(context)
helper(e1,op,e2)
}
}
case class TrivialCondition(value: Boolean) extends Condition {
def eval(context:Context) = value
}
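// Hypothetical usage sketch (not part of the original file): builds a small AST from the
// nodes above and evaluates it. A Context instance (defined elsewhere in the package) is
// assumed to be passed in, although the literal nodes below never consult it.
object ExprSketch {
  // (3 > 2) and ("a" != "b") -- both legs hold for any context, so this returns true.
  def alwaysTrue(context: Context): Boolean =
    Conjunction(List(
      Relational(NumberValue(3), ">", NumberValue(2)),
      Relational(StringValue("a"), "!=", StringValue("b"))
    )).eval(context)
}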
|
KarolS/fimpp
|
src/stasiak/karol/fimpp/Expr.scala
|
Scala
|
gpl-3.0
| 3,518 |
/*
* Copyright (c) 2017 Richard Hull
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package byok3.web
import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.marshalling.{Marshaller, ToResponseMarshaller}
import akka.http.scaladsl.model.ContentTypes._
import akka.http.scaladsl.model.HttpEntity.ChunkStreamPart
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.server.Directives._
import akka.pattern.ask
import akka.stream.scaladsl.Source
import akka.util.Timeout
import byok3.BuildInfo
import byok3.web.actors._
import scala.concurrent.ExecutionContext
class RestAPI(system: ActorSystem, timeout: Timeout) extends RestRoutes {
implicit val requestTimeout = timeout
implicit def executionContext = system.dispatcher
def createSupervisor = system.actorOf(Supervisor.props, Supervisor.name)
}
trait SupervisorAPI {
def createSupervisor(): ActorRef
implicit def executionContext: ExecutionContext
implicit def requestTimeout: Timeout
@volatile lazy val supervisor = createSupervisor()
def evaluate(session: Option[String], line: String) =
supervisor.ask(KeyboardInput(session, line)).mapTo[Event]
}
trait RestRoutes extends SupervisorAPI {
implicit val toResponseMarshaller: ToResponseMarshaller[Source[String, Any]] =
Marshaller.opaque { lines =>
      val data = lines.map(line => ChunkStreamPart("\r" + line + "\n"))
HttpResponse(entity = HttpEntity.Chunked(`text/plain(UTF-8)`, data))
}
private val version =
path("api" / "version") {
get {
complete(HttpEntity(ContentTypes.`application/json`, BuildInfo.toJson))
}
}
private val sendCommand =
path("api") {
post {
entity(as[String]) { input =>
optionalCookie("session") { cookie =>
onSuccess(evaluate(cookie.map(_.value), input)) {
case ProgramOutput(Some(session), output) =>
setCookie(HttpCookiePair("session", session).toCookie()) {
complete {
Source(output)
}
}
case ProgramOutput(None, _) | UnknownSession =>
deleteCookie("session") {
complete(NotFound)
}
}
}
}
}
}
val routes = version ~ sendCommand
}
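// Hypothetical wiring sketch (not part of the original file): binds the routes using the
// classic akka-http server API. Host, port, timeout and system name are made up, and an
// actor system plus materializer are brought into implicit scope as that API requires.
import akka.stream.ActorMaterializer
import scala.concurrent.duration._

object WebMainSketch extends App {
  implicit val system: ActorSystem = ActorSystem("byok3-web")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  val api = new RestAPI(system, Timeout(5.seconds))
  akka.http.scaladsl.Http().bindAndHandle(api.routes, "0.0.0.0", 8080)
}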
|
rm-hull/byok3
|
web/src/main/scala/byok3/web/RestAPI.scala
|
Scala
|
mit
| 3,438 |
package scalautils
import better.files.File._
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import scalautils.StringUtils.ImplStringUtils
import scalautils.r._
class ShellTest extends FlatSpec with Matchers with BeforeAndAfter {
val wd = (home / ".scalautils_test" / "r").createIfNotExists(asDirectory = true)
before {
    // fixme won't work with links currently, see https://github.com/pathikrit/better-files/pull/72
// wd.glob("**").foreach(_.delete())
wd.list.foreach(_.delete(true))
}
it should "rendr a small r markdown report from a snippt " in {
rendrSnippet("test_report", """
#' # Test Report
require(ggplot2)
#+ fig.width=10
ggplot(iris, aes(Species)) + geom_bar()
#' ## another section
head(iris)
#' bla bla bla
1+1
""".alignLeft, wd = wd)
(wd / "test_report.html").toJava should exist
(wd / "test_report.html").contentAsString should not be empty
}
}
|
holgerbrandl/scalautils
|
src/test/scala/scalautils/ShellTest.scala
|
Scala
|
bsd-2-clause
| 964 |
/*
* Copyright 2016 Carlo Micieli
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.hascalator
package tests.arbitrary
import Prelude._
import org.scalacheck.{ Arbitrary, Gen }
import org.scalacheck.util.Buildable
import scala.collection.mutable
import scala.language.implicitConversions
/** @author Carlo Micieli
* @since 0.0.1
*/
trait ArbitraryList {
implicit def listToTraversable[T](list: List[T]): Traversable[T] = new Traversable[T] {
override def foreach[U](f: (T) => U): Unit = list.foreach(f)
}
implicit def arbitraryList[T](implicit a: Arbitrary[T]): Arbitrary[List[T]] = Arbitrary {
import Arbitrary._
import Gen._
val genEmptyList = const(List.empty[T])
val genSingletonList = for { x <- arbitrary[T] } yield List(x)
def genList(sz: Int): Gen[List[T]] = containerOfN[List, T](sz, arbitrary[T])
def sizedList(sz: Int) = {
if (sz <= 0) { genEmptyList }
else { Gen.frequency((1, genEmptyList), (1, genSingletonList), (8, genList(sz))) }
}
Gen.sized(sz => sizedList(sz))
}
implicit def listBuildable[A]: Buildable[A, List[A]] = new Buildable[A, List[A]] {
override def builder = new mutable.Builder[A, List[A]] {
private var list = List.empty[A]
override def +=(elem: A): this.type = {
list = elem :: list
this
}
override def result(): List[A] = list
override def clear(): Unit = list = List.empty[A]
}
}
}
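// Hypothetical property sketch (not part of the original file): mixes ArbitraryList into a
// ScalaCheck Properties object so generated values use the library's own List type. It
// assumes that List exposes a length method returning a non-negative Int.
import org.scalacheck.Prop.forAll
import org.scalacheck.Properties

object ListGenSketch extends Properties("List") with ArbitraryList {
  // Generators come from arbitraryList above: empty, singleton and sized cases.
  property("generated lists have non-negative length") = forAll { (xs: List[Int]) =>
    xs.length >= 0
  }
}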
|
CarloMicieli/hascalator
|
core/src/test/scala/io/hascalator/tests/arbitrary/ArbitraryList.scala
|
Scala
|
apache-2.0
| 1,973 |
package org.firedancer3d.scenegraph.geometry
class Disk {
}
|
cyberthinkers/FireDancer3D
|
firedancer3d_shared/src/main/scala/org/firedancer3d/scenegraph/geometry/Disk.scala
|
Scala
|
mit
| 65 |
package com.ubeeko.htalk.hbase
import com.ubeeko.exceptions.NotImplementedOperation
import com.ubeeko.htalk.bytesconv._
import java.util.Date
import java.io.IOException
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase._
import org.apache.hadoop.hbase.client._
import org.apache.hadoop.hbase.client.coprocessor.Batch
import org.apache.hadoop.classification.InterfaceAudience
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel
import org.scalatest.Matchers
import com.google.protobuf.Service
import com.ubeeko.htalk.criteria.Family
case class TestTable(desc: HTableDescriptor, var rows: Map[IndexedSeq[Byte], List[Cell]], var enabled: Boolean = true) extends TestHTable
trait TestHBaseManager extends HBaseManager {
protected var tables: Map[TableName, TestTable]
def hbaseBackendVersion: String = "FakeHBase"
def getRawTestTables: Map[TableName, TestTable] = tables
def getTableDescriptors(tableNames: Seq[TableName]): Array[HTableDescriptor] = {
tableNames.map(name => tables(name).desc).toArray
}
def isTableEnabled(name: TableName): Boolean = tables.get(name).map(_.enabled).getOrElse(false)
def createTable(name: TableName,
families: Seq[String] = Seq(Family.default.value),
ignoreExisting: Boolean = false): Unit = {
if (!tableExists(name)) {
val desc = new HTableDescriptor(name)
families.foreach(fam => desc.addFamily(new HColumnDescriptor(fam)))
tables += name -> TestTable(desc, Map())
} else if (!ignoreExisting) throw new TableExistsException(name)
}
def deleteTable(name: TableName): Unit = {
tables = tables - name
}
def getTable(name: TableName): Table = {
tables.get(name).getOrElse(null)
}
private def getMutator(table: Table): BufferedMutator = new BufferedMutator {
import scala.collection.JavaConversions._
def close(): Unit = table.close()
def flush(): Unit = {}
def getConfiguration(): org.apache.hadoop.conf.Configuration = table.getConfiguration
def getName(): org.apache.hadoop.hbase.TableName = table.getName
def getWriteBufferSize(): Long = throw NotImplementedOperation("getWriteBufferSize")
def mutate(mutations: java.util.List[_ <: org.apache.hadoop.hbase.client.Mutation]): Unit = mutations.foreach(mutate)
def mutate(mutation: org.apache.hadoop.hbase.client.Mutation): Unit = mutation match {
case putOp: Put => table.put(putOp)
case deleteOp: Delete => table.delete(deleteOp)
case _: Append => throw NotImplementedOperation("Mutate with Append")
case _: Increment => throw NotImplementedOperation("Mutate with Increment")
}
}
def getBufferedMutator(name: TableName): BufferedMutator = {
tables.get(name).map(getMutator).getOrElse(null)
}
def tableExists(name: TableName): Boolean = tables.exists(_._1 == name)
def close(): Unit =
tables = Map()
}
object TestHBaseManager extends Matchers {
private def stringFromCell(cell: org.apache.hadoop.hbase.Cell, valueGetter: org.apache.hadoop.hbase.Cell => Array[Byte]): String =
BytesConv.StringBytesConv(valueGetter(cell))
// XXX Should be written with implicit
def row(cell: org.apache.hadoop.hbase.Cell): String = stringFromCell(cell, CellUtil.cloneRow)
def qualifier(cell: org.apache.hadoop.hbase.Cell): String = stringFromCell(cell, CellUtil.cloneQualifier)
def family(cell: org.apache.hadoop.hbase.Cell): String = stringFromCell(cell, CellUtil.cloneFamily)
def cellValue(cell: org.apache.hadoop.hbase.Cell): String = stringFromCell(cell, CellUtil.cloneValue)
def typedCellValue[T](cell: org.apache.hadoop.hbase.Cell)(implicit conv: BytesConv[T]): T =
conv(CellUtil.cloneValue(cell))
def assertTableAndGetRows(htalkContext: HTalkContext, table: String, rowNumber: Int): Map[String, List[org.apache.hadoop.hbase.Cell]] = {
val hbaseManager = htalkContext.hbaseManager.asInstanceOf[TestHBaseManager]
val tableName = htalkContext.tableName(table)
val tables = hbaseManager.getRawTestTables
tables.keySet should contain(tableName)
tables(tableName).rows should have size rowNumber
tables(tableName).rows.map {
case (rowId, cells) => (BytesConv.StringBytesConv(rowId.toArray), cells)
}
}
def emptyInstance = new TestHBaseManager {
protected var tables = Map.empty[TableName, TestTable]
}
}
trait TestHTable extends Table {
import scala.collection.JavaConversions._
val desc: HTableDescriptor
var rows: Map[IndexedSeq[Byte], List[Cell]]
var enabled: Boolean
def checkAndDelete(x$1: Array[Byte],x$2: Array[Byte],x$3: Array[Byte],x$4: org.apache.hadoop.hbase.filter.CompareFilter.CompareOp,x$5: Array[Byte],x$6: org.apache.hadoop.hbase.client.Delete): Boolean = ???
def checkAndPut(x$1: Array[Byte],x$2: Array[Byte],x$3: Array[Byte],x$4: org.apache.hadoop.hbase.filter.CompareFilter.CompareOp,x$5: Array[Byte],x$6: org.apache.hadoop.hbase.client.Put): Boolean = ???
def existsAll(x$1: java.util.List[org.apache.hadoop.hbase.client.Get]): Array[Boolean] = ???
def hasFamily(family: Array[Byte]): Boolean = desc.hasFamily(family)
def getRow(row: Array[Byte]) = rows.get(row.toIndexedSeq)
def getFiltered(row: Array[Byte], family: Array[Byte], column: Option[Array[Byte]]) = {
getRow(row).map {
_.filter(cell => CellUtil.cloneFamily(cell).deep == family.deep &&
column.map(CellUtil.cloneQualifier(cell).deep == _.deep).getOrElse(true))
}.getOrElse(List())
}
def scanFilter(row: List[Cell], f: Cell => Boolean) = {
row.filter(f)
}
def scanFamily(row: List[Cell], family: Array[Byte]) = scanFilter(row, cell => CellUtil.cloneFamily(cell).deep == family.deep)
def scanColumn(row: List[Cell], family: Array[Byte], column: Array[Byte]) = scanFilter(row,
cell => CellUtil.cloneFamily(cell).deep == family.deep && CellUtil.cloneQualifier(cell).deep == column.deep)
def deleteRow(row: IndexedSeq[Byte]): Unit = rows -= row
  def delete(row: IndexedSeq[Byte], f: Cell => Boolean): Unit = {
    // Keep only the cells that do not match the predicate; drop the row once it is empty.
    val remaining = rows.get(row).map(_.filterNot(f)).getOrElse(List())
    if (remaining.isEmpty) deleteRow(row) else rows += row -> remaining
  }
def deleteFamily(row: IndexedSeq[Byte], family: Array[Byte]) = {
val fam = family.deep
delete(row, CellUtil.cloneFamily(_).deep == fam)
}
def deleteColumn(row: IndexedSeq[Byte], family: Array[Byte], qualifier: Array[Byte]) = {
val fam = family.deep
val qua = qualifier.deep
delete(row, cell => CellUtil.cloneFamily(cell).deep == fam && CellUtil.cloneQualifier(cell).deep == qua)
}
/**
* Gets the fully qualified table name instance of this table.
*/
def getName: TableName = desc.getTableName
/**
* Returns the {@link Configuration} object used by this instance.
* <p>
* The reference returned is not a copy, so any change made to it will
* affect this instance.
*/
def getConfiguration: Configuration = throw NotImplementedOperation("getConfiguration not implemented in the Test implementation")
/**
* Gets the {@link HTableDescriptor table descriptor} for this table.
* @throws IOException if a remote or network exception occurs.
*/
def getTableDescriptor: HTableDescriptor = desc
/**
* Test for the existence of columns in the table, as specified by the Get.
* <p>
*
* This will return true if the Get matches one or more keys, false if not.
* <p>
*
   * This is a server-side call so it prevents any data from being transferred to
* the client.
*
* @param get the Get
* @return true if the specified Get matches one or more keys, false if not
* @throws IOException e
*/
def exists(get: Get): Boolean = {
get.getFamilyMap().exists{
case (family, List()) =>
hasFamily(family) && !getFiltered(get.getRow(), family, None).isEmpty
case (family, columns) =>
hasFamily(family) && columns.exists(column => !getFiltered(get.getRow(), family, Some(column)).isEmpty)
}
}
/**
* Test for the existence of columns in the table, as specified by the Gets.
* <p>
*
* This will return an array of booleans. Each value will be true if the related Get matches
* one or more keys, false if not.
* <p>
*
   * This is a server-side call so it prevents any data from being transferred to
* the client.
*
* @param gets the Gets
* @return Array of Boolean true if the specified Get matches one or more keys, false if not
* @throws IOException e
*/
def exists(gets: java.util.List[Get]): Array[java.lang.Boolean] = gets.map(get => new java.lang.Boolean(exists(get))).toArray
/**
* Method that does a batch call on Deletes, Gets, Puts, Increments, Appends and RowMutations.
* The ordering of execution of the actions is not defined. Meaning if you do a Put and a
* Get in the same {@link #batch} call, you will not necessarily be
* guaranteed that the Get returns what the Put had put.
*
* @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
* @param results Empty Object[], same size as actions. Provides access to partial
* results, in case an exception is thrown. A null in the result array means that
* the call for that action failed, even after retries
* @throws IOException
* @since 0.90.0
*/
def batch(actions: java.util.List[_ <: Row], results: Array[Object]): Unit = throw NotImplementedOperation("batch not implemented in the Test implementation")
/**
* Same as {@link #batch(List, Object[])}, but returns an array of
* results instead of using a results parameter reference.
*
* @param actions list of Get, Put, Delete, Increment, Append, RowMutations objects
* @return the results from the actions. A null in the return array means that
* the call for that action failed, even after retries
* @throws IOException
* @since 0.90.0
*/
def batch(actions: java.util.List[_ <: Row]): Array[Object] = throw NotImplementedOperation("batch not implemented in the Test implementation")
/**
* Same as {@link #batch(List, Object[])}, but with a callback.
* @since 0.96.0
*/
def batchCallback[R](
actions: java.util.List[_ <: Row], results: Array[Object], callback: Batch.Callback[R]
): Unit = throw NotImplementedOperation("batchCallback not implemented in the Test implementation")
/**
* Same as {@link #batch(List)}, but with a callback.
* @since 0.96.0
*/
def batchCallback[R](
actions: java.util.List[_ <: Row], callback: Batch.Callback[R]
): Array[Object] = throw NotImplementedOperation("batchCallback not implemented in the Test implementation")
/**
* Extracts certain cells from a given row.
* @param get The object that specifies what data to fetch and from which row.
* @return The data coming from the specified row, if it exists. If the row
* specified doesn't exist, the {@link Result} instance returned won't
* contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
*/
def get(get: Get): Result = {
val familyMap = get.getFamilyMap()
val rowkey = get.getRow
val cells = if (familyMap.isEmpty) {
getRow(rowkey).getOrElse(List())
} else {
familyMap.flatMap {
case (family, null) => getFiltered(rowkey, family, None)
case (family, columns) => columns.flatMap(column => getFiltered(get.getRow, family, Some(column)))
}
}
Result.create(cells.toSeq)
}
/**
* Extracts certain cells from the given rows, in batch.
*
* @param gets The objects that specify what data to fetch and from which rows.
*
* @return The data coming from the specified rows, if it exists. If the row
* specified doesn't exist, the {@link Result} instance returned won't
* contain any {@link KeyValue}, as indicated by {@link Result#isEmpty()}.
* If there are any failures even after retries, there will be a null in
* the results array for those Gets, AND an exception will be thrown.
* @throws IOException if a remote or network exception occurs.
*
* @since 0.90.0
*/
def get(gets: java.util.List[Get]): Array[Result] = gets.map(get).toArray
/**
* Return the row that matches <i>row</i> exactly,
* or the one that immediately precedes it.
*
* @param row A row key.
* @param family Column family to include in the {@link Result}.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
*
* @deprecated As of version 0.92 this method is deprecated without
* replacement.
* getRowOrBefore is used internally to find entries in hbase:meta and makes
* various assumptions about the table (which are true for hbase:meta but not
* in general) to be efficient.
*/
def getRowOrBefore(row: Array[Byte], family: Array[Byte]): Result = throw NotImplementedOperation("getRowOrBefore not implemented in the Test implementation")
/**
* Returns a scanner on the current table as specified by the {@link Scan}
* object.
* Note that the passed {@link Scan}'s start row and caching properties
* maybe changed.
*
* @param scan A configured {@link Scan} object.
* @return A scanner.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
*/
def getScanner(scan: Scan): ResultScanner = {
import org.apache.hadoop.hbase.util.Bytes
val familyMap = scan.getFamilyMap
val results = if (familyMap.isEmpty) {
rows.values
} else {
rows.values.map {row =>
familyMap.flatMap {
case (family, null) => scanFamily(row, family)
case (family, columns) => columns.flatMap(column => scanColumn(row, family, column))
}.toList
}.filterNot(_.isEmpty)
}.filter(row => {
val firstCell = row.head
val rowkey = firstCell.getRowArray.drop(firstCell.getRowOffset).take(firstCell.getRowLength)
val startRow = Option(scan.getStartRow)
val comp = Bytes.compareTo(rowkey, startRow.getOrElse(Array.empty))
comp > 0
})
new ResultScanner {
import scala.collection.JavaConversions.asJavaIterator
protected var iterator: java.util.Iterator[Result] = results.iterator.map(Result.create(_))
def next: Result = {
if (iterator.hasNext) iterator.next
else null
}
def next(nbRows: Int) = {
iterator.take(nbRows).toArray
}
def close: Unit = iterator = Iterator.empty
}
}
/**
* Gets a scanner on the current table for the given family.
*
* @param family The column family to scan.
* @return A scanner.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
*/
def getScanner(family: Array[Byte]): ResultScanner = {
getScanner(new Scan().addFamily(family))
}
/**
* Gets a scanner on the current table for the given family and qualifier.
*
* @param family The column family to scan.
* @param qualifier The column qualifier to scan.
* @return A scanner.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
*/
def getScanner(family: Array[Byte], qualifier: Array[Byte]): ResultScanner = {
getScanner(new Scan().addColumn(family,qualifier))
}
/**
* Puts some data in the table.
* <p>
* If {@link #isAutoFlush isAutoFlush} is false, the update is buffered
* until the internal buffer is full.
* @param put The data to put.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
*/
def put(put: Put): Unit = {
val cells = put.getFamilyCellMap().flatMap {
case (family, cells) =>
if (desc.hasFamily(family)) cells
else throw new IOException("Family not defined")
}
if (!cells.isEmpty) {
val row = put.getRow.toIndexedSeq
rows = rows + (row -> (rows.getOrElse(row, List()) ++ cells))
}
}
/**
* Puts some data in the table, in batch.
* <p>
* If {@link #isAutoFlush isAutoFlush} is false, the update is buffered
* until the internal buffer is full.
* <p>
* This can be used for group commit, or for submitting user defined
* batches. The writeBuffer will be periodically inspected while the List
* is processed, so depending on the List size the writeBuffer may flush
* not at all, or more than once.
* @param puts The list of mutations to apply. The batch put is done by
* aggregating the iteration of the Puts over the write buffer
* at the client-side for a single RPC call.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
*/
def put(puts: java.util.List[Put]): Unit = puts.foreach(put)
private def checkAnd(row: Array[Byte], family: Array[Byte], qualifier: Array[Byte],
value: Array[Byte], f: => Unit): Boolean = {
val ok = (getFiltered(row, family, Some(qualifier)), value) match {
case (List(), null) => true
case (_, null) => false
case (list, _) => CellUtil.cloneValue(list.maxBy(_.getTimestamp)).deep == value.deep
}
if (ok) f
ok
}
/**
* Atomically checks if a row/family/qualifier value matches the expected
* value. If it does, it adds the put. If the passed value is null, the check
   * is for the lack of column (i.e. non-existence)
*
* @param row to check
* @param family column family to check
* @param qualifier column qualifier to check
* @param value the expected value
* @param put data to put if check succeeds
* @throws IOException e
* @return true if the new put was executed, false otherwise
*/
def checkAndPut(row: Array[Byte], family: Array[Byte], qualifier: Array[Byte],
value: Array[Byte], putData: Put): Boolean =
checkAnd(row, family, qualifier, value, put(putData))
/**
* Deletes the specified cells/row.
*
* @param delete The object that specifies what to delete.
* @throws IOException if a remote or network exception occurs.
* @since 0.20.0
*/
def delete(delete: Delete): Unit = {
val familyMap = delete.getFamilyCellMap()
val row = delete.getRow.toIndexedSeq
if (familyMap.isEmpty) deleteRow(row)
else familyMap.foreach {
case (family, null) => deleteFamily(row, family)
case (family, list) if list.isEmpty() => deleteFamily(row, family)
case (family, cells) => cells.foreach(cell => deleteColumn(row, family, CellUtil.cloneQualifier(cell)))
}
}
/**
* Deletes the specified cells/rows in bulk.
* @param deletes List of things to delete. List gets modified by this
* method (in particular it gets re-ordered, so the order in which the elements
* are inserted in the list gives no guarantee as to the order in which the
* {@link Delete}s are executed).
* @throws IOException if a remote or network exception occurs. In that case
* the {@code deletes} argument will contain the {@link Delete} instances
* that have not be successfully applied.
* @since 0.20.1
*/
def delete(deletes: java.util.List[Delete]): Unit =
deletes.foreach(delete)
/**
* Atomically checks if a row/family/qualifier value matches the expected
* value. If it does, it adds the delete. If the passed value is null, the
   * check is for the lack of column (i.e. non-existence)
*
* @param row to check
* @param family column family to check
* @param qualifier column qualifier to check
* @param value the expected value
* @param deleteData data to delete if check succeeds
* @throws IOException e
* @return true if the new delete was executed, false otherwise
*/
def checkAndDelete(row: Array[Byte], family: Array[Byte], qualifier: Array[Byte],
value: Array[Byte], deleteData: Delete): Boolean =
checkAnd(row, family, qualifier, value, delete(deleteData))
/**
* Performs multiple mutations atomically on a single row. Currently
* {@link Put} and {@link Delete} are supported.
*
* @param rm object that specifies the set of mutations to perform atomically
* @throws IOException
*/
def mutateRow(rm: RowMutations): Unit = ???
/**
* Appends values to one or more columns within a single row.
* <p>
* This operation does not appear atomic to readers. Appends are done
* under a single row lock, so write operations to a row are synchronized, but
* readers do not take row locks so get and scan operations can see this
* operation partially completed.
*
* @param append object that specifies the columns and amounts to be used
* for the increment operations
* @throws IOException e
* @return values of columns after the append operation (maybe null)
*/
def append(append: Append): Result = ???
/**
* Increments one or more columns within a single row.
* <p>
* This operation does not appear atomic to readers. Increments are done
* under a single row lock, so write operations to a row are synchronized, but
* readers do not take row locks so get and scan operations can see this
* operation partially completed.
*
* @param increment object that specifies the columns and amounts to be used
* for the increment operations
* @throws IOException e
* @return values of columns after the increment
*/
def increment(increment: Increment): Result = ???
/**
* See {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
* <p>
* The {@link Durability} is defaulted to {@link Durability#SYNC_WAL}.
* @param row The row that contains the cell to increment.
* @param family The column family of the cell to increment.
* @param qualifier The column qualifier of the cell to increment.
* @param amount The amount to increment the cell with (or decrement, if the
* amount is negative).
* @return The new value, post increment.
* @throws IOException if a remote or network exception occurs.
*/
def incrementColumnValue(row: Array[Byte], family: Array[Byte], qualifier: Array[Byte],
amount: Long): Long = ???
/**
* Atomically increments a column value. If the column value already exists
* and is not a big-endian long, this could throw an exception. If the column
* value does not yet exist it is initialized to <code>amount</code> and
* written to the specified column.
*
* <p>Setting durability to {@link Durability#SKIP_WAL} means that in a fail
* scenario you will lose any increments that have not been flushed.
* @param row The row that contains the cell to increment.
* @param family The column family of the cell to increment.
* @param qualifier The column qualifier of the cell to increment.
* @param amount The amount to increment the cell with (or decrement, if the
* amount is negative).
* @param durability The persistence guarantee for this increment.
* @return The new value, post increment.
* @throws IOException if a remote or network exception occurs.
*/
def incrementColumnValue(row: Array[Byte], family: Array[Byte], qualifier: Array[Byte],
amount: Long, durability: Durability): Long = ???
/**
* @deprecated Use {@link #incrementColumnValue(byte[], byte[], byte[], long, Durability)}
*/
@Deprecated
def incrementColumnValue(row: Array[Byte], family: Array[Byte], qualifier: Array[Byte],
amount: Long, writeToWAL: Boolean) : Long = ???
/**
* Tells whether or not 'auto-flush' is turned on.
*
* @return {@code true} if 'auto-flush' is enabled (default), meaning
* {@link Put} operations don't get buffered/delayed and are immediately
* executed.
*/
def isAutoFlush(): Boolean = true
/**
* Executes all the buffered {@link Put} operations.
* <p>
* This method gets called once automatically for every {@link Put} or batch
* of {@link Put}s (when <code>put(List<Put>)</code> is used) when
* {@link #isAutoFlush} is {@code true}.
* @throws IOException if a remote or network exception occurs.
*/
def flushCommits(): Unit = {}
/**
* Releases any resources held or pending changes in internal buffers.
*
* @throws IOException if a remote or network exception occurs.
*/
def close(): Unit = {}
/**
* Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the
* table region containing the specified row. The row given does not actually have
* to exist. Whichever region would contain the row based on start and end keys will
* be used. Note that the {@code row} parameter is also not passed to the
* coprocessor handler registered for this protocol, unless the {@code row}
* is separately passed as an argument in the service request. The parameter
* here is only used to locate the region used to handle the call.
*
* <p>
* The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published
* coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations:
* </p>
*
* <div style="background-color: #cccccc; padding: 2px">
* <blockquote><pre>
* CoprocessorRpcChannel channel = myTable.coprocessorService(rowkey);
* MyService.BlockingInterface service = MyService.newBlockingStub(channel);
* MyCallRequest request = MyCallRequest.newBuilder()
* ...
* .build();
* MyCallResponse response = service.myCall(null, request);
* </pre></blockquote></div>
*
* @param row The row key used to identify the remote region location
* @return A CoprocessorRpcChannel instance
*/
@InterfaceAudience.Private // TODO add coproc audience level
def coprocessorService(row: Array[Byte]): CoprocessorRpcChannel = ???
/**
* Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
* region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
* and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
* method with each {@link Service}
* instance.
*
* @param service the protocol buffer {@code Service} implementation to call
* @param startKey start region selection with region containing this row. If {@code null}, the
* selection will start with the first table region.
* @param endKey select regions up to and including the region containing this row.
* If {@code null}, selection will continue through the last table region.
* @param callable this instance's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
* method will be invoked once per table region, using the {@link Service}
* instance connected to that region.
* @param <T> the {@link Service} subclass to connect to
* @param <R> Return type for the {@code callable} parameter's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
* @return a map of result values keyed by region name
*/
@InterfaceAudience.Private // TODO add coproc audience level
def coprocessorService[T <: Service, R](service: Class[T],
startKey: Array[Byte], endKey: Array[Byte], callable: Batch.Call[T,R]): java.util.Map[Array[Byte],R] = ???
/**
* Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
* region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
* and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
* method with each {@link Service} instance.
*
* <p>
* The given
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
* method will be called with the return value from each region's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
*</p>
*
* @param service the protocol buffer {@code Service} implementation to call
* @param startKey start region selection with region containing this row. If {@code null}, the
* selection will start with the first table region.
* @param endKey select regions up to and including the region containing this row.
* If {@code null}, selection will continue through the last table region.
* @param callable this instance's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
* will be invoked once per table region, using the {@link Service} instance
* connected to that region.
* @param callback
* @param <T> the {@link Service} subclass to connect to
* @param <R> Return type for the {@code callable} parameter's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
*/
@InterfaceAudience.Private // TODO add coproc audience level
def coprocessorService[T <: Service, R](service: Class[T],
startKey: Array[Byte], endKey: Array[Byte], callable: Batch.Call[T,R],
callback: Batch.Callback[R]): Unit = ???
/**
* See {@link #setAutoFlush(boolean, boolean)}
*
* @param autoFlush
* Whether or not to enable 'auto-flush'.
* @deprecated in 0.96. When called with setAutoFlush(false), this function also
* set clearBufferOnFail to true, which is unexpected but kept for historical reasons.
* Replace it with setAutoFlush(false, false) if this is exactly what you want, or by
* {@link #setAutoFlushTo(boolean)} for all other cases.
*/
@Deprecated
def setAutoFlush(autoFlush: Boolean): Unit = ???
/**
* Turns 'auto-flush' on or off.
* <p>
* When enabled (default), {@link Put} operations don't get buffered/delayed
* and are immediately executed. Failed operations are not retried. This is
* slower but safer.
* <p>
* Turning off {@code #autoFlush} means that multiple {@link Put}s will be
* accepted before any RPC is actually sent to do the write operations. If the
* application dies before pending writes get flushed to HBase, data will be
* lost.
* <p>
* When you turn {@code #autoFlush} off, you should also consider the
* {@code #clearBufferOnFail} option. By default, asynchronous {@link Put}
* requests will be retried on failure until successful. However, this can
* pollute the writeBuffer and slow down batching performance. Additionally,
* you may want to issue a number of Put requests and call
* {@link #flushCommits()} as a barrier. In both use cases, consider setting
* clearBufferOnFail to true to erase the buffer after {@link #flushCommits()}
* has been called, regardless of success.
* <p>
* In other words, if you call {@code #setAutoFlush(false)}; HBase will retry N time for each
* flushCommit, including the last one when closing the table. This is NOT recommended,
* most of the time you want to call {@code #setAutoFlush(false, true)}.
*
* @param autoFlush
* Whether or not to enable 'auto-flush'.
* @param clearBufferOnFail
* Whether to keep Put failures in the writeBuffer. If autoFlush is true, then
* the value of this parameter is ignored and clearBufferOnFail is set to true.
* Setting clearBufferOnFail to false is deprecated since 0.96.
* @see #flushCommits
*/
def setAutoFlush(autoFlush: Boolean, clearBufferOnFail: Boolean): Unit = {
// Doesn't do anything. Autoflush is always true in this implementation.
}
/**
* Set the autoFlush behavior, without changing the value of {@code clearBufferOnFail}
*/
def setAutoFlushTo(autoFlush: Boolean): Unit = {
// Doesn't do anything. Autoflush is always true in this implementation.
}
/**
* Returns the maximum size in bytes of the write buffer for this HTable.
* <p>
* The default value comes from the configuration parameter
* {@code hbase.client.write.buffer}.
* @return The size of the write buffer in bytes.
*/
def getWriteBufferSize(): Long = 4096L * 1024L
/**
* Sets the size of the buffer in bytes.
* <p>
* If the new size is less than the current amount of data in the
* write buffer, the buffer gets flushed.
* @param writeBufferSize The new write buffer size, in bytes.
* @throws IOException if a remote or network exception occurs.
*/
def setWriteBufferSize(writeBufferSize: Long) = ???
/* New method in 0.98.10.1 */
def batchCoprocessorService[R <: com.google.protobuf.Message](x$1: com.google.protobuf.Descriptors.MethodDescriptor,x$2: com.google.protobuf.Message,x$3: Array[Byte],x$4: Array[Byte],x$5: R,x$6: org.apache.hadoop.hbase.client.coprocessor.Batch.Callback[R]): Unit = ???
/* New method in 0.98.10.1 */
def batchCoprocessorService[R <: com.google.protobuf.Message](x$1: com.google.protobuf.Descriptors.MethodDescriptor,x$2: com.google.protobuf.Message,x$3: Array[Byte],x$4: Array[Byte],x$5: R): java.util.Map[Array[Byte],R] = ???
/* New method in 0.98.10.1 */
def checkAndMutate(x$1: Array[Byte],x$2: Array[Byte],x$3: Array[Byte],x$4: org.apache.hadoop.hbase.filter.CompareFilter.CompareOp,x$5: Array[Byte],x$6: org.apache.hadoop.hbase.client.RowMutations): Boolean = ???
}
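// Hypothetical round-trip sketch (not part of the original file): creates an in-memory
// table, writes one cell and reads it back through the standard HBase 1.x client API.
// Table, family, qualifier and values are made up for illustration.
import org.apache.hadoop.hbase.util.Bytes

object TestHBaseManagerSketch {
  val manager = TestHBaseManager.emptyInstance
  val tableName = TableName.valueOf("events")
  manager.createTable(tableName, families = Seq("d"))

  val table = manager.getTable(tableName)
  table.put(new Put(Bytes.toBytes("row-1"))
    .addColumn(Bytes.toBytes("d"), Bytes.toBytes("q"), Bytes.toBytes("v")))

  // Non-empty because the cell was stored in the table's in-memory row map.
  val result = table.get(new Get(Bytes.toBytes("row-1")))
  assert(!result.isEmpty)
}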
|
eric-leblouch/htalk
|
src/test/scala/com/ubeeko/htalk/hbase/TestHBaseManager.scala
|
Scala
|
apache-2.0
| 33,266 |
/**
 * Copyright 2014 Getty Images, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tecsisa.akka.http.swagger.samples
import com.wordnik.swagger.annotations.ApiModelProperty
import com.wordnik.swagger.annotations.ApiModel
import scala.annotation.meta.field
import org.joda.time.DateTime
import java.util.Date
object SwaggerModelBuilderSpecValues {
final val TestModelDescription = "hello world, goodbye!"
final val NameDescription = "name123"
final val CountDescription = "count3125"
final val IsStaleDescription = "isStale9325"
final val OffsetDescription = "offestDescription9034"
final val NodesDescription = "nodesDescription9043"
final val EnumDescription = "enumDescription2135432"
final val StartDateDescription = "startDateDescription294290"
final val EndDateDescription = "endDateDescription294290"
final val AmountDescription = "amountDescription222"
final val AllowableDescription = "allowableDesciption"
}
@ApiModel(description = "an entry in the dictionary")
case class DictEntry(
val key: String,
val value: String,
val expire: Option[Long]
)
import SwaggerModelBuilderSpecValues._
case class TestModelWithNoAnnotation()
@Deprecated
case class TestModelWithWrongAnnotation()
@ApiModel
case class TestModelEmptyAnnotation()
@ApiModel
sealed trait TestModelParent {
}
@ApiModel(description = TestModelDescription)
case class TestModel(
@(ApiModelProperty @field)(value = NameDescription)
name: String,
@(ApiModelProperty @field)(value = CountDescription)
count: Int,
@(ApiModelProperty @field)(value = IsStaleDescription)
isStale: Boolean,
@(ApiModelProperty @field)(value = OffsetDescription)
offset: Option[Int] = None,
@(ApiModelProperty @field)(value = NodesDescription)
nodes: List[TestModelNode] = List[TestModelNode](),
@(ApiModelProperty @field)(value = EnumDescription)
enum: TestEnum.TestEnum = TestEnum.AEnum,
@(ApiModelProperty @field)(value = StartDateDescription)
startDate: Date,
@(ApiModelProperty @field)(value = EndDateDescription)
endDate: DateTime,
noAnnotationProperty: String,
secondNoAnnotationProperty: String,
@(ApiModelProperty @field)(value = AllowableDescription, allowableValues="first, second") allowable: String
) extends TestModelParent
@ApiModel(description = TestModelDescription)
case class ModelWithCustomPropertyDatatypes(
@(ApiModelProperty @field)(value = CountDescription, dataType = "long")
count: BigInt,
@(ApiModelProperty @field)(value = IsStaleDescription, dataType = "boolean")
isStale: Any,
@(ApiModelProperty @field)(value = OffsetDescription, dataType = "array[int]")
offset: Iterable[(Int, Boolean)],
@(ApiModelProperty @field)(value = EndDateDescription, dataType = "date", required = false)
endDate: Option[String],
@(ApiModelProperty @field)(value = NameDescription, dataType = "CustomType", required = false)
nonDefaultTypeField: Option[String],
@(ApiModelProperty @field)(value = NameDescription, dataType = "CustomContainer[string]", required = false)
nonDefaultContainerTypeField: Option[String],
@(ApiModelProperty @field)(value = AmountDescription, dataType="BigDecimal")
amount: BigDecimal
)
@ApiModel(description = "ModelBase")
class ModelBase {
@(ApiModelProperty @field)(value = NameDescription)
val name: String = ""
}
@ApiModel(description = "ModelExtension", parent = classOf[ModelBase])
class ModelExtension extends ModelBase {
@(ApiModelProperty @field)(value = EndDateDescription)
val date: Date = DateTime.now().toDate
}
object TestEnum extends Enumeration {
type TestEnum = Value
val AEnum = Value("a")
val BEnum = Value("b")
}
@ApiModel
case class TestModelNode(
value: Option[String]
)
case class A() extends Letter
case class B() extends Letter
@ApiModel(
subTypes = Array(classOf[String], classOf[B])
)
abstract class Letter
@ApiModel
case class TestModelPositions(
@(ApiModelProperty @field)(position = 0, value = "") arg0: String,
@(ApiModelProperty @field)(position = 1, value = "") arg1: String,
@(ApiModelProperty @field)(position = 2, value = "") arg2: String,
@(ApiModelProperty @field)(position = 3, value = "") arg3: String
)
|
Tecsisa/akka-http-swagger
|
src/test/scala/com/tecsisa/akka/http/swagger/samples/Models.scala
|
Scala
|
apache-2.0
| 4,703 |
package org.jetbrains.plugins.scala.lang.psi.impl.expr
import com.intellij.lang.ASTNode
import com.intellij.psi.{PsiElement, PsiElementVisitor}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.expr._
/**
* Author: Alexander Podkhalyuzin
* Date: 06.03.2008
*/
class ScCatchBlockImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScCatchBlock {
override def toString: String = "CatchBlock"
override def accept(visitor: ScalaElementVisitor) {
visitor.visitCatchBlock(this)
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case s: ScalaElementVisitor => s.visitCatchBlock(this)
case _ => super.accept(visitor)
}
}
def getLeftParenthesis: Option[PsiElement] = {
val leftParenthesis = findChildByType[PsiElement](ScalaTokenTypes.tLPARENTHESIS)
if (leftParenthesis == null) None else Some(leftParenthesis)
}
def getRightParenthesis: Option[PsiElement] = {
val rightParenthesis = findChildByType[PsiElement](ScalaTokenTypes.tRPARENTHESIS)
if (rightParenthesis == null) None else Some(rightParenthesis)
}
}
|
gtache/intellij-lsp
|
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/impl/expr/ScCatchBlockImpl.scala
|
Scala
|
apache-2.0
| 1,286 |
package com.avsystem.commons
package macros.misc
import com.avsystem.commons.macros.AbstractMacroCommons
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.reflect.macros.{blackbox, whitebox}
import scala.util.control.NoStackTrace
final class MiscMacros(ctx: blackbox.Context) extends AbstractMacroCommons(ctx) {
import c.universe._
lazy val MaterializeWithAT: Type = getType(tq"$CommonsPkg.meta.MacroInstances.materializeWith")
def infer[T: WeakTypeTag]: Tree =
instrument(inferTpe(weakTypeOf[T], "", NoPosition, withMacrosDisabled = false))
def clueInfer[T: WeakTypeTag](clue: Tree): Tree =
instrument(inferTpe(weakTypeOf[T], clueStr(clue), clue.pos, withMacrosDisabled = false))
def inferNonMacro[T: WeakTypeTag](clue: Tree): Tree =
instrument(inferTpe(weakTypeOf[T], clueStr(clue), clue.pos, withMacrosDisabled = true))
private def clueStr(clue: Tree): String = clue match {
case StringLiteral(str) => str
case _ => abort(s"clue must be a String literal, $clue is not")
}
private def inferTpe(tpe: Type, clue: String, pos: Position, withMacrosDisabled: Boolean): Tree =
inferImplicitValue(tpe, withMacrosDisabled = withMacrosDisabled, expandMacros = true) match {
case EmptyTree => abortAt(clue + implicitNotFoundMsg(tpe), pos)
case t => t
}
def sourceInfo: Tree = {
def enclosingSymName(sym: Symbol) =
sym.filter(_.isTerm).map(_.asTerm.getter).orElse(sym).name.decodedName.toString
val pos = c.enclosingPosition
q"""
$MiscPkg.SourceInfo(
${pos.source.path},
${pos.source.file.name},
${pos.point},
${pos.line},
${pos.column},
${pos.source.lineToString(pos.line - 1)},
$ListObj(..${ownerChain.takeWhile(_ != rootMirror.RootClass).map(enclosingSymName)})
)
"""
}
def crossImpl(forJvm: Tree, forJs: Tree): Tree =
if (isScalaJs) forJs else forJvm
def enumValName: Tree = {
def omitAnonClass(owner: Symbol): Symbol =
if (owner.isConstructor && owner.owner.name.toString.contains("$anon"))
owner.owner.owner
else owner
val owner = omitAnonClass(c.internal.enclosingOwner)
val valid = owner.isTerm && owner.owner == c.prefix.tree.symbol && {
val term = owner.asTerm
term.isVal && term.isFinal && !term.isLazy && term.getter.isPublic &&
term.typeSignature <:< getType(tq"${c.prefix}.Value")
}
if (!valid) {
abort("ValueEnum must be assigned to a public, final, non-lazy val in its companion object " +
"with explicit `Value` type annotation, e.g. `final val MyEnumValue: Value = new MyEnumClass")
}
q"new ${c.prefix}.ValName(${owner.asTerm.getter.name.decodedName.toString})"
}
def compilationError(error: Tree): Tree = error match {
case StringLiteral(errorStr) =>
abortAt(errorStr, error.pos)
case t =>
c.abort(t.pos, "Expected string literal here")
}
case class NonConcreteTypeException(tpe: Type) extends RuntimeException with NoStackTrace
def javaClassName[T: WeakTypeTag]: Tree = instrument {
val tpe = weakTypeOf[T].dealias
if (tpe.typeSymbol.isClass && tpe.typeSymbol != definitions.ArrayClass)
q"new $MiscPkg.JavaClassName(${javaClassName(tpe.erasure.typeSymbol)})"
else
abort(s"$tpe does not represent a regular class")
}
private def javaClassName(sym: Symbol): String = {
val nameSuffix = if (sym.isModuleClass && !sym.isPackageClass) "$" else ""
val selfName = sym.name.encodedName.toString + nameSuffix
val owner = sym.owner
val prefix =
if (owner == rootMirror.RootClass) ""
else if (owner.isPackageClass) javaClassName(owner) + "."
else if (owner.isModuleClass) javaClassName(owner)
else javaClassName(owner) + "$"
prefix + selfName
}
def typeString[T: WeakTypeTag]: Tree = instrument {
val tpe = weakTypeOf[T]
try typeStringParts(tpe) match {
case List(Select(pre, TermName("value"))) => pre
case trees => q"new $MiscPkg.TypeString[$tpe](${mkStringConcat(trees)})"
} catch {
case NonConcreteTypeException(stpe) =>
abort(s"Could not materialize TypeString for $tpe because instance for $stpe is lacking")
}
}
private val allowedSymbols: mutable.Set[Symbol] = new mutable.HashSet
def withAllowed[T](tparams: List[Symbol])(code: => T): T = {
allowedSymbols ++= tparams
try code finally {
allowedSymbols --= tparams
}
}
private def maybeParens(repr: List[Tree], parens: Boolean): List[Tree] =
if (parens) lit("(") :: repr ::: lit(")") :: Nil
else repr
def typeStringParts(tpe: Type, parens: Boolean = false): List[Tree] = {
val resultTpe = getType(tq"$MiscPkg.TypeString[$tpe]")
inferImplicitValue(resultTpe, withMacrosDisabled = true) match {
case EmptyTree => mkTypeString(tpe, parens)
case tree => maybeParens(List(q"$tree.value"), parens)
}
}
def mkStringConcat(trees: List[Tree]): Tree = trees match {
case Nil => StringLiteral("")
case single :: Nil => single
case StringLiteral(str1) :: StringLiteral(str2) :: tail =>
mkStringConcat(StringLiteral(str1 + str2) :: tail)
case head :: tail =>
q"$head+${mkStringConcat(tail)}"
}
def join(trees: List[List[Tree]], sep: String): List[Tree] = trees match {
case Nil => Nil
case List(single) => single
case head :: tail => head ::: lit(sep) :: join(tail, sep)
}
def lit(str: String): Tree =
StringLiteral(str)
private def isOpChar(ch: Char): Boolean =
ch != '`' && !ch.isLetterOrDigit
private def isOpSafe(ch: Char): Boolean =
ch == '.' || ch.isWhitespace
def mkNameString(name: Name, prefix: String = "", suffix: String = ""): String = {
val nameRepr = showCode(Ident(name))
val afterPrefix = if (prefix.nonEmpty && !isOpSafe(prefix.last) && isOpChar(nameRepr.head)) " " else ""
val beforeSuffix = if (suffix.nonEmpty && !isOpSafe(suffix.head) && isOpChar(nameRepr.last)) " " else ""
s"$prefix$afterPrefix$nameRepr$beforeSuffix$suffix"
}
def isRefTo(quantified: Symbol, arg: Type): Boolean = arg match {
case TypeRef(NoPrefix, `quantified`, Nil) => true
case _ => false
}
def areIndependent(quantified: List[Symbol]): Boolean =
quantified.forall(first => quantified.forall(second => !first.typeSignature.contains(second)))
def mkMemberDefString(s: Symbol, wildcard: Boolean = false): List[Tree] = s match {
case ExistentialSingleton(_, name, sig) =>
lit(s"val ${mkNameString(name, suffix = ": ")}") :: mkTypeString(sig)
case ts: TypeSymbol =>
val variance = if (ts.isCovariant) "+" else if (ts.isContravariant) "-" else ""
val beforeName = if (ts.isParameter) variance else "type "
val finalPrefix = if (ts.isParameter) "" else " = "
val baseDecl = if (wildcard) "_" else mkNameString(ts.name, prefix = beforeName)
lit(baseDecl) :: mkSignatureString(ts.typeSignature, finalPrefix)
case ts: TermSymbol =>
val sig = ts.typeSignature
val paramless = sig.typeParams.isEmpty && sig.paramLists.isEmpty
val beforeName =
if (ts.isParameter) ""
else if (ts.isGetter && ts.setter != NoSymbol) "var "
else if (ts.isGetter) "val "
else "def "
val baseDecl = mkNameString(ts.name, prefix = beforeName, suffix = if (paramless) ": " else "")
lit(baseDecl) :: mkSignatureString(sig, if (paramless) "" else ": ")
}
def mkSignatureString(sig: Type, finalPrefix: String): List[Tree] = sig match {
case TypeBounds(lo, hi) =>
val loRepr =
if (lo =:= typeOf[Nothing]) Nil
else lit(" >: ") :: typeStringParts(lo)
val hiRepr =
if (hi =:= typeOf[Any]) Nil
else lit(" <: ") :: typeStringParts(hi)
loRepr ++ hiRepr
case PolyType(tparams, resultType) => withAllowed(tparams) {
lit("[") :: join(tparams.map(mkMemberDefString(_)), ", ") ::: lit("]") ::
mkSignatureString(resultType, finalPrefix)
}
case MethodType(params, resultType) =>
val pre = if (params.headOption.exists(_.isImplicit)) "(implicit " else "("
lit(pre) :: join(params.map(mkMemberDefString(_)), ", ") ::: lit(")") ::
withAllowed(params)(mkSignatureString(resultType, finalPrefix))
case NullaryMethodType(resultType) =>
mkSignatureString(resultType, finalPrefix)
case _ =>
lit(finalPrefix) :: mkTypeString(sig)
}
private val autoImported: Set[Symbol] = Set(
definitions.ScalaPackage,
definitions.JavaLangPackage,
definitions.PredefModule,
rootMirror.RootPackage
)
def isStaticPrefix(pre: Type): Boolean = pre match {
case SingleType(ppre, _) => isStaticPrefix(ppre)
case ThisType(sym) => sym.isPublic && sym.isStatic && sym.isModuleClass
case TypeRef(ppre, sym, Nil) => sym.isPublic && sym.isStatic && sym.isModuleClass && isStaticPrefix(ppre)
case _ => false
}
def isAllowedWithoutPrefix(s: Symbol): Boolean =
(s.isType && s.asType.isAliasType) || allowedSymbols.contains(s)
def mkTypeString(tpe: Type, parens: Boolean = false): List[Tree] = tpe match {
case _ if tpe =:= typeOf[AnyRef] => List(lit("AnyRef"))
case TypeRef(NoPrefix, ExistentialSingleton(_, name, _), Nil) =>
List(lit(mkNameString(name)))
case TypeRef(_, sym, List(arg)) if sym == definitions.ByNameParamClass =>
val res = lit("=> ") :: typeStringParts(arg)
maybeParens(res, parens)
case TypeRef(_, sym, List(arg)) if sym == definitions.RepeatedParamClass =>
val argRepr = typeStringParts(arg)
val starSafe = argRepr.last match {
case StringLiteral(s) => s.charAt(s.length - 1) match {
case ']' | ')' | '}' | '`' => true
case ch => ch.isLetterOrDigit
}
case _ => false
}
argRepr ::: lit(if (starSafe) "*" else " *") :: Nil
case TypeRef(_, sym, args) if definitions.FunctionClass.seq.contains(sym) =>
val fargs = args.init
val fres = args.last
val fargsRes = fargs match {
case Nil => List(lit("()"))
case List(single) => typeStringParts(single, parens = true)
case _ => maybeParens(join(fargs.map(typeStringParts(_)), ", "), parens = true)
}
val res = fargsRes ::: lit(" => ") :: typeStringParts(fres)
maybeParens(res, parens)
case TypeRef(_, sym, args) if definitions.TupleClass.seq.contains(sym) =>
lit("(") :: join(args.map(typeStringParts(_)), ", ") ::: lit(")") :: Nil
case TypeRef(pre, sym, args) if pre != NoPrefix || isAllowedWithoutPrefix(sym) =>
val dealiased = tpe.dealias
if (dealiased.typeSymbol != sym && !isStaticPrefix(pre))
mkTypeString(dealiased, parens = false)
else {
val argsReprs =
if (args.isEmpty) Nil
else lit("[") +: join(args.map(typeStringParts(_)), ", ") :+ lit("]")
mkTypePath(pre, sym.name) ::: argsReprs
}
case SingleType(pre, sym) if pre != NoPrefix || isAllowedWithoutPrefix(sym) =>
mkTypePath(pre, sym.name) :+ lit(".type")
case ThisType(sym) if sym.isStatic && sym.isModuleClass =>
List(lit(mkStaticPrefix(sym) + "type"))
case ExistentialType(quantified, TypeRef(pre, sym, args))
if quantified.corresponds(args)(isRefTo) && quantified.forall(s => !pre.contains(s)) && areIndependent(quantified) =>
withAllowed(quantified) {
val wildcards = lit("[") +: join(quantified.map(mkMemberDefString(_, wildcard = true)), ", ") :+ lit("]")
mkTypePath(pre, sym.name) ::: wildcards
}
case ExistentialType(quantified, underlying) =>
withAllowed(quantified) {
val typeDefs = join(quantified.map(mkMemberDefString(_)), "; ")
maybeParens(typeStringParts(underlying) ::: lit(" forSome {") :: typeDefs ::: lit("}") :: Nil, parens)
}
case RefinedType(bases, scope) =>
val basesRepr = bases match {
case List(anyref) if anyref =:= typeOf[AnyRef] => Nil
case _ => join(bases.map(typeStringParts(_)), " with ")
}
val filteredScope = scope.iterator.filter(s => !s.isTerm || !s.asTerm.isSetter).toList
val scopeRepr =
if (filteredScope.isEmpty) Nil
else {
val memberDefs = withAllowed(List(tpe.typeSymbol))(join(filteredScope.map(mkMemberDefString(_)), "; "))
lit("{") :: memberDefs ::: lit("}") :: Nil
}
val space = if (basesRepr.nonEmpty && scopeRepr.nonEmpty) " " else ""
maybeParens(basesRepr ::: lit(space) :: scopeRepr, parens)
case AnnotatedType(_, underlying) =>
mkTypeString(underlying, parens = false)
case _ =>
throw NonConcreteTypeException(tpe)
}
def mkStaticPrefix(sym: Symbol): String =
if (sym == rootMirror.RootClass) ""
else mkStaticPrefix(sym.owner) + mkNameString(sym.name) + "."
def mkTypePath(pre: Type, name: Name): List[Tree] = pre match {
case t if autoImported.contains(t.termSymbol) =>
List(lit(mkNameString(name)))
case NoPrefix =>
List(lit(mkNameString(name)))
case SingleType(pkg, sym) if sym.name == termNames.PACKAGE && pkg.typeSymbol.isPackageClass =>
mkTypePath(pkg, name)
case SingleType(ppre, sym) if ppre != NoPrefix || isAllowedWithoutPrefix(sym) =>
mkTypePath(ppre, sym.name) ::: lit("." + mkNameString(name)) :: Nil
case ThisType(sym) if sym.isStatic && sym.isModuleClass =>
List(lit(mkStaticPrefix(sym) + mkNameString(name)))
case ThisType(sym) if allowedSymbols.contains(sym) =>
List(lit(mkNameString(name)))
case TypeRef(NoPrefix, ExistentialSingleton(_, valName, _), Nil) =>
List(lit(mkNameString(valName) + "." + mkNameString(name)))
case _ =>
val singletonPrefix = pre.typeSymbol.isModuleClass || pre.termSymbol != NoSymbol
val selection = if (singletonPrefix) "." else "#"
mkTypeString(pre, parens = !singletonPrefix) :+ lit(mkNameString(name, prefix = selection))
}
def lazyMetadata(metadata: Tree): Tree =
q"${c.prefix}($metadata)"
def mkValueOf[T: WeakTypeTag]: Tree = instrument {
val tpe = weakTypeOf[T].dealias
singleValueFor(tpe) match {
case Some(sv) => q"new $MiscPkg.ValueOf[$tpe]($sv)"
case None => abort(s"$tpe is not a singleton type")
}
}
def macroInstances: Tree = {
val resultTpe = c.macroApplication.tpe
val applySig = resultTpe.member(TermName("apply")).typeSignatureIn(resultTpe)
val implicitsTpe = applySig.paramLists.head.head.typeSignature
val instancesTpe = applySig.finalResultType
val instTs = instancesTpe.typeSymbol
if (!(instTs.isClass && instTs.isAbstract)) {
abort(s"Expected trait or abstract class type, got $instancesTpe")
}
val instancesMethods = instancesTpe.members.iterator
.filter(m => m.isAbstract && m.isMethod && !m.asTerm.isSetter).map(_.asMethod).toList.reverse
val CompanionParamName = c.freshName(TermName("companion"))
def impl(singleMethod: Option[Symbol]): Tree = {
val impls = instancesMethods.map { m =>
val sig = m.typeSignatureIn(instancesTpe)
val resultTpe = sig.finalResultType.dealias
val materializer =
if (singleMethod.exists(_ != m))
q"$PredefObj.???"
else findAnnotation(m, MaterializeWithAT) match {
case Some(annot) =>
val errorPos = annot.errorPos.getOrElse(c.enclosingPosition)
annot.tree match {
case Apply(_, List(prefix, macroNameTree)) =>
val macroName = macroNameTree match {
case StringLiteral(name) => name
case t if t.symbol.isSynthetic && t.symbol.name.decodedName == TermName("<init>$default$2") =>
"materialize"
case _ => abortAt("expected string literal as second argument of @materializeWith", errorPos)
}
q"$prefix.${TermName(macroName)}"
case _ =>
abortAt("bad @materializeWith annotation", errorPos)
}
case None =>
val resultCompanion = typedCompanionOf(resultTpe)
.getOrElse(abort(s"$resultTpe has no companion object with `materialize` macro"))
q"$resultCompanion.materialize"
}
val instTpeTree = treeForType(sig.finalResultType)
if (!m.isGetter) {
val tparamDefs = sig.typeParams.map(typeSymbolToTypeDef(_, forMethod = true))
val paramDefs = sig.paramLists.map(_.map(paramSymbolToValDef))
val argss = sig.paramLists match {
case List(Nil) => Nil
case paramss => paramss.filterNot(_.exists(_.isImplicit)).map(_.map(s => q"${s.name.toTermName}"))
}
q"def ${m.name}[..$tparamDefs](...$paramDefs): $instTpeTree = $materializer(...$argss)"
}
else if (m.isVar || m.setter != NoSymbol)
q"var ${m.name}: $instTpeTree = $materializer"
else
q"val ${m.name}: $instTpeTree = $materializer"
}
val implicitsName = c.freshName(TermName("implicits"))
def implicitImports(tpe: Type, expr: Tree): List[Tree] = {
val dtpe = tpe.dealias
if (dtpe =:= typeOf[Unit]) Nil
else if (definitions.TupleClass.seq.contains(dtpe.typeSymbol))
dtpe.typeArgs.zipWithIndex.flatMap {
case (ctpe, idx) => implicitImports(ctpe, q"$expr.${TermName(s"_${idx + 1}")}")
}
else List(q"import $expr._")
}
q"""
new $resultTpe {
def apply($implicitsName: $implicitsTpe, $CompanionParamName: Any): $instancesTpe = {
..${implicitImports(implicitsTpe, Ident(implicitsName))}
new $instancesTpe { ..$impls; () }
}
}
"""
}
//If full implementation doesn't typecheck, find the first problematic typeclass and limit
//compilation errors to that one in order to not overwhelm the user but rather report errors gradually
val fullImpl = impl(None)
debug(show(fullImpl))
val result = c.typecheck(fullImpl, silent = true) match {
case EmptyTree =>
instancesMethods.iterator.map(m => impl(Some(m)))
.find(t => c.typecheck(t, silent = true) == EmptyTree)
.getOrElse(fullImpl)
case t => t
}
enclosingConstructorCompanion match {
case NoSymbol => result
case companionSym =>
// Replace references to companion object being constructed with casted reference to
// `companion` parameter. All this horrible wiring is to workaround stupid overzealous Scala validation of
// self-reference being passed to super constructor parameter (https://github.com/scala/bug/issues/7666)
// We're going to replace some parts of already typechecked tree. This means we must insert already
// typechecked replacements.
val replacementDecl = result.find {
case ValDef(mods, CompanionParamName, _, EmptyTree) => mods.hasFlag(Flag.PARAM)
case _ => false
}
val replacementSym = replacementDecl.fold(NoSymbol)(_.symbol)
// must construct tree which is already fully typechecked
def replacementTree(orig: Tree): Tree = {
val replacementIdent = internal.setType(
internal.setSymbol(Ident(CompanionParamName), replacementSym),
internal.singleType(NoPrefix, replacementSym)
)
val asInstanceOfMethod = definitions.AnyTpe.member(TermName("asInstanceOf"))
val asInstanceOfSelect = internal.setType(
internal.setSymbol(Select(replacementIdent, asInstanceOfMethod), asInstanceOfMethod),
asInstanceOfMethod.info
)
val typeAppliedCast = internal.setType(
internal.setSymbol(TypeApply(asInstanceOfSelect, List(TypeTree(orig.tpe))), asInstanceOfMethod),
orig.tpe
)
typeAppliedCast
}
object replacer extends Transformer {
override def transform(tree: Tree): Tree = tree match {
case This(_) if tree.symbol == companionSym.asModule.moduleClass => replacementTree(tree)
case _ if tree.symbol == companionSym => replacementTree(tree)
case _ => super.transform(tree)
}
}
replacer.transform(result)
}
}
def posPoint: Tree =
q"${c.enclosingPosition.point}"
def applyUnapplyOrFail(tpe: Type): ApplyUnapply =
applyUnapplyFor(tpe).getOrElse(abort(
s"$tpe is not a case class or case-class like type: no matching apply/unapply pair found"))
def applyBody(rawValuesName: TermName, tpe: Type, au: ApplyUnapply): Tree = {
val args = au.params.zipWithIndex.map { case (param, idx) =>
val res = q"$rawValuesName($idx).asInstanceOf[${actualParamType(param.typeSignature)}]"
if (isRepeated(param)) q"$res: _*" else res
}
au.mkApply(args)
}
def unapplyBody(valueName: TermName, tpe: Type, au: ApplyUnapply): Tree = {
if (au.standardCaseClass) q"$ScalaPkg.Array(..${au.params.map(param => q"$valueName.$param")})"
else {
val companion = typedCompanionOf(tpe).getOrElse(EmptyTree)
val unapplyRes = q"$companion.${au.unapply}[..${tpe.typeArgs}]($valueName)"
au.params match {
case Nil => q"$ScalaPkg.Seq.empty[$ScalaPkg.Any]"
case List(_) => q"$ScalaPkg.Seq($unapplyRes.get)"
case _ =>
val resName = c.freshName(TermName("res"))
val elems = au.params.indices.map(i => q"$resName.${TermName(s"_${i + 1}")}")
q"""
val $resName = $unapplyRes.get
$CollectionPkg.compat.immutable.ArraySeq.unsafeWrapArray($ArrayObj[$ScalaPkg.Any](..$elems))
"""
}
}
}
def applier[T: WeakTypeTag]: Tree = instrument {
val tpe = weakTypeOf[T].dealias
val rawValuesName = c.freshName(TermName("rawValues"))
q"""
new $MiscPkg.Applier[$tpe] {
def apply($rawValuesName: $ScalaPkg.Seq[$ScalaPkg.Any]): $tpe =
${applyBody(rawValuesName, tpe, applyUnapplyOrFail(tpe))}
}
"""
}
def unapplier[T: WeakTypeTag]: Tree = instrument {
val tpe = weakTypeOf[T].dealias
val valueName = c.freshName(TermName("value"))
val au = applyUnapplyOrFail(tpe)
if (au.standardCaseClass && tpe <:< ProductTpe)
q"new $MiscPkg.ProductUnapplier[$tpe]"
else
q"""
new $MiscPkg.Unapplier[$tpe] {
def unapply($valueName: $tpe): $ScalaPkg.Seq[$ScalaPkg.Any] =
${unapplyBody(valueName, tpe, au)}
}
"""
}
def applierUnapplier[T: WeakTypeTag]: Tree = instrument {
val tpe = weakTypeOf[T].dealias
val rawValuesName = c.freshName(TermName("rawValues"))
val valueName = c.freshName(TermName("value"))
val au = applyUnapplyOrFail(tpe)
if (au.standardCaseClass && tpe <:< ProductTpe)
q"""
new $MiscPkg.ProductApplierUnapplier[$tpe] {
def apply($rawValuesName: $ScalaPkg.Seq[$ScalaPkg.Any]): $tpe =
${applyBody(rawValuesName, tpe, au)}
}
"""
else
q"""
new $MiscPkg.ApplierUnapplier[$tpe] {
def apply($rawValuesName: $ScalaPkg.Seq[$ScalaPkg.Any]): $tpe =
${applyBody(rawValuesName, tpe, au)}
def unapply($valueName: $tpe): $ScalaPkg.Seq[$ScalaPkg.Any] =
${unapplyBody(valueName, tpe, au)}
}
"""
}
def assertLocal(sym: Symbol): Symbol = {
if (sym.pos.source != c.enclosingPosition.source) {
abort(s"Macro inspection of $sym can only be done in the same source file where it is defined")
}
sym
}
def safeAnnotTree(annot: Annot): Tree = {
if (containsInaccessibleThises(annot.tree)) {
abortAt(s"Reified annotation ${annot.tree} contains inaccessible this-references", annot.tree.pos)
}
c.untypecheck(annot.tree)
}
def classSymbol(sym: Symbol): ClassSymbol = {
if (sym.isClass) sym.asClass
else abort(s"$sym is not a class")
}
def annotationOf[A: WeakTypeTag, T: WeakTypeTag]: Tree = instrument {
val atpe = weakTypeOf[A]
val tpe = weakTypeOf[T]
val sym = assertLocal(classSymbol(tpe.dealias.typeSymbol))
val annot = findAnnotation(sym, atpe)
.getOrElse(abort(s"No annotation of type $atpe found on $sym"))
q"$MiscPkg.AnnotationOf(${safeAnnotTree(annot)})"
}
def optAnnotationOf[A: WeakTypeTag, T: WeakTypeTag]: Tree = instrument {
val atpe = weakTypeOf[A]
val tpe = weakTypeOf[T]
val sym = assertLocal(classSymbol(tpe.dealias.typeSymbol))
val annotTree = findAnnotation(sym, atpe)
.fold[Tree](q"$CommonsPkg.Opt.Empty")(a => q"$CommonsPkg.Opt(${safeAnnotTree(a)})")
q"$MiscPkg.OptAnnotationOf($annotTree)"
}
def annotationsOf[A: WeakTypeTag, T: WeakTypeTag]: Tree = instrument {
val atpe = weakTypeOf[A]
val tpe = weakTypeOf[T]
val sym = assertLocal(classSymbol(tpe.dealias.typeSymbol))
val annots = allAnnotations(sym, atpe).map(safeAnnotTree)
q"$MiscPkg.AnnotationsOf($ListObj(..$annots))"
}
def hasAnnotation[A: WeakTypeTag, T: WeakTypeTag]: Tree = instrument {
val atpe = weakTypeOf[A]
val tpe = weakTypeOf[T]
val sym = assertLocal(classSymbol(tpe.dealias.typeSymbol))
if (findAnnotation(sym, atpe).nonEmpty)
q"$MiscPkg.HasAnnotation.create[$atpe, $tpe]"
else
abort(s"No annotation of type $atpe found on $sym")
}
def classBeingConstructed: Symbol = {
val ownerConstr = c.internal.enclosingOwner
if (!ownerConstr.isConstructor) {
abort(s"${c.macroApplication.symbol} can only be used as super constructor argument")
}
classSymbol(ownerConstr.owner)
}
def selfAnnotation[A: WeakTypeTag]: Tree = instrument {
val atpe = weakTypeOf[A]
val sym = classBeingConstructed
val annot = findAnnotation(sym, atpe)
.getOrElse(abort(s"No annotation of type $atpe found on $sym"))
q"$MiscPkg.SelfAnnotation(${safeAnnotTree(annot)})"
}
def selfOptAnnotation[A: WeakTypeTag]: Tree = instrument {
val atpe = weakTypeOf[A]
val sym = classBeingConstructed
val annotTree = findAnnotation(sym, atpe)
.fold[Tree](q"$CommonsPkg.Opt.Empty")(a => q"$CommonsPkg.Opt(${safeAnnotTree(a)})")
q"$MiscPkg.SelfOptAnnotation($annotTree)"
}
def selfAnnotations[A: WeakTypeTag]: Tree = instrument {
val atpe = weakTypeOf[A]
val sym = classBeingConstructed
val annots = allAnnotations(sym, atpe).map(safeAnnotTree)
q"$MiscPkg.SelfAnnotations($ListObj(..$annots))"
}
def selfInstance[C: WeakTypeTag]: Tree = instrument {
val TypeRef(pre, constrSym, _) = weakTypeOf[C].typeConstructor
val instance = internal.typeRef(pre, constrSym, List(classBeingConstructed.asType.toType))
q"$MiscPkg.SelfInstance($ImplicitsObj.infer[$instance])"
}
def aggregatedAnnots: Tree = {
val aggregatedMethod = c.internal.enclosingOwner
if (!aggregatedMethod.overrides.contains(AggregatedMethodSym)) {
abort("reifyAggregated macro must only be used to implement AnnotationAggregate.aggregated method")
}
if (aggregatedMethod.asMethod.isGetter || !aggregatedMethod.isFinal) {
abort("AnnotationAggregate.aggregated method implemented with reifyAggregated macro must be a final def")
}
val annotTrees = rawAnnotations(aggregatedMethod)
.filter(_.tree.tpe <:< StaticAnnotationTpe).map(a => c.untypecheck(a.tree))
if (annotTrees.isEmpty) {
warning("no aggregated annotations found on enclosing method")
}
q"$ListObj(..$annotTrees)"
}
def simpleClassName[T: WeakTypeTag]: Tree = instrument {
val sym = classSymbol(weakTypeOf[T].dealias.typeSymbol)
q"$MiscPkg.SimpleClassName(${sym.name.decodedName.toString})"
}
}
final class WhiteMiscMacros(ctx: whitebox.Context) extends AbstractMacroCommons(ctx) {
import c.universe._
lazy val WhenAbsentAT: Type = staticType(tq"$CommonsPkg.serialization.whenAbsent[_]")
def whenAbsentValue: Tree = {
val param = c.internal.enclosingOwner match {
case DefaultValueMethod(p) => p
case p => p
}
findAnnotation(param, WhenAbsentAT).map(_.tree).map {
case Apply(_, List(MaybeTyped(arg, _))) => arg
case t => abort(s"unexpected tree for @whenAbsent annotation: $t")
} getOrElse {
abort(s"no @whenAbsent annotation found on $param of ${param.owner}")
}
}
def inferValue: Tree = {
val param = c.internal.enclosingOwner match {
case DefaultValueMethod(p) => p
case p => p
}
if (param.owner.owner.asType.toType <:< AnnotationTpe && findAnnotation(param, InferAT).nonEmpty)
q"""throw new $ScalaPkg.NotImplementedError("infer.value")"""
else
abort(s"infer.value can be only used as default value of @infer annotation parameters")
}
def autoAnnotationMetadata: Tree = {
val param = c.internal.enclosingOwner match {
case DefaultValueMethod(p) => p
case p => p
}
if (param.owner.owner.asType.toType <:< AnnotationTpe)
q"""throw new $ScalaPkg.NotImplementedError("RpcMetadata.auto")"""
else
abort(s"RpcMetadata.auto can be only used as default value of annotation parameters")
}
def normalizeGadtSubtype(tpref: Tree, value: Tree): Tree = {
def print(msg: String): Unit =
if (c.enclosingPosition.line == 57) {
echo(msg)
}
val StringLiteral(tprefStr) = tpref
val quantified = new ListBuffer[Symbol]
print(show(value.tpe))
val unrefined = value.tpe match {
case RefinedType(List(_, second), _) => second
case t => t
}
print(show(unrefined))
val withFullyDetermined = unrefined.map { t =>
if (t.typeSymbol.name.toString.startsWith(tprefStr)) {
t.typeSymbol.typeSignature match {
case TypeBounds(lo, hi) if lo =:= hi =>
lo
case ts =>
print(t.typeSymbol.name.toString + show(ts))
quantified += t.typeSymbol
t
}
} else t
}
print(show(withFullyDetermined))
val withoutMatchedTypes = internal.existentialAbstraction(quantified.result(), withFullyDetermined)
print(show(withoutMatchedTypes))
val innerQuantified = new mutable.HashSet[Symbol]
val outerQuantified = new ListBuffer[Symbol]
withoutMatchedTypes.foreach {
case ExistentialType(iq, _) => innerQuantified ++= iq
case t if t.typeSymbol.isType =>
if (t.typeSymbol.asType.isExistential && !innerQuantified.contains(t.typeSymbol)) {
outerQuantified += t.typeSymbol
}
case _ =>
}
val normTpe = internal.existentialAbstraction(outerQuantified.result(), withoutMatchedTypes)
print(show(normTpe))
q"$value: ${treeForType(normTpe)}"
}
}
|
AVSystem/scala-commons
|
commons-macros/src/main/scala/com/avsystem/commons/macros/misc/MiscMacros.scala
|
Scala
|
mit
| 30,786 |
package com.datawizards.sparklocal.impl.scala.parallel.dataset
import com.datawizards.sparklocal.dataset.{DataSetAPI, KeyValueGroupedDataSetAPI}
import com.datawizards.sparklocal.impl.scala.`lazy`.dataset.DataSetAPIScalaLazyImpl
import com.datawizards.sparklocal.impl.scala.dataset.DataSetAPIScalaBase
import com.datawizards.sparklocal.rdd.RDDAPI
import org.apache.spark.sql.Encoder
import scala.collection.GenIterable
import scala.collection.parallel.ParSeq
import scala.reflect.ClassTag
object DataSetAPIScalaParallelImpl {
private[sparklocal] def create[U: ClassTag](it: GenIterable[U])(implicit enc: Encoder[U]): DataSetAPIScalaBase[U] =
new DataSetAPIScalaParallelImpl(it.toSeq.par)
}
class DataSetAPIScalaParallelImpl[T: ClassTag](private[sparklocal] val data: ParSeq[T]) extends DataSetAPIScalaBase[T] {
override type InternalCollection = ParSeq[T]
override private[sparklocal] def create[U: ClassTag](it: GenIterable[U])(implicit enc: Encoder[U]): DataSetAPIScalaBase[U] =
DataSetAPIScalaParallelImpl.create(it)
private def create[U: ClassTag](data: ParSeq[U]): DataSetAPIScalaBase[U] =
new DataSetAPIScalaParallelImpl(data)
override protected def union(data: InternalCollection, dsScala: DataSetAPIScalaBase[T]): DataSetAPIScalaBase[T] =
create(data.union(dsScala.data.toSeq))
override protected def intersect(data: InternalCollection, dsScala: DataSetAPIScalaBase[T]): DataSetAPIScalaBase[T] =
create(data.intersect(dsScala.data.toSeq))
override protected def diff(data: InternalCollection, dsScala: DataSetAPIScalaBase[T]): DataSetAPIScalaBase[T] =
create(data.diff(dsScala.data.toSeq))
override def distinct(): DataSetAPI[T] =
create(data.distinct)
override def groupByKey[K: ClassTag](func: (T) => K)(implicit enc: Encoder[K]): KeyValueGroupedDataSetAPI[K, T] =
new KeyValueGroupedDataSetAPIScalaParallelImpl(data.groupBy(func))
override def rdd(): RDDAPI[T] = RDDAPI(data)
}
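// Hedged usage sketch (added for illustration, not part of the original file): wrapping a
// parallel collection directly via the public constructor; the operations below use only
// the API defined above.
object DataSetAPIScalaParallelImplExample {
  private val ds = new DataSetAPIScalaParallelImpl(Seq(1, 2, 3, 3).par)
  val distinctDs: DataSetAPI[Int] = ds.distinct()
}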
|
piotr-kalanski/spark-local
|
src/main/scala/com/datawizards/sparklocal/impl/scala/parallel/dataset/DataSetAPIScalaParallelImpl.scala
|
Scala
|
apache-2.0
| 1,953 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc.request
import play.api.libs.typedmap.TypedKey
import play.api.mvc.{ Cookies, Flash, Session }
/**
* Keys to request attributes.
*/
object RequestAttrKey {
/**
* The key for the request attribute storing a request id.
*/
val Id = TypedKey[Long]("Id")
/**
* The key for the request attribute storing a [[Cell]] with
* [[play.api.mvc.Cookies]] in it.
*/
val Cookies = TypedKey[Cell[Cookies]]("Cookies")
/**
* The key for the request attribute storing a [[Cell]] with
* the [[play.api.mvc.Session]] cookie in it.
*/
val Session = TypedKey[Cell[Session]]("Session")
/**
* The key for the request attribute storing a [[Cell]] with
* the [[play.api.mvc.Flash]] cookie in it.
*/
val Flash = TypedKey[Cell[Flash]]("Flash")
/**
* The key for the request attribute storing the request's
* tags.
*/
@deprecated("Use attributes instead of tags", "2.6.0")
val Tags = TypedKey[Map[String, String]]("Tags")
/**
* The key for the request attribute storing the server name.
*/
val Server = TypedKey[String]("Server-Name")
}
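/**
 * Hedged usage sketch (added for illustration, not part of the original file): reading the
 * request id attribute from a request, assuming Play's `RequestHeader.attrs` TypedMap API.
 */
private[request] object RequestAttrKeyUsageSketch {
  import play.api.mvc.RequestHeader

  def requestId(rh: RequestHeader): Option[Long] = rh.attrs.get(RequestAttrKey.Id)
}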
|
Shruti9520/playframework
|
framework/src/play/src/main/scala/play/api/mvc/request/RequestAttrKey.scala
|
Scala
|
apache-2.0
| 1,189 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import scala.collection.mutable
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.expressions.SubqueryExpression
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias, With}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.{LEGACY_CTE_PRECEDENCE_POLICY, LegacyBehaviorPolicy}
/**
* Analyze WITH nodes and substitute child plan with CTE definitions.
*/
object CTESubstitution extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = {
LegacyBehaviorPolicy.withName(SQLConf.get.getConf(LEGACY_CTE_PRECEDENCE_POLICY)) match {
case LegacyBehaviorPolicy.EXCEPTION =>
assertNoNameConflictsInCTE(plan)
traverseAndSubstituteCTE(plan)
case LegacyBehaviorPolicy.LEGACY =>
legacyTraverseAndSubstituteCTE(plan)
case LegacyBehaviorPolicy.CORRECTED =>
traverseAndSubstituteCTE(plan)
}
}
/**
   * Spark 3.0 changes CTE relation resolution so that inner relations take precedence. This is
   * correct, but under EXCEPTION mode we need to warn users about this behavior change when we
   * see CTE relations with conflicting names.
*
* Note that, before Spark 3.0 the parser didn't support CTE in the FROM clause. For example,
* `WITH ... SELECT * FROM (WITH ... SELECT ...)` was not supported. We should not fail for this
* case, as Spark versions before 3.0 can't run it anyway. The parameter `startOfQuery` is used
* to indicate where we can define CTE relations before Spark 3.0, and we should only check
* name conflicts when `startOfQuery` is true.
*/
private def assertNoNameConflictsInCTE(
plan: LogicalPlan,
outerCTERelationNames: Seq[String] = Nil,
startOfQuery: Boolean = true): Unit = {
val resolver = SQLConf.get.resolver
plan match {
case With(child, relations) =>
val newNames = mutable.ArrayBuffer.empty[String]
newNames ++= outerCTERelationNames
relations.foreach {
case (name, relation) =>
if (startOfQuery && outerCTERelationNames.exists(resolver(_, name))) {
throw new AnalysisException(s"Name $name is ambiguous in nested CTE. " +
s"Please set ${LEGACY_CTE_PRECEDENCE_POLICY.key} to CORRECTED so that name " +
"defined in inner CTE takes precedence. If set it to LEGACY, outer CTE " +
"definitions will take precedence. See more details in SPARK-28228.")
}
// CTE relation is defined as `SubqueryAlias`. Here we skip it and check the child
// directly, so that `startOfQuery` is set correctly.
assertNoNameConflictsInCTE(relation.child, newNames.toSeq)
newNames += name
}
assertNoNameConflictsInCTE(child, newNames.toSeq, startOfQuery = false)
case other =>
other.subqueries.foreach(assertNoNameConflictsInCTE(_, outerCTERelationNames))
other.children.foreach(
assertNoNameConflictsInCTE(_, outerCTERelationNames, startOfQuery = false))
}
}
private def legacyTraverseAndSubstituteCTE(plan: LogicalPlan): LogicalPlan = {
plan.resolveOperatorsUp {
case With(child, relations) =>
val resolvedCTERelations = resolveCTERelations(relations, isLegacy = true)
substituteCTE(child, resolvedCTERelations)
}
}
/**
* Traverse the plan and expression nodes as a tree and replace matching references to CTE
* definitions.
   * - If the rule encounters a WITH node then it substitutes the child of the node with the
   *   node's CTE definitions, applied in right-to-left order, since a definition can reference
   *   a previous one.
* For example the following query is valid:
* WITH
* t AS (SELECT 1),
* t2 AS (SELECT * FROM t)
* SELECT * FROM t2
* - If a CTE definition contains an inner WITH node then substitution of inner should take
* precedence because it can shadow an outer CTE definition.
* For example the following query should return 2:
* WITH
* t AS (SELECT 1),
* t2 AS (
* WITH t AS (SELECT 2)
* SELECT * FROM t
* )
* SELECT * FROM t2
* - If a CTE definition contains a subquery that contains an inner WITH node then substitution
* of inner should take precedence because it can shadow an outer CTE definition.
* For example the following query should return 2:
* WITH t AS (SELECT 1 AS c)
* SELECT max(c) FROM (
* WITH t AS (SELECT 2 AS c)
* SELECT * FROM t
* )
* - If a CTE definition contains a subquery expression that contains an inner WITH node then
* substitution of inner should take precedence because it can shadow an outer CTE
* definition.
* For example the following query should return 2:
* WITH t AS (SELECT 1)
* SELECT (
* WITH t AS (SELECT 2)
* SELECT * FROM t
* )
* @param plan the plan to be traversed
* @return the plan where CTE substitution is applied
*/
private def traverseAndSubstituteCTE(plan: LogicalPlan): LogicalPlan = {
plan.resolveOperatorsUp {
case With(child: LogicalPlan, relations) =>
val resolvedCTERelations = resolveCTERelations(relations, isLegacy = false)
substituteCTE(child, resolvedCTERelations)
case other =>
other.transformExpressions {
case e: SubqueryExpression => e.withNewPlan(traverseAndSubstituteCTE(e.plan))
}
}
}
private def resolveCTERelations(
relations: Seq[(String, SubqueryAlias)],
isLegacy: Boolean): Seq[(String, LogicalPlan)] = {
val resolvedCTERelations = new mutable.ArrayBuffer[(String, LogicalPlan)](relations.size)
for ((name, relation) <- relations) {
val innerCTEResolved = if (isLegacy) {
// In legacy mode, outer CTE relations take precedence. Here we don't resolve the inner
// `With` nodes, later we will substitute `UnresolvedRelation`s with outer CTE relations.
// Analyzer will run this rule multiple times until all `With` nodes are resolved.
relation
} else {
// A CTE definition might contain an inner CTE that has a higher priority, so traverse and
// substitute CTE defined in `relation` first.
traverseAndSubstituteCTE(relation)
}
// CTE definition can reference a previous one
resolvedCTERelations += (name -> substituteCTE(innerCTEResolved, resolvedCTERelations.toSeq))
}
resolvedCTERelations.toSeq
}
private def substituteCTE(
plan: LogicalPlan,
cteRelations: Seq[(String, LogicalPlan)]): LogicalPlan =
plan resolveOperatorsUp {
case u @ UnresolvedRelation(Seq(table)) =>
cteRelations.find(r => plan.conf.resolver(r._1, table)).map(_._2).getOrElse(u)
case other =>
// This cannot be done in ResolveSubquery because ResolveSubquery does not know the CTE.
other transformExpressions {
case e: SubqueryExpression => e.withNewPlan(substituteCTE(e.plan, cteRelations))
}
}
}
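/**
 * Hedged usage sketch (added for illustration, not part of the original file): applying the
 * rule to a freshly parsed plan. Assumes `CatalystSqlParser` is on the classpath and that the
 * default SQL configuration is in effect.
 */
private object CTESubstitutionUsageSketch {
  import org.apache.spark.sql.catalyst.parser.CatalystSqlParser

  val parsed: LogicalPlan =
    CatalystSqlParser.parsePlan("WITH t AS (SELECT 1 AS c) SELECT * FROM t")

  // After substitution the reference to `t` is replaced by its definition.
  val substituted: LogicalPlan = CTESubstitution(parsed)
}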
|
dbtsai/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CTESubstitution.scala
|
Scala
|
apache-2.0
| 8,007 |
/*
* Copyright 2014 Lars Edenbrandt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package se.nimsa.sbx.user
import se.nimsa.sbx.user.UserProtocol._
import se.nimsa.sbx.util.DbUtil._
import slick.basic.DatabaseConfig
import slick.jdbc.JdbcProfile
import scala.concurrent.{ExecutionContext, Future}
class UserDAO(val dbConf: DatabaseConfig[JdbcProfile])(implicit ec: ExecutionContext) {
import dbConf.profile.api._
val db = dbConf.db
val toUser = (id: Long, user: String, role: String, password: String) => ApiUser(id, user, UserRole.withName(role), Some(password))
val fromUser = (user: ApiUser) => Option((user.id, user.user, user.role.toString(), user.hashedPassword.get))
class UserTable(tag: Tag) extends Table[ApiUser](tag, UserTable.name) {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def user = column[String]("user", O.Length(180))
def role = column[String]("role")
def password = column[String]("password")
def idxUniqueUser = index("idx_unique_user", user, unique = true)
def * = (id, user, role, password) <> (toUser.tupled, fromUser)
}
object UserTable {
val name = "User"
}
val users = TableQuery[UserTable]
class SessionTable(tag: Tag) extends Table[ApiSession](tag, SessionTable.name) {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def userId = column[Long]("userid")
def token = column[String]("token", O.Length(64))
def ip = column[String]("ip", O.Length(64))
def userAgent = column[String]("useragent", O.Length(32))
def updated = column[Long]("updated")
def fkUser = foreignKey("fk_user", userId, users)(_.id, onDelete = ForeignKeyAction.Cascade)
def idxUniqueSession = index("idx_unique_session", (token, ip, userAgent), unique = true)
def * = (id, userId, token, ip, userAgent, updated) <> (ApiSession.tupled, ApiSession.unapply)
}
object SessionTable {
val name = "ApiSession"
}
val sessions = TableQuery[SessionTable]
def create() = createTables(dbConf, (UserTable.name, users), (SessionTable.name, sessions))
def drop() = db.run {
(users.schema ++ sessions.schema).drop
}
def clear() = db.run {
DBIO.seq(users.delete, sessions.delete)
}
def insert(user: ApiUser): Future[ApiUser] = db.run {
users returning users.map(_.id) += user
}.map(generatedId => user.copy(id = generatedId))
def userById(userId: Long): Future[Option[ApiUser]] = db.run {
users.filter(_.id === userId).result.headOption
}
def userByName(user: String): Future[Option[ApiUser]] = db.run {
users.filter(_.user === user).result.headOption
}
def userSessionsByToken(token: String): Future[Seq[(ApiUser, ApiSession)]] = db.run {
(for {
users <- users
sessions <- sessions if sessions.userId === users.id
} yield (users, sessions))
.filter(_._2.token === token)
.result
}
def userSessionByTokenIpAndUserAgent(token: String, ip: String, userAgent: String): Future[Option[(ApiUser, ApiSession)]] = db.run {
(for {
users <- users
sessions <- sessions if sessions.userId === users.id
} yield (users, sessions))
.filter(_._2.token === token)
.filter(_._2.ip === ip)
.filter(_._2.userAgent === userAgent)
.result.headOption
}
def deleteUserByUserId(userId: Long): Future[Int] = db.run {
users.filter(_.id === userId).delete
}
def listUsers(startIndex: Long, count: Long): Future[Seq[ApiUser]] = db.run {
users
.drop(startIndex)
.take(count)
.result
}
def listSessions: Future[Seq[ApiSession]] = db.run {
sessions.result
}
def userSessionByUserIdIpAndUserAgent(userId: Long, ip: String, userAgent: String): Future[Option[ApiSession]] = db.run {
(for {
users <- users
sessions <- sessions if sessions.userId === users.id
} yield (users, sessions))
.filter(_._1.id === userId)
.filter(_._2.ip === ip)
.filter(_._2.userAgent === userAgent)
.map(_._2)
.result.headOption
}
def insertSession(apiSession: ApiSession) = db.run {
sessions returning sessions.map(_.id) += apiSession
}.map(generatedId => apiSession.copy(id = generatedId))
def updateSession(apiSession: ApiSession) = db.run {
sessions.filter(_.id === apiSession.id).update(apiSession)
}
def deleteSessionByUserIdIpAndUserAgent(userId: Long, ip: String, userAgent: String): Future[Int] = db.run {
sessions
.filter(_.userId === userId)
.filter(_.ip === ip)
.filter(_.userAgent === userAgent)
.delete
}
def deleteSessionById(sessionId: Long): Future[Int] = db.run {
sessions.filter(_.id === sessionId).delete
}
}
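/**
 * Hedged usage sketch (added for illustration, not part of the original file): wiring the DAO
 * against a Slick DatabaseConfig. The config path "slicebox.database" is an assumption, not
 * necessarily the project's real setting.
 */
object UserDAOUsageSketch {
  import scala.concurrent.ExecutionContext.Implicits.global

  val dbConf: DatabaseConfig[JdbcProfile] = DatabaseConfig.forConfig[JdbcProfile]("slicebox.database")
  val dao = new UserDAO(dbConf)

  val firstPage: Future[Seq[ApiUser]] = dao.listUsers(startIndex = 0, count = 10)
}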
|
slicebox/slicebox
|
src/main/scala/se/nimsa/sbx/user/UserDAO.scala
|
Scala
|
apache-2.0
| 5,186 |
package util
import org.eclipse.jgit.api.Git
import org.eclipse.jgit.revwalk.RevWalk
import org.eclipse.jgit.treewalk.TreeWalk
import org.eclipse.jgit.transport.RefSpec
/**
 * Provides small control-flow and resource-management helpers such as `defining`, `using` and conditional execution.
*/
object ControlUtil {
def defining[A, B](value: A)(f: A => B): B = f(value)
def using[A <% { def close(): Unit }, B](resource: A)(f: A => B): B =
try f(resource) finally {
if(resource != null){
try {
resource.close()
} catch {
case e: Throwable => // ignore
}
}
}
def using[T](git: Git)(f: Git => T): T =
try f(git) finally git.getRepository.close
def using[T](git1: Git, git2: Git)(f: (Git, Git) => T): T =
try f(git1, git2) finally {
git1.getRepository.close
git2.getRepository.close
}
def using[T](revWalk: RevWalk)(f: RevWalk => T): T =
try f(revWalk) finally revWalk.release()
def using[T](treeWalk: TreeWalk)(f: TreeWalk => T): T =
try f(treeWalk) finally treeWalk.release()
def withTmpRefSpec[T](ref: RefSpec, git: Git)(f: RefSpec => T): T = {
try {
f(ref)
} finally {
val refUpdate = git.getRepository.updateRef(ref.getDestination)
refUpdate.setForceUpdate(true)
refUpdate.delete()
}
}
def executeIf(condition: => Boolean)(action: => Unit): Boolean =
if(condition){
action
true
} else false
def optionIf[T](condition: => Boolean)(action: => Option[T]): Option[T] =
if(condition) action else None
}
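/**
 * Hedged usage sketch (added for illustration, not part of the original file): `using` works
 * for any resource exposing a `close()` method via the structural view bound, and `defining`
 * simply names an intermediate value.
 */
object ControlUtilUsageSketch {
  import java.io.ByteArrayInputStream
  import ControlUtil._

  val firstByte: Int = using(new ByteArrayInputStream(Array[Byte](42))) { in =>
    in.read()
  }

  val doubled: Int = defining(21)(_ * 2)
}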
|
loveshell/gitbucket
|
src/main/scala/util/ControlUtil.scala
|
Scala
|
apache-2.0
| 1,502 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.physical.batch
import org.apache.flink.runtime.operators.DamBehavior
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.dataformat.BaseRow
import org.apache.flink.table.functions.UserDefinedFunction
import org.apache.flink.table.plan.`trait`.{FlinkRelDistribution, FlinkRelDistributionTraitDef}
import org.apache.flink.table.plan.util.{FlinkRelOptUtil, RelExplainUtil}
import org.apache.calcite.plan.{RelOptCluster, RelOptRule, RelTraitSet}
import org.apache.calcite.rel.RelDistribution.Type
import org.apache.calcite.rel._
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.tools.RelBuilder
import org.apache.calcite.util.ImmutableIntList
import java.util
import org.apache.flink.api.dag.Transformation
import scala.collection.JavaConversions._
import scala.collection.mutable
/**
* Batch physical RelNode for local sort-based aggregate operator.
*
* @see [[BatchExecGroupAggregateBase]] for more info.
*/
class BatchExecLocalSortAggregate(
cluster: RelOptCluster,
relBuilder: RelBuilder,
traitSet: RelTraitSet,
inputRel: RelNode,
outputRowType: RelDataType,
inputRowType: RelDataType,
grouping: Array[Int],
auxGrouping: Array[Int],
aggCallToAggFunction: Seq[(AggregateCall, UserDefinedFunction)])
extends BatchExecSortAggregateBase(
cluster,
relBuilder,
traitSet,
inputRel,
outputRowType,
inputRowType,
inputRowType,
grouping,
auxGrouping,
aggCallToAggFunction,
isMerge = false,
isFinal = false) {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new BatchExecLocalSortAggregate(
cluster,
relBuilder,
traitSet,
inputs.get(0),
outputRowType,
inputRowType,
grouping,
auxGrouping,
aggCallToAggFunction)
}
override def explainTerms(pw: RelWriter): RelWriter = {
super.explainTerms(pw)
.itemIf("groupBy",
RelExplainUtil.fieldToString(grouping, inputRowType), grouping.nonEmpty)
.itemIf("auxGrouping",
RelExplainUtil.fieldToString(auxGrouping, inputRowType), auxGrouping.nonEmpty)
.item("select", RelExplainUtil.groupAggregationToString(
inputRowType,
outputRowType,
grouping,
auxGrouping,
aggCallToAggFunction,
isMerge = false,
isGlobal = false))
}
override def satisfyTraits(requiredTraitSet: RelTraitSet): Option[RelNode] = {
    // Do not try to satisfy the requirement via localAgg's input if two-stage agg is enforced.
if (isEnforceTwoStageAgg) {
return None
}
val groupCount = grouping.length
val requiredDistribution = requiredTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
val canSatisfy = requiredDistribution.getType match {
case Type.HASH_DISTRIBUTED | Type.RANGE_DISTRIBUTED =>
// Cannot satisfy distribution if keys are not group keys of agg
requiredDistribution.getKeys.forall(_ < groupCount)
case _ => false
}
if (!canSatisfy) {
return None
}
val keys = requiredDistribution.getKeys.map(grouping(_))
val inputRequiredDistributionKeys = ImmutableIntList.of(keys: _*)
val inputRequiredDistribution = requiredDistribution.getType match {
case Type.HASH_DISTRIBUTED =>
FlinkRelDistribution.hash(inputRequiredDistributionKeys, requiredDistribution.requireStrict)
case Type.RANGE_DISTRIBUTED =>
FlinkRelDistribution.range(inputRequiredDistributionKeys)
}
val requiredCollation = requiredTraitSet.getTrait(RelCollationTraitDef.INSTANCE)
val providedFieldCollations = (0 until groupCount).map(FlinkRelOptUtil.ofRelFieldCollation)
val providedCollation = RelCollations.of(providedFieldCollations)
val newProvidedTraits = if (providedCollation.satisfies(requiredCollation)) {
getTraitSet.replace(requiredDistribution).replace(requiredCollation)
} else {
getTraitSet.replace(requiredDistribution)
}
val inputRequiredTraits = getInput.getTraitSet.replace(inputRequiredDistribution)
val newInput = RelOptRule.convert(getInput, inputRequiredTraits)
Some(copy(newProvidedTraits, Seq(newInput)))
}
//~ ExecNode methods -----------------------------------------------------------
override def getDamBehavior: DamBehavior = {
if (grouping.length == 0) DamBehavior.FULL_DAM else DamBehavior.MATERIALIZING
}
override def getOperatorName: String = aggOperatorName("LocalSortAggregate")
}
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/physical/batch/BatchExecLocalSortAggregate.scala
|
Scala
|
apache-2.0
| 5,417 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.recorder.scenario.template
import io.gatling.BaseSpec
import io.gatling.recorder.scenario.RequestElement
import io.netty.handler.codec.http.EmptyHttpHeaders
class ExtractedUrisSpec extends BaseSpec {
def mockRequestElement(uri: String) = new RequestElement(uri, "get", EmptyHttpHeaders.INSTANCE, None, None, 200, Nil)
def extractUris(uris: Seq[String]): ExtractedUris = {
val requestElements = uris.map(mockRequestElement)
new ExtractedUris(requestElements)
}
"extracting uris" should "extract common root" in {
val commonRoot = "http://gatling.io/path1"
val url1 = commonRoot + "/file1"
val url2 = commonRoot + "/file2"
val extractedUris = extractUris(Seq(url1, url2))
extractedUris.vals shouldBe List(Value("uri1", commonRoot))
extractedUris.renderUri(url1).toString shouldBe """uri1 + "/file1""""
extractedUris.renderUri(url2).toString shouldBe """uri1 + "/file2""""
}
it should "extract common roots from different authorities" in {
val gatlingRoot = "http://gatling.io/path1"
val gatlingUrl1 = gatlingRoot + "/file1"
val gatlingUrl2 = gatlingRoot + "/file2"
val nettyRoot = "http://netty.io"
val nettyUrl1 = nettyRoot + "/file1"
val nettyUrl2 = nettyRoot + "/file2"
val extractedUris = extractUris(Seq(gatlingUrl1, gatlingUrl2, nettyUrl1, nettyUrl2))
extractedUris.vals shouldBe List(Value("uri2", gatlingRoot), Value("uri1", nettyRoot))
extractedUris.renderUri(gatlingUrl1).toString shouldBe """uri2 + "/file1""""
extractedUris.renderUri(gatlingUrl2).toString shouldBe """uri2 + "/file2""""
extractedUris.renderUri(nettyUrl1).toString shouldBe """uri1 + "/file1""""
extractedUris.renderUri(nettyUrl2).toString shouldBe """uri1 + "/file2""""
}
it should "preserve port and auth" in {
val gatlingUri = "https://user:[email protected]:8080/?q=v"
val extractedUris = extractUris(Seq(gatlingUri))
extractedUris.vals shouldBe List(Value("uri1", "https://user:[email protected]:8080"))
extractedUris.renderUri(gatlingUri).toString shouldBe """uri1 + "/?q=v""""
}
it should "extract only authorities when they are used with different schemes" in {
val extractedUris = extractUris(Seq("http://gatling.io/path1/file1", "https://gatling.io/path1/file2"))
extractedUris.vals shouldBe List(Value("uri1", "gatling.io"))
extractedUris.renderUri("http://gatling.io/path1/file1").toString shouldBe """"http://" + uri1 + "/path1/file1""""
extractedUris.renderUri("https://gatling.io/path1/file2").toString shouldBe """"https://" + uri1 + "/path1/file2""""
}
it should "extract only authorities when they are used with different ports" in {
val uri1 = "http://gatling.io/path1/file"
val uri2 = "http://gatling.io:8080/path1/file"
val extractedUris = extractUris(Seq(uri1, uri2))
extractedUris.vals shouldBe List(Value("uri1", "gatling.io"))
extractedUris.renderUri(uri1).toString shouldBe """"http://" + uri1 + "/path1/file""""
extractedUris.renderUri(uri2).toString shouldBe """"http://" + uri1 + ":8080/path1/file""""
}
}
|
wiacekm/gatling
|
gatling-recorder/src/test/scala/io/gatling/recorder/scenario/template/ExtractedUrisSpec.scala
|
Scala
|
apache-2.0
| 3,727 |
package cgta.ojs
package lang
import scala.scalajs.js
//////////////////////////////////////////////////////////////
// Created by bjackman @ 12/11/13 12:17 AM
//////////////////////////////////////////////////////////////
trait JsConsole extends js.Object {
  def log(xs: Any*): Unit
  def error(xs: Any*): Unit
}
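// Hedged usage sketch (added for illustration, not part of the original file): the facade can
// be bound to the browser console from the JavaScript global scope, assuming the Scala.js
// `js.Dynamic.global` API.
object JsConsoleExample {
  val console: JsConsole = js.Dynamic.global.console.asInstanceOf[JsConsole]

  def demo(): Unit =
    console.log("hello", "from", "the facade")
}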
|
benjaminjackman/cgta-scala-js
|
src/main/scala/cgta/ojs/lang/JsConsole.scala
|
Scala
|
mit
| 310 |
package org.bitcoins.core.protocol.ln.currency
import org.bitcoins.core._
import org.bitcoins.core.currency.{CurrencyUnit, Satoshis}
import org.bitcoins.core.number.{BasicArithmetic, UInt64}
import org.bitcoins.crypto.NetworkElement
import scodec.bits.ByteVector
import scala.math.BigDecimal.RoundingMode
/** The common currency unit used in the
* LN protocol for updating HTLCs.
*
* @see [[https://github.com/lightningnetwork/lightning-rfc/blob/master/02-peer-protocol.md#adding-an-htlc-update_add_htlc BOLT2]]
*/
sealed abstract class MilliSatoshis
extends NetworkElement
with Ordered[MilliSatoshis]
with BasicArithmetic[MilliSatoshis] {
require(toBigInt >= 0, s"Millisatoshis cannot be negative, got $toBigInt")
protected def underlying: BigInt
/** Output example:
* {{{
* > MilliSatoshis(10)
   * 10 msats
* }}}
*/
override def toString: String = {
val num = toBigInt
val postFix = if (num == 1) "msat" else "msats"
s"$num $postFix"
}
def toBigInt: BigInt = underlying
  def toLong: Long = toBigInt.bigInteger.longValueExact()
def toBigDecimal: BigDecimal = BigDecimal(toBigInt)
def toLnCurrencyUnit: LnCurrencyUnit = {
LnCurrencyUnits.fromMSat(this)
}
def ==(lnCurrencyUnit: LnCurrencyUnit): Boolean = {
toLnCurrencyUnit == lnCurrencyUnit
}
def !=(lnCurrencyUnit: LnCurrencyUnit): Boolean = {
toLnCurrencyUnit != lnCurrencyUnit
}
def >=(ln: LnCurrencyUnit): Boolean = {
toLnCurrencyUnit >= ln
}
def >(ln: LnCurrencyUnit): Boolean = {
toLnCurrencyUnit > ln
}
def <(ln: LnCurrencyUnit): Boolean = {
toLnCurrencyUnit < ln
}
def <=(ln: LnCurrencyUnit): Boolean = {
toLnCurrencyUnit <= ln
}
def ==(ms: MilliSatoshis): Boolean = {
toBigInt == ms.toBigInt
}
def !=(ms: MilliSatoshis): Boolean = {
toBigInt != ms.toBigInt
}
override def compare(ms: MilliSatoshis): Int = toBigInt compare ms.toBigInt
override def +(ms: MilliSatoshis): MilliSatoshis = {
MilliSatoshis(toBigInt + ms.toBigInt)
}
override def -(ms: MilliSatoshis): MilliSatoshis = {
MilliSatoshis(toBigInt - ms.toBigInt)
}
override def *(factor: BigInt): MilliSatoshis = {
MilliSatoshis(toBigInt * factor)
}
override def *(factor: MilliSatoshis): MilliSatoshis = {
MilliSatoshis(toBigInt * factor.toBigInt)
}
def toUInt64: UInt64 = {
UInt64(underlying)
}
def toSatoshis: Satoshis = {
toLnCurrencyUnit.toSatoshis
}
override def bytes: ByteVector = toUInt64.bytes.reverse
}
object MilliSatoshis {
private case class MilliSatoshisImpl(underlying: BigInt) extends MilliSatoshis
val zero: MilliSatoshis = MilliSatoshis(0)
val one: MilliSatoshis = MilliSatoshis(1)
def apply(underlying: BigInt): MilliSatoshis = {
MilliSatoshisImpl(underlying)
}
def fromPico(picoBitcoins: PicoBitcoins): MilliSatoshis = {
val pico = picoBitcoins.toPicoBitcoinDecimal
// we need to divide by 10 to get to msat
val msatDec = pico / LnCurrencyUnits.MSAT_TO_PICO
    // now we need to round; we round the same way we round outputs
    // when publishing txs to the blockchain
//https://github.com/lightningnetwork/lightning-rfc/blob/master/03-transactions.md#commitment-transaction-outputs
val rounded = msatDec.setScale(0, RoundingMode.DOWN)
MilliSatoshis(rounded.toBigIntExact.get)
}
def apply(lnCurrencyUnit: LnCurrencyUnit): MilliSatoshis = {
fromPico(picoBitcoins = lnCurrencyUnit.toPicoBitcoins)
}
def apply(currencyUnit: CurrencyUnit): MilliSatoshis = {
fromSatoshis(currencyUnit.satoshis)
}
def fromSatoshis(sat: Satoshis): MilliSatoshis = {
MilliSatoshis(sat.toBigInt * 1000)
}
}
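/**
 * Hedged usage sketch (added for illustration, not part of the original file): basic
 * construction, arithmetic and comparison using only the APIs defined above.
 */
private[currency] object MilliSatoshisExample {
  val a: MilliSatoshis = MilliSatoshis(1500)
  val b: MilliSatoshis = MilliSatoshis(500)

  val sum: MilliSatoshis = a + b // 2000 msats
  val doubled: MilliSatoshis = a * BigInt(2) // 3000 msats
  val aIsLarger: Boolean = a > b // true, via Ordered[MilliSatoshis]
  val raw: BigInt = sum.toBigInt // 2000
}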
|
bitcoin-s/bitcoin-s
|
core/src/main/scala/org/bitcoins/core/protocol/ln/currency/MilliSatoshis.scala
|
Scala
|
mit
| 3,713 |
package com.nabijaczleweli.fancymagicks.element.caster.forward
import com.nabijaczleweli.fancymagicks.element.caster.NoElementCaster
import com.nabijaczleweli.fancymagicks.element.elements.Element
import com.nabijaczleweli.fancymagicks.util.EntityUtil
import net.minecraft.entity.Entity
class NoElementForwardCaster(who: Entity) extends NoElementCaster(who) {
def this(who: Entity, elems: Seq[Element]) =
this(who)
override protected def entitiesToDamage =
EntityUtil.filterForFrustrum(EntityUtil.entitiesInRadius[Entity](who, chargeup / 10D), EntityUtil frustrumFor who)
}
|
nabijaczleweli/Magicks
|
src/main/scala/com/nabijaczleweli/fancymagicks/element/caster/forward/NoElementForwardCaster.scala
|
Scala
|
mit
| 583 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.subscribers
import monix.execution.ExecutionModel.SynchronousExecution
import monix.execution.exceptions.DummyException
import monix.reactive.{BaseTestSuite, Observable}
import scala.util.{Failure, Success}
object ObservableForeachSuite extends BaseTestSuite {
test("foreach subscribes immediately") { scheduler =>
implicit val s = scheduler.withExecutionModel(SynchronousExecution)
var sum = 0
val f = Observable.fromIterable(0 until 1000).foreach(x => sum += x)
assertEquals(sum, 500 * 999)
assertEquals(f.value, Some(Success(())))
}
test("foreachL subscribes immediately") { scheduler =>
implicit val s = scheduler.withExecutionModel(SynchronousExecution)
var sum = 0
val f = Observable.fromIterable(0 until 1000).foreachL(x => sum += x).runToFuture
assertEquals(sum, 500 * 999)
assertEquals(f.value, Some(Success(())))
}
test("foreach protects against user error") { implicit s =>
val dummy = DummyException("dummy")
val f = Observable.fromIterable(0 until 1000).foreach(_ => throw dummy)
assertEquals(f.value, Some(Failure(dummy)))
}
test("foreachL protects against user error") { implicit s =>
val dummy = DummyException("dummy")
val f = Observable.fromIterable(0 until 1000).foreachL(_ => throw dummy).runToFuture
assertEquals(f.value, Some(Failure(dummy)))
}
}
|
monifu/monifu
|
monix-reactive/shared/src/test/scala/monix/reactive/internal/subscribers/ObservableForeachSuite.scala
|
Scala
|
apache-2.0
| 2,066 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail.batches
/**
* Reusable [[BatchCursor]] base class that can be used for
* implementing cursors by just providing the primitive operations,
* `hasNext`, `next` and `recommendedBatchSize`.
*/
abstract class GenericCursor[+A] extends BatchCursor[A] { self =>
def take(n: Int): BatchCursor[A] = {
if (n <= 0) BatchCursor.empty
else
new GenericCursor[A] {
private[this] var taken = 0
def hasNext(): Boolean =
taken < n && self.hasNext()
def next(): A = {
taken += 1
self.next()
}
def recommendedBatchSize: Int =
self.recommendedBatchSize
}
}
def drop(n: Int): BatchCursor[A] = {
if (n <= 0) self
else
new GenericCursor[A] {
private[this] var dropped = false
def hasNext(): Boolean = {
if (!dropped) {
dropped = true
var count = 0
while (count < n) {
if (!self.hasNext()) return false
self.next()
count += 1
}
}
self.hasNext()
}
def next(): A =
self.next()
def recommendedBatchSize: Int =
self.recommendedBatchSize
}
}
def slice(from: Int, until: Int): BatchCursor[A] =
drop(from).take(until - from)
def map[B](f: A => B): BatchCursor[B] =
new GenericCursor[B] {
def hasNext(): Boolean =
self.hasNext()
def next(): B =
f(self.next())
def recommendedBatchSize: Int =
self.recommendedBatchSize
}
def filter(p: A => Boolean): BatchCursor[A] =
new GenericCursor[A] {
private[this] var item: A = _
private[this] var hasItem: Boolean = false
def hasNext(): Boolean = hasItem || {
do {
if (!self.hasNext()) return false
item = self.next()
} while (!p(item))
hasItem = true
true
}
def next(): A = {
if (hasItem) {
hasItem = false
item
} else {
BatchCursor.empty.next()
}
}
def recommendedBatchSize: Int =
self.recommendedBatchSize
}
def collect[B](pf: PartialFunction[A, B]): BatchCursor[B] =
new GenericCursor[B] {
private[this] var item: A = _
private[this] var hasItem: Boolean = false
def hasNext(): Boolean = hasItem || {
do {
if (!self.hasNext()) return false
item = self.next()
} while (!pf.isDefinedAt(item))
hasItem = true
true
}
def next(): B = {
if (hasItem) {
hasItem = false
pf(item)
} else {
BatchCursor.empty.next()
}
}
def recommendedBatchSize: Int =
self.recommendedBatchSize
}
def toIterator: Iterator[A] =
new Iterator[A] {
def hasNext: Boolean = self.hasNext()
def next(): A = self.next()
}
}
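/**
 * Hedged sketch (added for illustration, not part of the original file): a minimal concrete
 * cursor built from just the three primitives; `take`, `drop`, `map`, `filter` and friends
 * are inherited from [[GenericCursor]] above.
 */
private[batches] final class RangeCursor(from: Int, until: Int) extends GenericCursor[Int] {
  private[this] var i = from

  def hasNext(): Boolean = i < until
  def next(): Int = { val r = i; i += 1; r }
  def recommendedBatchSize: Int = 16
}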
|
alexandru/monifu
|
monix-tail/shared/src/main/scala/monix/tail/batches/GenericCursor.scala
|
Scala
|
apache-2.0
| 3,639 |
package org.libss.lift.list
import org.libss.util.reflection.{FieldPath, FieldPathHelpers}
/**
* Created by Kaa
* on 29.06.2016 at 00:28.
*/
/**
 * Base trait for filtering descriptors: generates the filtering form fields and handles their values.
 *
 * @tparam E type of the filtering object that the implementing descriptor describes
*/
trait FilteringDescriptor[E] extends FieldPathHelpers {
def prefix: Option[String] = None
implicit def filteringControlIdGenFun = (fPath: FieldPath[_]) => prefix.getOrElse("") + fPath.toFieldId
def filteringFields: Seq[FilteringField[E, _]]
  def updateFilteringObjectWith(entity: E, fields: Seq[FilteringField[E, _]]): Unit
  def toStringMap: (Seq[FilteringField[E, _]]) => Map[String, String]
  def updateFromMap(filterControls: Seq[FilteringField[E, _]], params: Map[String, String]): Unit
  def updateControlsWith(filterControls: Seq[FilteringField[E, _]], entity: E): Unit
}
|
kanischev/libss
|
libss-web/src/main/scala/org/libss/lift/list/FilteringDescriptor.scala
|
Scala
|
apache-2.0
| 913 |
/*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.feel.impl.builtin
import org.camunda.feel.impl.FeelIntegrationTest
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec
import org.camunda.feel.syntaxtree._
import scala.math.BigDecimal.int2bigDecimal
/**
* @author Philipp
*/
class BuiltinStringFunctionsTest
extends AnyFlatSpec
with Matchers
with FeelIntegrationTest {
"A substring() function" should "return string with _ characters" in {
eval(""" substring("foobar",3) """) should be(ValString("obar"))
}
it should "return string with _ characters starting at _" in {
eval(""" substring("foobar",3,3) """) should be(ValString("oba"))
}
it should "return string with _ characters starting at negative _" in {
eval(""" substring("foobar",-2,1) """) should be(ValString("a"))
}
it should "be invoked with named parameters" in {
eval(""" substring(string: "foobar", start position:3) """) should be(
ValString("obar"))
}
"A string length() function" should "return the length of a String" in {
eval(""" string length("foo") """) should be(ValNumber(3))
}
"A upper case() function" should "return uppercased String" in {
eval(""" upper case("aBc4") """) should be(ValString("ABC4"))
}
"A lower case() function" should "return lowercased String" in {
eval(""" lower case("aBc4") """) should be(ValString("abc4"))
}
"A substring before() function" should "return substring before match" in {
eval(""" substring before("foobar", "bar") """) should be(ValString("foo"))
eval(""" substring before("foobar", "xyz") """) should be(ValString(""))
}
"A substring after() function" should "return substring after match" in {
eval(""" substring after("foobar", "ob") """) should be(ValString("ar"))
eval(""" substring after("", "a") """) should be(ValString(""))
eval(""" substring after("foo", "") """) should be(ValString("foo"))
}
"A replace() function" should "replace a String" in {
eval(""" replace("abcd", "(ab)|(a)", "[1=$1][2=$2]") """) should be(
ValString("[1=ab][2=]cd"))
}
it should "replace a String with regex pattern" in (eval(
""" replace("0123456789", "(\\d{3})(\\d{3})(\\d{4})", "($1) $2-$3") """) should be(
ValString("(012) 345-6789")))
it should "return null if the pattern is invalid" in {
eval(""" replace("abc", "([a-z)", "$1") """) should be(ValNull)
}
"A contains() function" should "return if contains the match" in {
eval(""" contains("foobar", "ob") """) should be(ValBoolean(true))
eval(""" contains("foobar", "of") """) should be(ValBoolean(false))
}
"A starts with() function" should "return if starts with match" in {
eval(""" starts with("foobar", "fo") """) should be(ValBoolean(true))
eval(""" starts with("foobar", "ba") """) should be(ValBoolean(false))
}
"A ends with() function" should "return if ends with match" in {
eval(""" ends with("foobar", "r") """) should be(ValBoolean(true))
eval(""" ends with("foobar", "o") """) should be(ValBoolean(false))
}
"A matches() function" should "return if String matches a pattern" in {
eval(""" matches("foobar", "^fo*b") """) should be(ValBoolean(true))
eval(""" matches("foobar", "^fo*z") """) should be(ValBoolean(false))
}
it should "return null if the pattern is invalid" in {
eval(""" matches("abc", "[a-z") """) should be(ValNull)
}
"A split() function" should "return a list of substrings" in {
eval(""" split("John Doe", "\\s") """) should be(
ValList(List(ValString("John"), ValString("Doe"))))
eval(""" split("a;b;c;;", ";") """) should be(
ValList(
List(ValString("a"),
ValString("b"),
ValString("c"),
ValString(""),
ValString(""))))
}
"An extract() function" should "return a list of strings matching a pattern" in {
eval(""" extract("this is foobar and folbar", "fo[a-z]*") """) should be(
ValList(List(ValString("foobar"), ValString("folbar"))))
eval(""" extract("nothing", "fo[a-z]*") """) should be(ValList(List()))
eval(""" extract("This is fobbar!", "fo[a-z]*") """) should be(
ValList(List(ValString("fobbar"))))
}
it should "return null if the pattern is invalid" in {
eval(""" extract("abc", "[a-z") """) should be(ValNull)
}
}
|
camunda/feel-scala
|
src/test/scala/org/camunda/feel/impl/builtin/BuiltinStringFunctionsTest.scala
|
Scala
|
apache-2.0
| 5,191 |
/*
* Copyright (c) 2014 -2015 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression
package proxies.phases
import scala.reflect.runtime.universe
import scala.tools.reflect.ToolBox
/**
* Implement all mock lambdas created during `MockTypedLambda` phase
*
* Transforms:
* {{{
* list.map[Int, List[Int]](org.scalaide.debug.internal.expression.context.JdiContext.placeholderFunction1[Int](
* "<random-name-of-compiled-lambda>", collection.this.Seq.apply[Int](int)
* ))(immutable.this.List.canBuildFrom[Int])
* }}}
* into:
* {{{
* list.map[Int, List[Int]](__context.newInstance(
* "<random-name-of-compiled-lambda>", Seq.apply(int)
* ))(immutable.this.List.canBuildFrom[Int])
* }}}
*/
case class ImplementTypedLambda(toolbox: ToolBox[universe.type])
extends AnonymousFunctionSupport[AfterTypecheck] {
import toolbox.u._
protected def transformSingleTree(baseTree: Tree, transformFurther: (Tree) => Tree): Tree = baseTree match {
case PlaceholderFunction(proxyType, _, closureArgs) =>
lambdaProxy(proxyType, closureArgs)
case _ => transformFurther(baseTree)
}
}
|
Kwestor/scala-ide
|
org.scala-ide.sdt.debug.expression/src/org/scalaide/debug/internal/expression/proxies/phases/ImplementTypedLambda.scala
|
Scala
|
bsd-3-clause
| 1,148 |
package org.velocity4s.context
import org.apache.velocity.context.Context
trait ScalaContext extends Context {
def keys: Iterable[String]
def +=(elem: (String, AnyRef), elems: (String, AnyRef)*): this.type
def ++=(elems: Iterable[(String, AnyRef)]): this.type
def -=(key: String, keys: String*): this.type
def --=(keys: Iterable[String]): this.type
}
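
// Hypothetical usage sketch: assuming `ctx` is some concrete ScalaContext
// implementation (none is defined in this file):
//
//   ctx += ("greeting" -> "hello")                       // add one binding (varargs for more)
//   ctx ++= Map[String, AnyRef]("a" -> "1", "b" -> "2")  // add several bindings at once
//   ctx --= Seq("a")                                     // remove bindings by key
//   ctx.keys.foreach(println)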
|
kazuhira-r/velocity4s
|
velocity4s/src/main/scala/org/velocity4s/context/ScalaContext.scala
|
Scala
|
apache-2.0
| 367 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
import kafka.utils._
import kafka.api.RequestKeys
trait SocketServerStatsMBean {
def getProduceRequestsPerSecond: Double
def getFetchRequestsPerSecond: Double
def getAvgProduceRequestMs: Double
def getMaxProduceRequestMs: Double
def getAvgFetchRequestMs: Double
def getMaxFetchRequestMs: Double
def getBytesReadPerSecond: Double
def getBytesWrittenPerSecond: Double
def getNumFetchRequests: Long
def getNumProduceRequests: Long
def getTotalBytesRead: Long
def getTotalBytesWritten: Long
def getTotalFetchRequestMs: Long
def getTotalProduceRequestMs: Long
}
@threadsafe
class SocketServerStats(val monitorDurationNs: Long, val time: Time) extends SocketServerStatsMBean {
def this(monitorDurationNs: Long) = this(monitorDurationNs, SystemTime)
val produceTimeStats = new SnapshotStats(monitorDurationNs)
val fetchTimeStats = new SnapshotStats(monitorDurationNs)
val produceBytesStats = new SnapshotStats(monitorDurationNs)
val fetchBytesStats = new SnapshotStats(monitorDurationNs)
def recordRequest(requestTypeId: Short, durationNs: Long) {
requestTypeId match {
case r if r == RequestKeys.Produce || r == RequestKeys.MultiProduce =>
produceTimeStats.recordRequestMetric(durationNs)
case r if r == RequestKeys.Fetch || r == RequestKeys.MultiFetch =>
fetchTimeStats.recordRequestMetric(durationNs)
case _ => /* not collecting; let go */
}
}
def recordBytesWritten(bytes: Int): Unit = fetchBytesStats.recordRequestMetric(bytes)
def recordBytesRead(bytes: Int): Unit = produceBytesStats.recordRequestMetric(bytes)
def getProduceRequestsPerSecond: Double = produceTimeStats.getRequestsPerSecond
def getFetchRequestsPerSecond: Double = fetchTimeStats.getRequestsPerSecond
def getAvgProduceRequestMs: Double = produceTimeStats.getAvgMetric / (1000.0 * 1000.0)
def getMaxProduceRequestMs: Double = produceTimeStats.getMaxMetric / (1000.0 * 1000.0)
def getAvgFetchRequestMs: Double = fetchTimeStats.getAvgMetric / (1000.0 * 1000.0)
def getMaxFetchRequestMs: Double = fetchTimeStats.getMaxMetric / (1000.0 * 1000.0)
def getBytesReadPerSecond: Double = produceBytesStats.getAvgMetric
def getBytesWrittenPerSecond: Double = fetchBytesStats.getAvgMetric
def getNumFetchRequests: Long = fetchTimeStats.getNumRequests
def getNumProduceRequests: Long = produceTimeStats.getNumRequests
def getTotalBytesRead: Long = produceBytesStats.getTotalMetric
def getTotalBytesWritten: Long = fetchBytesStats.getTotalMetric
def getTotalFetchRequestMs: Long = fetchTimeStats.getTotalMetric
def getTotalProduceRequestMs: Long = produceTimeStats.getTotalMetric
}
|
piavlo/operations-debs-kafka
|
core/src/main/scala/kafka/network/SocketServerStats.scala
|
Scala
|
apache-2.0
| 3,506 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.encoders
import scala.util.Random
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.{RandomDataGenerator, Row}
import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData}
import org.apache.spark.sql.types._
@SQLUserDefinedType(udt = classOf[ExamplePointUDT])
class ExamplePoint(val x: Double, val y: Double) extends Serializable {
override def hashCode: Int = 41 * (41 + x.toInt) + y.toInt
override def equals(that: Any): Boolean = {
if (that.isInstanceOf[ExamplePoint]) {
val e = that.asInstanceOf[ExamplePoint]
(this.x == e.x || (this.x.isNaN && e.x.isNaN) || (this.x.isInfinity && e.x.isInfinity)) &&
(this.y == e.y || (this.y.isNaN && e.y.isNaN) || (this.y.isInfinity && e.y.isInfinity))
} else {
false
}
}
}
/**
* User-defined type for [[ExamplePoint]].
*/
class ExamplePointUDT extends UserDefinedType[ExamplePoint] {
override def sqlType: DataType = ArrayType(DoubleType, false)
override def pyUDT: String = "pyspark.sql.tests.ExamplePointUDT"
override def serialize(p: ExamplePoint): GenericArrayData = {
val output = new Array[Any](2)
output(0) = p.x
output(1) = p.y
new GenericArrayData(output)
}
override def deserialize(datum: Any): ExamplePoint = {
datum match {
case values: ArrayData =>
if (values.numElements() > 1) {
new ExamplePoint(values.getDouble(0), values.getDouble(1))
} else {
val random = new Random()
new ExamplePoint(random.nextDouble(), random.nextDouble())
}
}
}
override def userClass: Class[ExamplePoint] = classOf[ExamplePoint]
private[spark] override def asNullable: ExamplePointUDT = this
}
class RowEncoderSuite extends SparkFunSuite {
private val structOfString = new StructType().add("str", StringType)
private val structOfUDT = new StructType().add("udt", new ExamplePointUDT, false)
private val arrayOfString = ArrayType(StringType)
private val arrayOfNull = ArrayType(NullType)
private val mapOfString = MapType(StringType, StringType)
private val arrayOfUDT = ArrayType(new ExamplePointUDT, false)
encodeDecodeTest(
new StructType()
.add("null", NullType)
.add("boolean", BooleanType)
.add("byte", ByteType)
.add("short", ShortType)
.add("int", IntegerType)
.add("long", LongType)
.add("float", FloatType)
.add("double", DoubleType)
.add("decimal", DecimalType.SYSTEM_DEFAULT)
.add("string", StringType)
.add("binary", BinaryType)
.add("date", DateType)
.add("timestamp", TimestampType)
.add("udt", new ExamplePointUDT))
encodeDecodeTest(
new StructType()
.add("arrayOfNull", arrayOfNull)
.add("arrayOfString", arrayOfString)
.add("arrayOfArrayOfString", ArrayType(arrayOfString))
.add("arrayOfArrayOfInt", ArrayType(ArrayType(IntegerType)))
.add("arrayOfMap", ArrayType(mapOfString))
.add("arrayOfStruct", ArrayType(structOfString))
.add("arrayOfUDT", arrayOfUDT))
encodeDecodeTest(
new StructType()
.add("mapOfIntAndString", MapType(IntegerType, StringType))
.add("mapOfStringAndArray", MapType(StringType, arrayOfString))
.add("mapOfArrayAndInt", MapType(arrayOfString, IntegerType))
.add("mapOfArray", MapType(arrayOfString, arrayOfString))
.add("mapOfStringAndStruct", MapType(StringType, structOfString))
.add("mapOfStructAndString", MapType(structOfString, StringType))
.add("mapOfStruct", MapType(structOfString, structOfString)))
encodeDecodeTest(
new StructType()
.add("structOfString", structOfString)
.add("structOfStructOfString", new StructType().add("struct", structOfString))
.add("structOfArray", new StructType().add("array", arrayOfString))
.add("structOfMap", new StructType().add("map", mapOfString))
.add("structOfArrayAndMap",
new StructType().add("array", arrayOfString).add("map", mapOfString))
.add("structOfUDT", structOfUDT))
test("encode/decode decimal type") {
val schema = new StructType()
.add("int", IntegerType)
.add("string", StringType)
.add("double", DoubleType)
.add("java_decimal", DecimalType.SYSTEM_DEFAULT)
.add("scala_decimal", DecimalType.SYSTEM_DEFAULT)
.add("catalyst_decimal", DecimalType.SYSTEM_DEFAULT)
val encoder = RowEncoder(schema).resolveAndBind()
val javaDecimal = new java.math.BigDecimal("1234.5678")
val scalaDecimal = BigDecimal("1234.5678")
val catalystDecimal = Decimal("1234.5678")
val input = Row(100, "test", 0.123, javaDecimal, scalaDecimal, catalystDecimal)
val row = encoder.toRow(input)
val convertedBack = encoder.fromRow(row)
// Decimal will be converted back to Java BigDecimal when decoding.
assert(convertedBack.getDecimal(3).compareTo(javaDecimal) == 0)
assert(convertedBack.getDecimal(4).compareTo(scalaDecimal.bigDecimal) == 0)
assert(convertedBack.getDecimal(5).compareTo(catalystDecimal.toJavaBigDecimal) == 0)
}
test("RowEncoder should preserve decimal precision and scale") {
val schema = new StructType().add("decimal", DecimalType(10, 5), false)
val encoder = RowEncoder(schema).resolveAndBind()
val decimal = Decimal("67123.45")
val input = Row(decimal)
val row = encoder.toRow(input)
assert(row.toSeq(schema).head == decimal)
}
test("RowEncoder should preserve schema nullability") {
val schema = new StructType().add("int", IntegerType, nullable = false)
val encoder = RowEncoder(schema).resolveAndBind()
assert(encoder.serializer.length == 1)
assert(encoder.serializer.head.dataType == IntegerType)
assert(encoder.serializer.head.nullable == false)
}
test("RowEncoder should preserve nested column name") {
val schema = new StructType().add(
"struct",
new StructType()
.add("i", IntegerType, nullable = false)
.add(
"s",
new StructType().add("int", IntegerType, nullable = false),
nullable = false),
nullable = false)
val encoder = RowEncoder(schema).resolveAndBind()
assert(encoder.serializer.length == 1)
assert(encoder.serializer.head.dataType ==
new StructType()
.add("i", IntegerType, nullable = false)
.add(
"s",
new StructType().add("int", IntegerType, nullable = false),
nullable = false))
assert(encoder.serializer.head.nullable == false)
}
test("RowEncoder should support primitive arrays") {
val schema = new StructType()
.add("booleanPrimitiveArray", ArrayType(BooleanType, false))
.add("bytePrimitiveArray", ArrayType(ByteType, false))
.add("shortPrimitiveArray", ArrayType(ShortType, false))
.add("intPrimitiveArray", ArrayType(IntegerType, false))
.add("longPrimitiveArray", ArrayType(LongType, false))
.add("floatPrimitiveArray", ArrayType(FloatType, false))
.add("doublePrimitiveArray", ArrayType(DoubleType, false))
val encoder = RowEncoder(schema).resolveAndBind()
val input = Seq(
Array(true, false),
Array(1.toByte, 64.toByte, Byte.MaxValue),
Array(1.toShort, 255.toShort, Short.MaxValue),
Array(1, 10000, Int.MaxValue),
Array(1.toLong, 1000000.toLong, Long.MaxValue),
Array(1.1.toFloat, 123.456.toFloat, Float.MaxValue),
Array(11.1111, 123456.7890123, Double.MaxValue)
)
val row = encoder.toRow(Row.fromSeq(input))
val convertedBack = encoder.fromRow(row)
input.zipWithIndex.map { case (array, index) =>
assert(convertedBack.getSeq(index) === array)
}
}
test("RowEncoder should support array as the external type for ArrayType") {
val schema = new StructType()
.add("array", ArrayType(IntegerType))
.add("nestedArray", ArrayType(ArrayType(StringType)))
.add("deepNestedArray", ArrayType(ArrayType(ArrayType(LongType))))
val encoder = RowEncoder(schema).resolveAndBind()
val input = Row(
Array(1, 2, null),
Array(Array("abc", null), null),
Array(Seq(Array(0L, null), null), null))
val row = encoder.toRow(input)
val convertedBack = encoder.fromRow(row)
assert(convertedBack.getSeq(0) == Seq(1, 2, null))
assert(convertedBack.getSeq(1) == Seq(Seq("abc", null), null))
assert(convertedBack.getSeq(2) == Seq(Seq(Seq(0L, null), null), null))
}
test("RowEncoder should throw RuntimeException if input row object is null") {
val schema = new StructType().add("int", IntegerType)
val encoder = RowEncoder(schema)
val e = intercept[RuntimeException](encoder.toRow(null))
assert(e.getMessage.contains("Null value appeared in non-nullable field"))
assert(e.getMessage.contains("top level row object"))
}
test("RowEncoder should validate external type") {
val e1 = intercept[RuntimeException] {
val schema = new StructType().add("a", IntegerType)
val encoder = RowEncoder(schema)
encoder.toRow(Row(1.toShort))
}
assert(e1.getMessage.contains("java.lang.Short is not a valid external type"))
val e2 = intercept[RuntimeException] {
val schema = new StructType().add("a", StringType)
val encoder = RowEncoder(schema)
encoder.toRow(Row(1))
}
assert(e2.getMessage.contains("java.lang.Integer is not a valid external type"))
val e3 = intercept[RuntimeException] {
val schema = new StructType().add("a",
new StructType().add("b", IntegerType).add("c", StringType))
val encoder = RowEncoder(schema)
encoder.toRow(Row(1 -> "a"))
}
assert(e3.getMessage.contains("scala.Tuple2 is not a valid external type"))
val e4 = intercept[RuntimeException] {
val schema = new StructType().add("a", ArrayType(TimestampType))
val encoder = RowEncoder(schema)
encoder.toRow(Row(Array("a")))
}
assert(e4.getMessage.contains("java.lang.String is not a valid external type"))
}
for {
elementType <- Seq(IntegerType, StringType)
containsNull <- Seq(true, false)
nullable <- Seq(true, false)
} {
test("RowEncoder should preserve array nullability: " +
s"ArrayType($elementType, containsNull = $containsNull), nullable = $nullable") {
val schema = new StructType().add("array", ArrayType(elementType, containsNull), nullable)
val encoder = RowEncoder(schema).resolveAndBind()
assert(encoder.serializer.length == 1)
assert(encoder.serializer.head.dataType == ArrayType(elementType, containsNull))
assert(encoder.serializer.head.nullable == nullable)
}
}
for {
keyType <- Seq(IntegerType, StringType)
valueType <- Seq(IntegerType, StringType)
valueContainsNull <- Seq(true, false)
nullable <- Seq(true, false)
} {
test("RowEncoder should preserve map nullability: " +
s"MapType($keyType, $valueType, valueContainsNull = $valueContainsNull), " +
s"nullable = $nullable") {
val schema = new StructType().add(
"map", MapType(keyType, valueType, valueContainsNull), nullable)
val encoder = RowEncoder(schema).resolveAndBind()
assert(encoder.serializer.length == 1)
assert(encoder.serializer.head.dataType == MapType(keyType, valueType, valueContainsNull))
assert(encoder.serializer.head.nullable == nullable)
}
}
private def encodeDecodeTest(schema: StructType): Unit = {
test(s"encode/decode: ${schema.simpleString}") {
val encoder = RowEncoder(schema).resolveAndBind()
val inputGenerator = RandomDataGenerator.forType(schema, nullable = false).get
var input: Row = null
try {
for (_ <- 1 to 5) {
input = inputGenerator.apply().asInstanceOf[Row]
val row = encoder.toRow(input)
val convertedBack = encoder.fromRow(row)
assert(input == convertedBack)
}
} catch {
case e: Exception =>
fail(
s"""
|schema: ${schema.simpleString}
|input: ${input}
""".stripMargin, e)
}
}
}
}
|
akopich/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/encoders/RowEncoderSuite.scala
|
Scala
|
apache-2.0
| 12,912 |
package com.twitter.finagle.redis.filter
import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.finagle.redis.protocol.{Command, Reply}
import com.twitter.finagle.redis.util.BufToString
import com.twitter.finagle.tracing.{Annotation, Trace}
import com.twitter.util.Future
private[redis] class RedisTracingFilter extends SimpleFilter[Command, Reply] {
def apply(command: Command, service: Service[Command, Reply]): Future[Reply] = {
val trace = Trace()
if (trace.isActivelyTracing) {
trace.recordServiceName("redis")
trace.recordRpc(BufToString(command.name))
trace.record(Annotation.ClientSend)
service(command).ensure(trace.record(Annotation.ClientRecv))
} else service(command)
}
}
|
luciferous/finagle
|
finagle-redis/src/main/scala/com/twitter/finagle/redis/filter/RedisTracingFilter.scala
|
Scala
|
apache-2.0
| 743 |
package org.locationtech.geomesa.core.process.tube
import java.util.Date
import com.typesafe.scalalogging.slf4j.Logging
import com.vividsolutions.jts.geom._
import org.geotools.data.Query
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureSource}
import org.geotools.data.store.{EmptyFeatureCollection, ReTypingFeatureCollection}
import org.geotools.factory.CommonFactoryFinder
import org.geotools.feature.visitor._
import org.geotools.process.factory.{DescribeParameter, DescribeProcess, DescribeResult}
import org.geotools.util.NullProgressListener
import org.locationtech.geomesa.core.data.AccumuloFeatureCollection
import org.locationtech.geomesa.core.index.Constants
import org.locationtech.geomesa.core.process.tube.GapFill.GapFill
import org.locationtech.geomesa.core.util.UniqueMultiCollection
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.opengis.feature.Feature
import org.opengis.filter.Filter
import scala.collection.JavaConversions._
@DescribeProcess(
title = "Tube Select",
description = "Performs a tube select on a Geomesa feature collection based on another feature collection"
)
class TubeSelectProcess extends Logging {
@DescribeResult(description = "Output feature collection")
def execute(
@DescribeParameter(
name = "tubeFeatures",
description = "Input feature collection (must have geometry and datetime)")
tubeFeatures: SimpleFeatureCollection,
@DescribeParameter(
name = "featureCollection",
description = "The data set to query for matching features")
featureCollection: SimpleFeatureCollection,
@DescribeParameter(
name = "filter",
min = 0,
description = "The filter to apply to the featureCollection")
filter: Filter,
@DescribeParameter(
name = "maxSpeed",
min = 0,
description = "Max speed of the object in m/s for nofill & line gapfill methods")
maxSpeed: java.lang.Long,
@DescribeParameter(
name = "maxTime",
min = 0,
description = "Time as seconds for nofill & line gapfill methods")
maxTime: java.lang.Long,
@DescribeParameter(
name = "bufferSize",
min = 0,
description = "Buffer size in meters to use instead of maxSpeed/maxTime calculation")
bufferSize: java.lang.Double,
@DescribeParameter(
name = "maxBins",
min = 0,
description = "Number of bins to use for breaking up query into individual queries")
maxBins: java.lang.Integer,
@DescribeParameter(
name = "gapFill",
min = 0,
description = "Method of filling gap (nofill, line)")
gapFill: String
): SimpleFeatureCollection = {
logger.info("Tube selecting on collection type "+featureCollection.getClass.getName)
// assume for now that firstFeatures is a singleton collection
val tubeVisitor = new TubeVisitor(
tubeFeatures,
featureCollection,
Option(filter).getOrElse(Filter.INCLUDE),
Option(maxSpeed).getOrElse(0L).asInstanceOf[Long],
Option(maxTime).getOrElse(0L).asInstanceOf[Long],
Option(bufferSize).getOrElse(0.0).asInstanceOf[Double],
Option(maxBins).getOrElse(0).asInstanceOf[Int],
Option(gapFill).map(GapFill.withName(_)).getOrElse(GapFill.NOFILL))
if(!featureCollection.isInstanceOf[AccumuloFeatureCollection]) {
logger.warn("The provided feature collection type may not support tubing: "+featureCollection.getClass.getName)
}
if(featureCollection.isInstanceOf[ReTypingFeatureCollection]) {
logger.warn("WARNING: layer name in geoserver must match feature type name in geomesa")
}
featureCollection.accepts(tubeVisitor, new NullProgressListener)
tubeVisitor.getResult.asInstanceOf[TubeResult].results
}
}
object GapFill extends Enumeration{
type GapFill = Value
val NOFILL = Value("nofill")
val LINE = Value("line")
}
class TubeVisitor(val tubeFeatures: SimpleFeatureCollection,
val featureCollection: SimpleFeatureCollection,
val filter: Filter = Filter.INCLUDE,
val maxSpeed: Long,
val maxTime: Long,
val bufferSize: Double,
val maxBins: Int,
val gapFill: GapFill = GapFill.NOFILL)
extends FeatureCalc
with Logging {
var resultCalc: TubeResult = new TubeResult(new EmptyFeatureCollection(featureCollection.getSchema))
def visit(feature: Feature): Unit = {}
override def getResult: CalcResult = resultCalc
def setValue(r: SimpleFeatureCollection) = resultCalc = TubeResult(r)
val ff = CommonFactoryFinder.getFilterFactory2
val bufferDistance = if(bufferSize > 0) bufferSize else maxSpeed * maxTime
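  // e.g. maxSpeed = 10 m/s and maxTime = 60 s (with no explicit bufferSize) give
  // a tube buffered by 10 * 60 = 600 m around the input features.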
def tubeSelect(source: SimpleFeatureSource, query: Query): SimpleFeatureCollection = {
logger.info("Visiting source type: "+source.getClass.getName)
val geomProperty = ff.property(source.getSchema.getGeometryDescriptor.getName)
val dateProperty = ff.property(source.getSchema.getUserData.get(Constants.SF_PROPERTY_START_TIME).asInstanceOf[String])
logger.debug("Querying with date property: "+dateProperty)
logger.debug("Querying with geometry property: "+geomProperty)
// Create a time binned set of tube features with no gap filling
val tubeBuilder = gapFill match {
case GapFill.LINE => new LineGapFill(tubeFeatures, bufferDistance, maxBins)
case _ => new NoGapFill(tubeFeatures, bufferDistance, maxBins)
}
val tube = tubeBuilder.createTube
val queryResults = tube.map { sf =>
val sfMin = tubeBuilder.getStartTime(sf).getTime
val minDate = new Date(sfMin - maxTime*1000)
val sfMax = tubeBuilder.getEndTime(sf).getTime
val maxDate = new Date(sfMax + maxTime*1000)
val dtg1 = ff.greater(dateProperty, ff.literal(minDate))
val dtg2 = ff.less(dateProperty, ff.literal(maxDate))
val geom = sf.getDefaultGeometry.asInstanceOf[Geometry]
// Eventually these can be combined into OR queries and the QueryPlanner can create multiple Accumulo Ranges
      // But for now we issue multiple queries
val geoms = (0 until geom.getNumGeometries).map { i => geom.getGeometryN(i) }
geoms.flatMap { g =>
val geomFilter = ff.intersects(geomProperty, ff.literal(g))
val combinedFilter = ff.and(List(query.getFilter, geomFilter, dtg1, dtg2, filter))
source.getFeatures(combinedFilter).features
}
}
// Time slices may not be disjoint so we have to buffer results and dedup for now
new UniqueMultiCollection(source.getSchema, queryResults)
}
}
case class TubeResult(results: SimpleFeatureCollection) extends AbstractCalcResult
|
jwkessi/geomesa
|
geomesa-core/src/main/scala/org/locationtech/geomesa/core/process/tube/TubeSelectProcess.scala
|
Scala
|
apache-2.0
| 7,397 |
package com.github.mauricio.async.db.mysql.message.client
import io.netty.buffer.ByteBuf
case class SendLongDataMessage(
statementId: Array[Byte],
value: ByteBuf,
paramId: Int
)
|
dripower/postgresql-async
|
mysql-async/src/main/scala/com/github/mauricio/async/db/mysql/message/client/SendLongDataMessage.scala
|
Scala
|
apache-2.0
| 186 |
package de.frosner.broccoli.websocket
import scala.concurrent.duration.Duration
/**
* Configuration for Broccoli's websocket
*
* @param cacheTimeout The timeout for the websocket message cache
*/
final case class WebSocketConfiguration(cacheTimeout: Duration)
|
FRosner/cluster-broccoli
|
server/src/main/scala/de/frosner/broccoli/websocket/WebSocketConfiguration.scala
|
Scala
|
apache-2.0
| 270 |
def problem1(limit: Int, factors: List[Int]): Int = {
def cond(n: Int): Boolean = {
    factors.exists(x => n % x == 0)
}
def allInts: Stream[Int] = {
1.until(limit).toStream
}
def onlyMults: Stream[Int] = {
allInts.filter(cond)
}
onlyMults.sum
}
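
// Example (Project Euler #1): the multiples of 3 or 5 below 10 are 3, 5, 6 and 9,
// so problem1(10, List(3, 5)) == 23; the puzzle itself asks for problem1(1000, List(3, 5)).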
|
ptwales/Project-Euler
|
Scala/problem01.scala
|
Scala
|
mit
| 276 |
sealed abstract class Z
object Z {
object Z0 extends Z
case class Z1() extends Z
object Z2 extends Z
case class Z3() extends Z
object Z4 extends Z
case class Z5() extends Z
object Z6 extends Z
case class Z7() extends Z
object Z8 extends Z
case class Z9() extends Z
object Z10 extends Z
case class Z11() extends Z
object Z12 extends Z
case class Z13() extends Z
object Z14 extends Z
case class Z15() extends Z
object Z16 extends Z
case class Z17() extends Z
object Z18 extends Z
case class Z19() extends Z
}
object Test {
import Z._
def foo(z: Z) = z match {
case Z0 | Z1() | Z2 | Z3() | Z4 | Z5() | Z6 | Z7() | Z8 | Z9() |
Z10 | Z12 | Z13() | Z14 | Z15() | Z16 | Z17() | Z18 | Z19()
=>
}
}
|
som-snytt/dotty
|
tests/patmat/t6582_exhaust_big.scala
|
Scala
|
apache-2.0
| 858 |
package scalismo.mesh
import scalismo.ScalismoTestSuite
import scalismo.common.PointId
import scalismo.geometry.{_3D, Point, Point3D}
import scalismo.mesh.boundingSpheres.BoundingSphereHelpers
import scalismo.utils.Random
import vtk.{vtkTetra, vtkTriangle}
class BarycentricCoordinateTests extends ScalismoTestSuite {
implicit val rng: Random = Random(1024L)
val epsilon = 1.0e-8
def genPoint()(implicit rng: Random) = Point3D(
rng.scalaRandom.nextDouble() * 20 - 10,
rng.scalaRandom.nextDouble() * 20 - 10,
rng.scalaRandom.nextDouble() * 20 - 10
)
def generatePositiveOrientedSingleTetrahedronMesh(): TetrahedralMesh3D = {
val points = {
val points = IndexedSeq.fill(4)(genPoint)
if (BoundingSphereHelpers.calculateSignedVolume(points(0).toVector,
points(1).toVector,
points(2).toVector,
points(3).toVector) < 0) {
IndexedSeq(points(1), points(2), points(3), points(0))
} else
points
}
TetrahedralMesh3D(points,
TetrahedralList(IndexedSeq(TetrahedralCell(PointId(0), PointId(1), PointId(2), PointId(3)))))
}
def getBarycentricCoordinatesFromVTK(a: Point[_3D],
b: Point[_3D],
c: Point[_3D],
point: Point[_3D]): IndexedSeq[Double] = {
val barycentricCoordinates = new Array[Double](3)
val vtkTriangle = new vtkTriangle()
vtkTriangle.BarycentricCoords(point.toArray, a.toArray, b.toArray, c.toArray, barycentricCoordinates)
vtkTriangle.Delete()
barycentricCoordinates.toIndexedSeq
}
def getBarycentricCoordinatesFromVTK(a: Point[_3D],
b: Point[_3D],
c: Point[_3D],
d: Point[_3D],
point: Point[_3D]): IndexedSeq[Double] = {
val barycentricCoordinates = new Array[Double](4)
val vtkTetra = new vtkTetra()
vtkTetra.BarycentricCoords(point.toArray, a.toArray, b.toArray, c.toArray, d.toArray, barycentricCoordinates)
vtkTetra.Delete()
barycentricCoordinates.toIndexedSeq
}
describe("Barycentric coordinates for a triangle") {
it("should calculate the same values as defined for the vertices") {
for (j <- 0 until 10) {
val a = genPoint()
val b = genPoint()
val c = genPoint()
{
val bc = BarycentricCoordinates.pointInTriangle3D(a, a, b, c)
val bcDef = BarycentricCoordinates.v0
bc.a should be(bcDef.a +- epsilon)
bc.b should be(bcDef.b +- epsilon)
bc.c should be(bcDef.c +- epsilon)
}
        {
          // vertex b should map to the canonical barycentric coordinates v1
          val bc = BarycentricCoordinates.pointInTriangle3D(b, a, b, c)
          val bcDef = BarycentricCoordinates.v1
          bc.a should be(bcDef.a +- epsilon)
          bc.b should be(bcDef.b +- epsilon)
          bc.c should be(bcDef.c +- epsilon)
        }
        {
          // vertex c should map to the canonical barycentric coordinates v2
          val bc = BarycentricCoordinates.pointInTriangle3D(c, a, b, c)
          val bcDef = BarycentricCoordinates.v2
          bc.a should be(bcDef.a +- epsilon)
          bc.b should be(bcDef.b +- epsilon)
          bc.c should be(bcDef.c +- epsilon)
        }
}
}
it("should return the same coordinates for a point as VTK") {
for (j <- 0 until 10) {
val a = genPoint()
val b = genPoint()
val c = genPoint()
for (i <- 0 until 20) {
val randomBC = BarycentricCoordinates.randomUniform
val randomPointInTriangle =
(a.toVector * randomBC.a + b.toVector * randomBC.b + c.toVector * randomBC.c).toPoint
val bc = BarycentricCoordinates.pointInTriangle3D(randomPointInTriangle, a, b, c)
val bcVTK = getBarycentricCoordinatesFromVTK(a, b, c, randomPointInTriangle)
bc.a should be(bcVTK(0) +- epsilon)
bc.b should be(bcVTK(1) +- epsilon)
bc.c should be(bcVTK(2) +- epsilon)
}
}
}
it("should return the same coordinates in 2d as in 3d for flat triangles") {
for (j <- 0 until 10) {
val a = genPoint.copy(z = 0.0)
val b = genPoint.copy(z = 0.0)
val c = genPoint.copy(z = 0.0)
def to2d(pt: Point[_3D]) = Point(pt.x, pt.y)
for (i <- 0 until 20) {
val randomPoint = genPoint.copy(z = 0.0)
val bc3D = BarycentricCoordinates.pointInTriangle3D(randomPoint, a, b, c)
val bc2D = BarycentricCoordinates.pointInTriangle(to2d(randomPoint), to2d(a), to2d(b), to2d(c))
bc3D.a should be(bc2D.a +- epsilon)
bc3D.b should be(bc2D.b +- epsilon)
bc3D.c should be(bc2D.c +- epsilon)
}
}
}
it("should calculate the barycentric coordinates form the generated point") {
for (j <- 0 until 10) {
val a = genPoint()
val b = genPoint()
val c = genPoint()
for (i <- 0 until 20) {
val randomBC = BarycentricCoordinates.randomUniform
val pointFromBC =
(a.toVector * randomBC.a + b.toVector * randomBC.b + c.toVector * randomBC.c).toPoint
val bc = BarycentricCoordinates.pointInTriangle3D(pointFromBC, a, b, c)
bc.a should be(randomBC.a +- epsilon)
bc.b should be(randomBC.b +- epsilon)
bc.c should be(randomBC.c +- epsilon)
}
}
}
}
describe("Barycentric coordinates for a tetrahedron") {
it("should return the same coordinates for a point as VTK") {
for (j <- 0 until 10) {
val tmesh = generatePositiveOrientedSingleTetrahedronMesh()
val cell = tmesh.cells.head
val a = tmesh.pointSet.point(cell.ptId1)
val b = tmesh.pointSet.point(cell.ptId2)
val c = tmesh.pointSet.point(cell.ptId3)
val d = tmesh.pointSet.point(cell.ptId4)
for (i <- 0 until 20) {
val randomPoint = genPoint()
val bc = BarycentricCoordinates4.pointInTetrahedron(randomPoint, a, b, c, d)
val bcVTK = getBarycentricCoordinatesFromVTK(a, b, c, d, randomPoint)
bc.a should be(bcVTK(0) +- epsilon)
bc.b should be(bcVTK(1) +- epsilon)
bc.c should be(bcVTK(2) +- epsilon)
bc.d should be(bcVTK(3) +- epsilon)
}
}
}
it("should calculate coordinates faster than using VTK") {
val tmesh = generatePositiveOrientedSingleTetrahedronMesh()
val cell = tmesh.cells.head
val a = tmesh.pointSet.point(cell.ptId1)
val b = tmesh.pointSet.point(cell.ptId2)
val c = tmesh.pointSet.point(cell.ptId3)
val d = tmesh.pointSet.point(cell.ptId4)
val N = 10000
val startVTK = System.currentTimeMillis()
for (i <- 0 until N) {
val randomPoint = genPoint()
getBarycentricCoordinatesFromVTK(a, b, c, d, randomPoint)
}
val vtkTime = System.currentTimeMillis() - startVTK
val startScala = System.currentTimeMillis()
for (i <- 0 until N) {
val randomPoint = genPoint()
BarycentricCoordinates4.pointInTetrahedron(randomPoint, a, b, c, d)
}
val scalaTime = System.currentTimeMillis() - startScala
scalaTime should be < vtkTime
}
it("should reconstruct the point from the bc coordinates") {
val tmesh = generatePositiveOrientedSingleTetrahedronMesh()
val cell = tmesh.cells.head
val a = tmesh.pointSet.point(cell.ptId1)
val b = tmesh.pointSet.point(cell.ptId2)
val c = tmesh.pointSet.point(cell.ptId3)
val d = tmesh.pointSet.point(cell.ptId4)
for (i <- 0 until 1000) {
val randomPoint = genPoint()
val bc = BarycentricCoordinates4.pointInTetrahedron(randomPoint, a, b, c, d)
val pt = bc.a *: a.toVector + bc.b *: b.toVector + bc.c *: c.toVector + bc.d *: d.toVector
(randomPoint.toVector - pt).norm should be < 1e-8
}
}
}
}
|
unibas-gravis/scalismo
|
src/test/scala/scalismo/mesh/BarycentricCoordinateTests.scala
|
Scala
|
apache-2.0
| 8,093 |
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
object kmeans_1000g_against_ddd {
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("K-Means (1000G against DDD)")
val sc = new SparkContext(conf)
val hdfs_path = "hdfs:/user/ndewit/"
val input_path = hdfs_path + "aligned_both_variants"
val output_path = hdfs_path + "1000G_vs_DDD_Kmeans_results.txt"
val numClusters = if (args.length > 0) { args(0).toInt } else { 2 }
val numIterations = 20
val numRuns = 1
val outliers_limit = if (args.length > 1) { args(1).toInt } else { 0 }
/* ... new cell ... */
import org.apache.spark.mllib.linalg.Vectors
var samples = sc.objectFile[(String, org.apache.spark.mllib.linalg.Vector)](input_path)
var nb_samples = samples.count
/* ... new cell ... */
import org.apache.spark.mllib.clustering.{KMeans, KMeansModel}
var check_outliers = Array[String]()
var nb_outliers = check_outliers.size
var predicted_clusters = sc.parallelize(Array[(String, Int)]())
do {
val values_only = samples.values
val kmeans_model = KMeans.train(values_only, numClusters, numIterations, numRuns)
predicted_clusters = samples.mapValues{kmeans_model.predict(_)}.persist
check_outliers = predicted_clusters.
map{ case (patient, cluster) => (cluster, patient) }.
aggregateByKey(scala.collection.mutable.HashSet.empty[String])(_+_, _++_).values.
flatMap{
v =>
if (v.size > outliers_limit) { List("") }
else { v.toList }
}.collect.filter(v => v != "")
nb_outliers = check_outliers.size
samples = samples.filter(s => !check_outliers.contains(s._1))
nb_samples = samples.count
println(nb_outliers + " outliers removed " +
"(" + check_outliers.mkString(", ") + ") " +
": " + nb_samples + " samples remaining.")
} while (nb_outliers > 0 && nb_samples > numClusters)
/* ... new cell ... */
import org.apache.hadoop.fs._
import java.io.BufferedOutputStream
val fs = FileSystem.get(sc.hadoopConfiguration)
class TextFile(file_path : String) {
val physical_file = fs.create(new Path(file_path))
val stream = new BufferedOutputStream(physical_file)
def write(text : String) : Unit = {
stream.write(text.getBytes("UTF-8"))
}
def close() : Unit = {
stream.close()
}
}
val t = new TextFile(output_path)
/* ... new cell ... */
def RID(to_eval : RDD[((String, (Int, Int)), (String, (Int, Int)))]) : Double = {
def choose2(n : Int) : Double = {
return n * (n - 1) / 2;
}
val denom = choose2(nb_samples.toInt) //Denominator of RID is (nb_samples choose 2)
// a : number of pairs in the same cluster in C and in K
// b : number of pairs in different clusters in C and in K
val a = sc.accumulator(0, "Acc a : same cluster in both")
val b = sc.accumulator(0, "Acc b : different cluster in both")
to_eval.foreach{
case ((id1, classes1), (id2, classes2)) =>
if (id1 != id2) {
if (classes1._1 == classes2._1 && classes1._2 == classes2._2) {
a += 1 //Classes match, and they should
}
else if (classes1._1 != classes2._1 && classes1._2 != classes2._2) {
b += 1 //Classes don't match, and they shouldn't
}
}
}
//We divide these counts by two since each pair was counted in both orders (a,b and b,a)
(a.value/2 + b.value/2) / denom
}
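    // This is the (unadjusted) Rand index, RI = (a + b) / C(n, 2): the fraction of
    // sample pairs on which the predicted clustering and the 1000G-vs-DDD labels
    // agree, so values near 1 indicate strong agreement.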
val both = predicted_clusters.map{
case (k, predicted) =>
var label = 0
if (k.slice(0,3) == "DDD") { label = 1 }
(k, (label, predicted))
}
val eval_res = RID(both.cartesian(both))
val txt = s"RID = $eval_res"
println(txt)
t.write(txt + "\\n")
predicted_clusters.map{case(a,b) => (b,a)}.
groupByKey.collect.foreach{
case (cluster, list_patients) =>
println("--- Cluster " + cluster.toString + " ---")
t.write("--- Cluster " + cluster.toString + " ---\\n")
list_patients.foreach{
p =>
println(p)
t.write(p + "\\n")
}
}
/* ... new cell ... */
t.close()
}
}
|
neurodev-thesis/notebooks
|
script examples/kmeans_1000g_against_ddd/src/main/scala/kmeans_1000g_against_ddd.scala
|
Scala
|
apache-2.0
| 4,205 |
package app
import org.scalatra.servlet.ScalatraAsyncSupport
import util._
import ControlUtil._
import Implicits._
import service._
import jp.sf.amateras.scalatra.forms._
class SearchController extends SearchControllerBase
with RepositoryService with AccountService with ActivityService
with RepositorySearchService with IssuesService with ReferrerAuthenticator with ScalatraAsyncSupport
trait SearchControllerBase extends ControllerBase { self: RepositoryService
with ActivityService with RepositorySearchService with ReferrerAuthenticator =>
val searchForm = mapping(
"query" -> trim(text(required)),
"owner" -> trim(text(required)),
"repository" -> trim(text(required))
)(SearchForm.apply)
case class SearchForm(query: String, owner: String, repository: String)
post("/search", searchForm){ form =>
redirect(s"/${form.owner}/${form.repository}/search?q=${StringUtil.urlEncode(form.query)}")
}
get("/:owner/:repository/search")(referrersOnly { repository =>
defining(params("q").trim, params.getOrElse("type", "code")){ case (query, target) =>
val page = try {
val i = params.getOrElse("page", "1").toInt
if(i <= 0) 1 else i
} catch {
case e: NumberFormatException => 1
}
target.toLowerCase match {
case "issue" => search.html.issues(
searchIssues(repository.owner, repository.name, query),
countFiles(repository.owner, repository.name, query),
query, page, repository)
case _ => search.html.code(
searchFiles(repository.owner, repository.name, query),
countIssues(repository.owner, repository.name, query),
query, page, repository)
}
}
})
}
|
mqshen/gitbucket
|
src/main/scala/app/SearchController.scala
|
Scala
|
apache-2.0
| 1,740 |
// diversity-maximization: Diversity maximization in Streaming and MapReduce
// Copyright (C) 2016 Matteo Ceccarello <[email protected]>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package it.unipd.dei.diversity
import it.unipd.dei.experiment.Experiment
import it.unipd.dei.diversity.ExperimentUtil.jMap
import org.apache.spark.storage.StorageLevel
import org.apache.spark.{SparkConf, SparkContext}
import org.rogach.scallop.ScallopConf
object MainPoints {
def main(args: Array[String]) {
// Read command line options
val opts = new Conf(args)
opts.verify()
val algorithm = opts.algorithm()
val input = opts.input()
val kList = opts.target().split(",").map{_.toInt}
// The kernel size list is actually optional
val kernelSizeList: Seq[Option[Int]] =
opts.kernelSize.get.map { arg =>
arg.split(",").map({x => Some(x.toInt)}).toSeq
}.getOrElse(Seq(None))
val runs = opts.runs()
val approxRuns = opts.approxRuns()
val computeFarthest = opts.farthest()
val computeMatching = opts.matching()
val directory = opts.directory()
val partitioning = opts.partitioning()
val distance: (Point, Point) => Double = Distance.euclidean
// Set up Spark lazily, it will be initialized only if the algorithm needs it.
lazy val sparkConfig = new SparkConf(loadDefaults = true)
.setAppName("MapReduce coresets")
SerializationUtils.configSerialization(sparkConfig)
lazy val sc = new SparkContext(sparkConfig)
// Cycle through parameter configurations
for {
r <- 0 until runs
k <- kList
kernSize <- kernelSizeList
} {
println(
s"""
|Experiment on $input with:
| k = $k
| k' = $kernSize
""".stripMargin)
val experiment = new Experiment()
.tag("experiment", "Points")
.tag("version", BuildInfo.version)
.tag("git-revision", BuildInfo.gitRevision)
.tag("git-revcount", BuildInfo.gitRevCount)
.tag("git-branch", BuildInfo.gitBranch)
.tag("k", k)
.tag("computeFarthest", computeFarthest)
.tag("computeMatching", computeMatching)
if (kernSize.nonEmpty) {
experiment.tag("kernel-size", kernSize.get)
}
val metadata = SerializationUtils.metadata(input)
for ((k, v) <- metadata) {
experiment.tag(k, v)
}
val dim = metadata("data.dimension").toInt
val n = metadata("data.num-points").toLong
val parallelism = algorithm match {
case "mapreduce" => sc.defaultParallelism
case _ => 1
}
val coreset: Coreset[Point] = algorithm match {
case "mapreduce" =>
if(kernSize.isEmpty) {
throw new IllegalArgumentException("Should specify kernel size on the command line")
}
experiment.tag("parallelism", parallelism)
experiment.tag("partitioning", partitioning)
val inputPoints = SerializationUtils.sequenceFile(sc, input, parallelism)
val points = partitioning match {
case "random" => Partitioning.random(inputPoints, experiment)
case "shuffle" => Partitioning.shuffle(inputPoints, experiment)
case "polar2D" => Partitioning.polar2D(inputPoints, experiment)
case "grid" => Partitioning.grid(inputPoints, experiment)
case "unit-grid" => Partitioning.unitGrid(inputPoints, experiment)
case "radius" => Partitioning.radius(inputPoints, Point.zero(dim), distance, experiment)
case "radius-old" => Partitioning.radiusOld(inputPoints, Point.zero(dim), distance, experiment)
case err => throw new IllegalArgumentException(s"Unknown partitioning scheme $err")
}
Algorithm.mapReduce(points, kernSize.get, k, distance, experiment)
case "streaming" =>
if(kernSize.isEmpty) {
throw new IllegalArgumentException("Should specify kernel size on the command line")
}
val parallelism = sc.defaultParallelism
val points = Partitioning.shuffle(
SerializationUtils.sequenceFile(sc, input, parallelism),
experiment)
.flatMap { pts => pts.iterator }
.persist(StorageLevel.MEMORY_AND_DISK)
val requiredMemory = n*(dim*8 + 4) // in bytes
require(Runtime.getRuntime().maxMemory() < Long.MaxValue)
val availableMemory = Runtime.getRuntime().maxMemory() - (Runtime.getRuntime().totalMemory()-Runtime.getRuntime().freeMemory())
val pointsIterator: Iterator[Point] =
if (requiredMemory < availableMemory) {
println(s"Materializing all the points in RAM (required ${requiredMemory/1048576}m, free ${availableMemory/1048576}m)")
val pts = points.collect()
sc.stop() // we no longer need Spark at this point
pts.iterator
} else {
println("Not enough memory to materialize the points in RAM, streaming from the coreset")
points.toLocalIterator
}
val _coreset = Algorithm.streaming(pointsIterator, k, kernSize.get, distance, experiment)
experiment.append("streaming-implementation",
jMap(
"num-merges" -> _coreset.numRestructurings,
"actual-centers" -> _coreset.kernel.length,
"threshold" -> _coreset.threshold
))
_coreset
case "sequential" =>
val points = SerializationUtils.sequenceFile(input)
Algorithm.sequential(points.toVector, experiment)
case "random" =>
val points = SerializationUtils.sequenceFile(sc, input, sc.defaultParallelism)
Algorithm.random(points, k, distance, experiment)
}
Approximation.approximate(
coreset, k, kernSize.getOrElse(k)*parallelism,
distance, computeFarthest, computeMatching, approxRuns,
Some(pointToRow(distance, dim) _), experiment)
experiment.saveAsJsonFile()
println(experiment.toSimpleString)
}
}
def pointToRow(distance: (Point, Point) => Double, dim: Int)(p: Point)
: Map[String, Any] = Map(
"norm" -> distance(p, Point.zero(dim)),
"point" -> p.toString
)
class Conf(args: Array[String]) extends ScallopConf(args) {
lazy val algorithm = opt[String](default = Some("sequential"))
lazy val partitioning = opt[String](default = Some("random"))
lazy val target = opt[String](required = true)
lazy val kernelSize = opt[String](required = false)
lazy val runs = opt[Int](default = Some(1))
lazy val approxRuns = opt[Int](default = Some(1))
lazy val directory = opt[String](default = Some("/tmp"))
lazy val input = opt[String](required = true)
lazy val farthest = toggle(
default=Some(true),
descrYes = "Compute metrics based on the farthest-point heuristic",
descrNo = "Don't compute metrics based on the farthest-point heuristic")
lazy val matching = toggle(
default=Some(true),
descrYes = "Compute metrics based on the matching heuristic",
descrNo = "Don't compute metrics based on the matching heuristic")
}
}
|
Cecca/diversity-maximization
|
experiments/src/main/scala/it/unipd/dei/diversity/MainPoints.scala
|
Scala
|
gpl-3.0
| 7,861 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers
import org.scalatest._
// T is the type of the object that has a Boolean property to verify with an instance of this trait
// This is not a subtype of BeMatcher, because BeMatcher only works after "be", but
// BePropertyMatcher will work after "be", "be a", or "be an"
/**
* Trait extended by matcher objects, which may appear after the word <code>be</code>, that can match against a <code>Boolean</code>
* property. The match will succeed if and only if the <code>Boolean</code> property equals <code>true</code>.
* The object containing the property, which must be of the type specified by the <code>BePropertyMatcher</code>'s type
* parameter <code>T</code>, is passed to the <code>BePropertyMatcher</code>'s
* <code>apply</code> method. The result is a <code>BePropertyMatchResult</code>.
* A <code>BePropertyMatcher</code> is, therefore, a function from the specified type, <code>T</code>, to
* a <code>BePropertyMatchResult</code>.
*
* <p>
* Although <code>BePropertyMatcher</code>
* and <code>Matcher</code> represent similar concepts, they have no inheritance relationship
* because <code>Matcher</code> is intended for use right after <code>should</code> or <code>must</code>
* whereas <code>BePropertyMatcher</code> is intended for use right after <code>be</code>.
* </p>
*
* <p>
* A <code>BePropertyMatcher</code> essentially allows you to write statically typed <code>Boolean</code>
* property assertions similar to the dynamic ones that use symbols:
* </p>
*
* <pre class="stHighlight">
* tempFile should be a ('file) // dynamic: uses reflection
* tempFile should be a (file) // type safe: only works on Files; no reflection used
* </pre>
*
* <p>
* One good way to organize custom matchers is to place them inside one or more traits that
* you can then mix into the suites or specs that need them. Here's an example that
* includes two <code>BePropertyMatcher</code>s:
* </p>
*
* <pre class="stHighlight">
* trait CustomMatchers {
*
* class FileBePropertyMatcher extends BePropertyMatcher[java.io.File] {
* def apply(left: java.io.File) = BePropertyMatchResult(left.isFile, "file")
* }
*
* class DirectoryBePropertyMatcher extends BePropertyMatcher[java.io.File] {
* def apply(left: java.io.File) = BePropertyMatchResult(left.isDirectory, "directory")
* }
*
* val file = new FileBePropertyMatcher
* val directory = new DirectoryBePropertyMatcher
* }
* </pre>
*
* <p>
* Because the type parameter of these two <code>BePropertyMatcher</code>s is <code>java.io.File</code>, they
* can only be used with instances of that type. (The compiler will enforce this.) All they do is create a
* <code>BePropertyMatchResult</code> whose <code>matches</code> field is <code>true</code> if and only if the <code>Boolean</code> property
* is <code>true</code>. The second field, <code>propertyName</code>, is simply the string name of the property.
* The <code>file</code> and <code>directory</code> <code>val</code>s create variables that can be used in
* matcher expressions that test whether a <code>java.io.File</code> is a file or a directory. Here's an example:
* </p>
*
* <pre class="stHighlight">
* class ExampleSpec extends Spec with ShouldMatchers with CustomMatchers {
*
* describe("A temp file") {
*
* it("should be a file, not a directory") {
*
* val tempFile = java.io.File.createTempFile("delete", "me")
*
* try {
* tempFile should be a (file)
* tempFile should not be a (directory)
* }
* finally {
* tempFile.delete()
* }
* }
* }
* }
* </pre>
*
* <p>
* These matches should succeed, but if for example the first match, <code>tempFile should be a (file)</code>, were to fail, you would get an error message like:
* </p>
*
* <pre class="stExamples">
* /tmp/delme1234me was not a file
* </pre>
*
* <p>
* For more information on <code>BePropertyMatchResult</code> and the meaning of its fields, please
* see the documentation for <a href="BePropertyMatchResult.html"><code>BePropertyMatchResult</code></a>. To understand why <code>BePropertyMatcher</code>
* is contravariant in its type parameter, see the section entitled "Matcher's variance" in the
* documentation for <a href="Matcher.html"><code>Matcher</code></a>.
* </p>
*
* @author Bill Venners
*/
trait BePropertyMatcher[-T] extends Function1[T, BePropertyMatchResult] {
thisBePropertyMatcher =>
/**
* Check to see if a <code>Boolean</code> property on the specified object, <code>objectWithProperty</code>, matches its
* expected value, and report the result in
* the returned <code>BePropertyMatchResult</code>. The <code>objectWithProperty</code> is
* usually the value to the left of a <code>should</code> or <code>must</code> invocation. For example, <code>tempFile</code>
* would be passed as the <code>objectWithProperty</code> in:
*
* <pre class="stHighlight">
* tempFile should be a (file)
* </pre>
*
* @param objectWithProperty the object with the <code>Boolean</code> property against which to match
* @return the <code>BePropertyMatchResult</code> that represents the result of the match
*/
def apply(objectWithProperty: T): BePropertyMatchResult
/**
* Compose this <code>BePropertyMatcher</code> with the passed function, returning a new <code>BePropertyMatcher</code>.
*
* <p>
* This method overrides <code>compose</code> on <code>Function1</code> to
* return a more specific function type of <code>BePropertyMatcher</code>.
* </p>
*/
override def compose[U](g: U => T): BePropertyMatcher[U] =
new BePropertyMatcher[U] {
def apply(u: U) = thisBePropertyMatcher.apply(g(u))
}
}
/**
* Companion object for trait <code>BePropertyMatcher</code> that provides a
* factory method that creates a <code>BePropertyMatcher[T]</code> from a
* passed function of type <code>(T => BePropertyMatchResult)</code>.
*
* @author Bill Venners
*/
object BePropertyMatcher {
/**
* Factory method that creates a <code>BePropertyMatcher[T]</code> from a
* passed function of type <code>(T => BePropertyMatchResult)</code>.
*
* @author Bill Venners
*/
def apply[T](fun: T => BePropertyMatchResult): BePropertyMatcher[T] =
new BePropertyMatcher[T] {
def apply(left: T) = fun(left)
}
}
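
// Illustrative sketch (hypothetical, not part of the ScalaTest sources): the factory
// method above builds the `file` matcher from the class-level scaladoc example
// without declaring a named BePropertyMatcher subclass.
private object BePropertyMatcherFactoryExample {
  val file: BePropertyMatcher[java.io.File] =
    BePropertyMatcher { (left: java.io.File) => BePropertyMatchResult(left.isFile, "file") }
}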
|
epishkin/scalatest-google-code
|
src/main/scala/org/scalatest/matchers/BePropertyMatcher.scala
|
Scala
|
apache-2.0
| 6,995 |
package com.eevolution.context.dictionary.domain.model
import ai.x.play.json.Jsonx
import com.eevolution.context.dictionary.api.{ActiveEnabled, DomainModel, Identifiable, Traceable}
import org.joda.time.DateTime
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com
*/
/**
* Desktop Trl Entity
* @param desktopId Desktop ID
* @param language Language
* @param tenantId Tenant ID
* @param organizationId Organization ID
* @param isActive Is Active
* @param created Created
* @param createdBy Created By
* @param updated Updated
* @param updatedBy Updated By
* @param name Name
* @param description Description
* @param help Help
* @param isTranslated Is Translated
* @param uuid UUID
*/
case class DesktopTrl(desktopId: Int,
language: String,
tenantId: Int,
organizationId: Int,
isActive:Boolean=true,
created:DateTime = DateTime.now,
createdBy: Int,
updated: DateTime = DateTime.now,
updatedBy: Int,
name: String,
description: Option[String],
help: Option[String],
isTranslated: Boolean = false,
uuid: String
) extends DomainModel
with ActiveEnabled
with Identifiable
with Traceable {
override type ActiveEnabled = this.type
override type Identifiable = this.type
override type Traceable = this.type
override def Id: Int = desktopId
override val entityName: String = "AD_Desktop_Trl"
override val identifier: String = null
}
object DesktopTrl {
implicit lazy val jsonFormat = Jsonx.formatCaseClass[DesktopTrl]
def create(desktopId: Int,
language: String,
tenantId: Int,
organizationId: Int,
isActive:Boolean,
created:DateTime,
createdBy: Int,
updated: DateTime,
updatedBy: Int,
name: String,
description:String,
help: String,
isTranslated: Boolean,
               uuid:String) = DesktopTrl(desktopId, language, tenantId, organizationId, isActive, created, createdBy,
    updated, updatedBy, name, Option(description), Option(help), isTranslated, uuid) // wrap the plain strings into Options instead of dropping them
}
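// Illustrative usage sketch (not part of the original file; all argument values are
// made up for the example). `create` wraps the plain description/help strings into Options:
//   val trl = DesktopTrl.create(100, "en_US", 11, 0, true, DateTime.now, 100,
//     DateTime.now, 100, "Main Desktop", "Default desktop", "No help", false,
//     java.util.UUID.randomUUID.toString)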
|
adempiere/ADReactiveSystem
|
dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/model/DesktopTrl.scala
|
Scala
|
gpl-3.0
| 3,217 |
package io.kaitai.struct.languages
import io.kaitai.struct.datatype.DataType._
import io.kaitai.struct.datatype._
import io.kaitai.struct.exprlang.Ast
import io.kaitai.struct.format._
import io.kaitai.struct.languages.components._
import io.kaitai.struct.translators.NimTranslator
import io.kaitai.struct.{ClassTypeProvider, RuntimeConfig, Utils}
class NimCompiler(typeProvider: ClassTypeProvider, config: RuntimeConfig)
extends LanguageCompiler(typeProvider, config)
with SingleOutputFile
with EveryReadIsExpression
with UpperCamelCaseClasses
with FixedContentsUsingArrayByteLiteral
with UniversalFooter
with AllocateIOLocalVar
with SwitchIfOps
with UniversalDoc {
import NimCompiler._
// Written from scratch
def blankLine: Unit = out.puts
def imports = importList.toList.map((x) => s"import $x").mkString("\n")
def namespaced(names: List[String]): String = names.map(n => camelCase(n, true)).mkString("_")
def typeSectionHeader: Unit = {
out.puts("type")
out.inc
}
def typeSectionFooter: Unit = {
out.dec
out.puts
}
def instanceForwardDeclaration(className: List[String], instName: InstanceIdentifier, dataType: DataType): Unit = {
out.puts(s"proc ${idToStr(instName).dropRight(4)}*(this: ${namespaced(className)}): ${ksToNim(dataType)}")
}
def fromFile(name: List[String]): Unit = {
val n = namespaced(name)
out.puts(s"proc fromFile*(_: typedesc[$n], filename: string): $n =")
out.inc
out.puts(s"$n.read(newKaitaiFileStream(filename), nil, nil)")
out.dec
out.puts
}
override def opaqueClassDeclaration(classSpec: ClassSpec): Unit =
out.puts("import \"" + classSpec.name.head + "\"")
override def innerEnums = false
override val translator: NimTranslator = new NimTranslator(typeProvider, importList)
override def universalFooter: Unit = {
out.dec
}
override def allocateIO(id: Identifier, rep: RepeatSpec): String = {
val ioName = s"${idToStr(id)}Io"
val arg = rep match {
case NoRepeat => idToStr(id) + "Expr"
case _ => translator.doName(Identifier.ITERATOR2)
}
out.puts(s"let $ioName = newKaitaiStream($arg)")
ioName
}
// Members declared in io.kaitai.struct.languages.components.SingleOutputFile
override def outImports(topClass: ClassSpec) =
importList.toList.map((x) => s"import $x").mkString("\n") + "\n\n"
// Members declared in io.kaitai.struct.languages.components.ExtraAttrs
// def extraAttrForIO(id: Identifier, rep: RepeatSpec): List[AttrSpec] = ???
// Members declared in io.kaitai.struct.languages.components.ExceptionNames
override def ksErrorName(err: KSError): String = "KaitaiError" // TODO: maybe add more debugging info
// Members declared in io.kaitai.struct.languages.components.LanguageCompiler
override def importFile(file: String): Unit = {
importList.add(file)
}
override def alignToByte(io: String): Unit = out.puts(s"alignToByte($io)")
override def attrFixedContentsParse(attrName: Identifier, contents: String): Unit = {
out.puts(s"this.${idToStr(attrName)} = $normalIO.ensureFixedContents($contents)")
}
// def attrParse(attr: AttrLikeSpec, id: Identifier, defEndian: Option[Endianness]): Unit = ???
override def attrParseHybrid(leProc: () => Unit, beProc: () => Unit): Unit = {
out.puts("if this.isLe:")
out.inc
leProc()
out.dec
out.puts("else:")
out.inc
beProc()
out.dec
}
  // Works, but is poorly written; I want to rewrite this later XXX
override def attrProcess(proc: ProcessExpr, varSrc: Identifier, varDest: Identifier, rep: RepeatSpec): Unit = {
val srcExpr = rep match {
case RepeatEos | RepeatExpr(_) | RepeatUntil(_) => privateMemberName(varSrc) + "[i]"
case NoRepeat => privateMemberName(varSrc)
}
val expr = proc match {
case ProcessXor(xorValue) =>
s"$srcExpr.processXor(${expression(xorValue)})"
case ProcessZlib =>
s"$srcExpr.processZlib()"
case ProcessRotate(isLeft, rotValue) =>
val expr = if (isLeft) {
expression(rotValue)
} else {
s"8 - (${expression(rotValue)})"
}
s"$srcExpr.processRotateLeft(int($expr))"
case ProcessCustom(name, args) =>
val namespace = name.head
val procPath = name.mkString(".")
importList.add(namespace)
s"$procPath($srcExpr, ${args.map(expression).mkString(", ")})"
}
handleAssignment(varDest, expr, rep, false)
}
override def attributeDeclaration(attrName: Identifier, attrType: DataType, isNullable: Boolean): Unit = {
out.puts(s"`${idToStr(attrName)}`*: ${ksToNim(attrType)}")
}
override def instanceDeclaration(attrName: InstanceIdentifier, attrType: DataType, isNullable: Boolean): Unit = {
out.puts(s"`${idToStr(attrName)}`: ${ksToNim(attrType)}")
out.puts(s"`${instanceFlagIdentifier(attrName)}`: bool")
}
override def attributeReader(attrName: Identifier, attrType: DataType, isNullable: Boolean): Unit = {}
override def classConstructorHeader(name: List[String], parentType: DataType, rootClassName: List[String], isHybrid: Boolean, params: List[ParamDefSpec]): Unit = {}
override def classHeader(name: List[String]): Unit = {
out.puts(s"${namespaced(name)}* = ref object of KaitaiStruct")
out.inc
}
override def condIfHeader(expr: Ast.expr): Unit = {
out.puts(s"if ${expression(expr)}:")
out.inc
}
override def classFooter(name: List[String]): Unit = {
typeProvider.nowClass.meta.endian match {
case Some(_: CalcEndian) | Some(InheritedEndian) =>
out.puts(s"${idToStr(EndianIdentifier)}: bool")
case _ =>
}
universalFooter
}
override def condRepeatEosHeader(id: Identifier, io: String, dataType: DataType, needRaw: NeedRaw): Unit = {
out.puts("block:")
out.inc
out.puts("var i: int")
out.puts(s"while not $io.isEof:")
out.inc
}
override def condRepeatEosFooter: Unit = {
out.puts("inc i")
out.dec
out.dec
}
override def condRepeatExprHeader(id: Identifier, io: String, dataType: DataType, needRaw: NeedRaw, repeatExpr: Ast.expr): Unit = {
out.puts(s"for i in 0 ..< int(${expression(repeatExpr)}):")
out.inc
}
override def condRepeatUntilHeader(id: Identifier, io: String, dataType: DataType, needRaw: NeedRaw, repeatExpr: Ast.expr): Unit = {
out.puts("block:")
out.inc
out.puts("var i: int")
out.puts("while true:")
out.inc
}
override def condRepeatUntilFooter(id: Identifier, io: String, dataType: DataType, needRaw: NeedRaw, repeatExpr: Ast.expr): Unit = {
typeProvider._currentIteratorType = Some(dataType)
out.puts(s"if ${expression(repeatExpr)}:")
out.inc
out.puts("break")
out.dec
out.puts("inc i")
out.dec
out.dec
}
// For this to work, we need a {.lenientCase.} pragma which disables nim's exhaustive case coverage check
override def enumDeclaration(curClass: List[String], enumName: String, enumColl: Seq[(Long, EnumValueSpec)]): Unit = {
val enumClass = namespaced(curClass)
out.puts(s"${enumClass}_${camelCase(enumName, true)}* = enum")
out.inc
enumColl.foreach { case (id, label) =>
val order = if (s"$id" == "-9223372036854775808") "low(int64)" else s"$id"
out.puts(s"${label.name} = $order")
}
out.dec
}
// def enumFooter: Unit = {
// universalFooter
// out.puts
// }
// def enumTemplate: Unit = {
// out.puts("template defineEnum(typ) =")
// out.inc
// out.puts("type typ* = distinct int64")
// out.puts("proc `==`*(x, y: typ): bool {.borrow.}")
// out.dec
// }
// def enumTemplateFooter: Unit = out.puts
// def enumHeader: Unit = {
// out.puts("const")
// out.inc
// }
// def enumConstantsFooter: Unit = {
// universalFooter
// out.puts
// }
// def enumConstants(curClass: List[String], enumName: String, enumColl: Seq[(Long, EnumValueSpec)]): Unit = {
// val enumClass = namespaced(curClass)
// enumColl.foreach { case (id: Long, label: EnumValueSpec) =>
// // This hack is needed because the lowest int64 literal is not supported in Nim
// val const = if (s"$id" == "-9223372036854775808") "low(int64)" else s"$id"
// out.puts(s"${label.name}* = ${enumClass}_$enumName($const)") }
// }
override def fileHeader(topClassName: String): Unit = {
importList.add(config.nimModule)
importList.add("options")
}
override def indent: String = " "
override def instanceCalculate(instName: Identifier, dataType: DataType, value: Ast.expr): Unit = {
val cast = s"${ksToNim(dataType)}(${expression(value)})"
handleAssignmentSimple(instName, cast)
}
override def instanceCheckCacheAndReturn(instName: InstanceIdentifier, dataType: DataType): Unit = {
out.puts(s"if this.${instanceFlagIdentifier(instName)}:")
out.inc
out.puts(s"return ${privateMemberName(instName)}")
out.dec
}
override def instanceHeader(className: List[String], instName: InstanceIdentifier, dataType: DataType, isNullable: Boolean): Unit = {
out.puts(s"proc ${idToStr(instName).dropRight(4)}(this: ${namespaced(className)}): ${ksToNim(dataType)} = ")
out.inc
}
override def instanceFooter = {
universalFooter
out.puts
}
override def instanceReturn(instName: InstanceIdentifier, attrType: DataType): Unit = {
out.puts(s"this.${instanceFlagIdentifier(instName)} = true")
out.puts(s"return ${privateMemberName(instName)}")
}
// def normalIO: String = ???
override def outFileName(topClassName: String): String = s"$topClassName.nim"
override def pushPos(io: String): Unit = out.puts(s"let pos = $io.pos()")
override def popPos(io: String): Unit = out.puts(s"$io.seek(pos)")
override def readFooter(): Unit = {
universalFooter
out.puts
}
override def readHeader(endian: Option[FixedEndian], isEmpty: Boolean): Unit = {
val t = namespaced(typeProvider.nowClass.name)
val p = ksToNim(typeProvider.nowClass.parentType)
val r = namespaced(typeProvider.topClass.name)
val paramsArg = Utils.join(typeProvider.nowClass.params.map((p) =>
s"${paramName(p.id)}: any"
), ", ", ", ", "")
endian match {
case None =>
out.puts(s"proc read*(_: typedesc[$t], io: KaitaiStream, root: KaitaiStruct, parent: $p$paramsArg): $t =")
out.inc
out.puts("template this: untyped = result")
out.puts(s"this = new($t)")
// The cast in the if clause is used to bypass semantic analysis
// The cast in the else clause should be a normal type conversion instead,
// but for some reason it doesn't work. Needs further investigation
out.puts(s"let root = if root == nil: cast[$r](this) else: cast[$r](root)")
out.puts(s"this.io = io")
out.puts(s"this.root = root")
out.puts(s"this.parent = parent")
typeProvider.nowClass.params.foreach((p) => handleAssignmentSimple(p.id, s"${ksToNim(p.dataType)}(${paramName(p.id)})"))
typeProvider.nowClass.meta.endian match {
case Some(_: CalcEndian) =>
out.puts(s"this.${idToStr(EndianIdentifier)} = false")
case Some(InheritedEndian) =>
out.puts(s"this.${idToStr(EndianIdentifier)} = " +
s"this.${idToStr(ParentIdentifier)}." +
s"${idToStr(EndianIdentifier)}")
case _ =>
}
out.puts
case Some(e) =>
out.puts
out.puts(s"proc read${camelCase(e.toSuffix, true)}(this: $t) =")
out.inc
}
}
// def results(topClass: ClassSpec): Map[String, String] = ???
override def runRead(name: List[String]): Unit = out.puts("read()") // TODO: missing type argument
override def runReadCalc(): Unit = {
out.puts
out.puts("if this.isLe:")
out.inc
out.puts("readLe(this)")
out.dec
out.puts("else:")
out.inc
out.puts("readBe(this)")
out.dec
}
override def seek(io: String, pos: Ast.expr): Unit = out.puts(s"$io.seek(int(${expression(pos)}))")
override def useIO(ioEx: Ast.expr): String = {
out.puts(s"let io = ${expression(ioEx)}")
"io"
}
override def classForwardDeclaration(name: List[String]): Unit = {
val t = namespaced(typeProvider.nowClass.name)
val p = ksToNim(typeProvider.nowClass.parentType)
val paramsArg = Utils.join(typeProvider.nowClass.params.map((p) =>
s"${paramName(p.id)}: any"
), ", ", ", ", "")
out.puts(s"proc read*(_: typedesc[$t], io: KaitaiStream, root: KaitaiStruct, parent: $p$paramsArg): $t")
}
// Members declared in io.kaitai.struct.languages.components.ObjectOrientedLanguage
override def idToStr(id: Identifier): String = {
id match {
case IoIdentifier => "io"
case NamedIdentifier(name) => camelCase(name, false)
case InstanceIdentifier(name) => camelCase(name, false) + "Inst"
case IoStorageIdentifier(innerId) => "io" + camelCase(idToStr(innerId), true)
case SpecialIdentifier(name) => camelCase(name, false)
case NumberedIdentifier(idx) => s"${NumberedIdentifier.TEMPLATE}$idx"
case RawIdentifier(innerId) => "raw" + camelCase(idToStr(innerId), true)
}
}
override def localTemporaryName(id: Identifier): String = idToStr(id)
override def privateMemberName(id: Identifier): String = {
val name = idToStr(id)
val prefix = "this"
s"$prefix.$name"
}
override def publicMemberName(id: Identifier): String = idToStr(id)
// Members declared in io.kaitai.struct.languages.components.EveryReadIsExpression
override def bytesPadTermExpr(expr0: String, padRight: Option[Int], terminator: Option[Int], include: Boolean): String = {
val expr1 = padRight match {
case Some(padByte) => s"$expr0.bytesStripRight($padByte)"
case None => expr0
}
val expr2 = terminator match {
case Some(term) => s"$expr1.bytesTerminate($term, $include)"
case None => expr1
}
expr2
}
def handleAssignmentIterative(id: Identifier, expr: String): Unit = {
// Need better design for this XXX
val exprName = id match {
case _: RawIdentifier => translator.doName(Identifier.ITERATOR2)
case _ => translator.doName(Identifier.ITERATOR)
}
out.puts(s"let $exprName = $expr")
out.puts(s"${privateMemberName(id)}.add($exprName)")
}
override def handleAssignmentRepeatEos(id: Identifier, expr: String): Unit = {
handleAssignmentIterative(id, expr)
}
override def handleAssignmentRepeatExpr(id: Identifier, expr: String): Unit = {
handleAssignmentIterative(id, expr)
}
override def handleAssignmentRepeatUntil(id: Identifier, expr: String, isRaw: Boolean): Unit = {
handleAssignmentIterative(id, expr)
}
override def handleAssignmentSimple(id: Identifier, expr: String): Unit = {
// Need better design for this XXX
val exprName = idToStr(id) + "Expr"
out.puts(s"let $exprName = $expr")
out.puts(s"${privateMemberName(id)} = $exprName")
}
override def handleAssignmentTempVar(dataType: DataType, id: String, expr: String): Unit = {}
override def parseExpr(dataType: DataType, assignType: DataType, io: String, defEndian: Option[FixedEndian]): String = {
val expr = dataType match {
case t: ReadableType =>
s"$io.read${Utils.capitalize(t.apiCall(defEndian))}()"
case blt: BytesLimitType =>
s"$io.readBytes(int(${expression(blt.size)}))"
case _: BytesEosType =>
s"$io.readBytesFull()"
case BytesTerminatedType(terminator, include, consume, eosError, _) =>
s"$io.readBytesTerm($terminator, $include, $consume, $eosError)"
case BitsType1(bitEndian) =>
s"$io.readBitsInt${camelCase(bitEndian.toSuffix, true)}(1) != 0"
case BitsType(width: Int, bitEndian) =>
s"$io.readBitsInt${camelCase(bitEndian.toSuffix, true)}($width)"
case t: UserType =>
val addArgs = {
val parent = t.forcedParent match {
case Some(USER_TYPE_NO_PARENT) => "nil"
case Some(fp) => translator.translate(fp)
case None => "this"
}
s", this.root, $parent"
}
val addParams = Utils.join(t.args.map((a) => translator.translate(a)), ", ", ", ", "")
val concreteName = namespaced(t.classSpec match {
case Some(cs) => cs.name
case None => t.name
})
s"${concreteName}.read($io$addArgs$addParams)"
}
if (assignType != dataType) {
s"${ksToNim(assignType)}($expr)"
} else {
expr
}
}
override def userTypeDebugRead(id: String, dataType: DataType, assignType: DataType): Unit = {} // TODO
// Members declared in io.kaitai.struct.languages.components.SwitchOps
override def switchCasesUsingIf[T](
id: Identifier,
on: Ast.expr,
onType: DataType,
cases: Map[Ast.expr, T],
normalCaseProc: (T) => Unit,
elseCaseProc: (T) => Unit
): Unit = {
switchIfStart(id, on, onType)
// Pass 1: only normal case clauses
var first = true
cases.foreach { case (condition, result) =>
condition match {
case SwitchType.ELSE_CONST =>
// skip for now
case _ =>
if (first) {
switchIfCaseFirstStart(condition)
first = false
} else {
switchIfCaseStart(condition)
}
normalCaseProc(result)
switchIfCaseEnd()
}
}
// Pass 2: else clause, if it is there
cases.get(SwitchType.ELSE_CONST).foreach { (result) =>
if (cases.size == 1) {
elseCaseProc(result)
} else {
switchIfElseStart()
elseCaseProc(result)
switchIfElseEnd()
}
}
switchIfEnd()
}
override def switchCaseEnd(): Unit = universalFooter
override def switchCaseStart(condition: Ast.expr): Unit = {}
override def switchElseStart(): Unit = {}
override def switchEnd(): Unit = {}
override def switchStart(id: Identifier, on: Ast.expr): Unit = {}
// Members declared in io.kaitai.struct.languages.components.SwitchIfOps
override def switchRequiresIfs(onType: DataType): Boolean = true
override def switchIfStart(id: Identifier, on: Ast.expr, onType: DataType): Unit = {
out.puts("block:")
out.inc
out.puts(s"let on = ${expression(on)}")
}
override def switchIfCaseFirstStart(condition: Ast.expr): Unit = {
out.puts(s"if on == ${expression(condition)}:")
out.inc
}
override def switchIfCaseStart(condition: Ast.expr): Unit = {
out.puts(s"elif on == ${expression(condition)}:")
out.inc
}
override def switchIfCaseEnd(): Unit = out.dec
override def switchIfElseStart(): Unit = {
out.puts("else:")
out.inc
}
override def switchIfEnd(): Unit = out.dec
// Members declared in io.kaitai.struct.languages.components.UniversalDoc
override def universalDoc(doc: DocSpec): Unit = {
out.puts
out.puts( "##[")
doc.summary.foreach(summary => out.puts(summary))
doc.ref.foreach {
case TextRef(text) =>
out.puts("@see \"" + text + "\"")
case ref: UrlRef =>
out.puts(s"@see ${ref.toAhref}")
}
out.puts( "]##")
}
def instanceFlagIdentifier(id: InstanceIdentifier) = s"${idToStr(id)}Flag"
}
object NimCompiler extends LanguageCompilerStatic
with StreamStructNames
with UpperCamelCaseClasses
with ExceptionNames {
override def getCompiler(
tp: ClassTypeProvider,
config: RuntimeConfig
): LanguageCompiler = new NimCompiler(tp, config)
// Members declared in io.kaitai.struct.languages.components.StreamStructNames
override def kstreamName: String = "KaitaiStream"
override def kstructName: String = "KaitaiStruct"
def ksErrorName(err: KSError): String = "KaitaiError" // TODO: maybe add more debugging info
def camelCase(s: String, upper: Boolean): String = {
if (upper) {
s.split("_").map(Utils.capitalize).mkString
} else {
if (s.startsWith("_")) {
camelCase(s.substring(1), false)
} else {
val firstWord :: restWords = s.split("_").toList
(firstWord :: restWords.map(Utils.capitalize)).mkString
}
}
}
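  // For reference (illustrative, derived from the definition above):
  //   camelCase("file_header", true)  == "FileHeader"
  //   camelCase("file_header", false) == "fileHeader"
  //   camelCase("_root", false)       == "root"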
def namespaced(names: List[String]): String = names.map(n => camelCase(n, true)).mkString("_")
def ksToNim(attrType: DataType): String = {
attrType match {
case Int1Type(false) => "uint8"
case IntMultiType(false, Width2, _) => "uint16"
case IntMultiType(false, Width4, _) => "uint32"
case IntMultiType(false, Width8, _) => "uint64"
case Int1Type(true) => "int8"
case IntMultiType(true, Width2, _) => "int16"
case IntMultiType(true, Width4, _) => "int32"
case IntMultiType(true, Width8, _) => "int64"
case FloatMultiType(Width4, _) => "float32"
case FloatMultiType(Width8, _) => "float64"
case BitsType(_, _) => "uint64"
case _: BooleanType => "bool"
case CalcIntType => "int"
case CalcFloatType => "float64"
case _: StrType => "string"
case _: BytesType => "seq[byte]"
case KaitaiStructType | CalcKaitaiStructType => "KaitaiStruct"
case KaitaiStreamType | OwnedKaitaiStreamType => "KaitaiStream"
case t: UserType => namespaced(t.classSpec match {
case Some(cs) => cs.name
case None => t.name
})
case t: EnumType => namespaced(t.enumSpec.get.name)
case at: ArrayType => s"seq[${ksToNim(at.elType)}]"
case st: SwitchType => ksToNim(st.combinedType)
case AnyType => "KaitaiStruct"
}
}
}
|
kaitai-io/kaitai_struct_compiler
|
shared/src/main/scala/io/kaitai/struct/languages/NimCompiler.scala
|
Scala
|
gpl-3.0
| 21,476 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2015 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze
import breeze.linalg.{DenseMatrix, DenseVector}
import edu.latrobe._
object TestUtils {
val tolerance0: Real = 1e-5f
val tolerance1: Real = 1e-4f
val tolerance2: Real = 1e-3f
final def similarity(a: DenseVector[Real], b: DenseVector[Real]): Real = {
val diff = a - b
val result = VectorEx.dot(diff, diff) / diff.size
println(f"Similarity: $result%.4g")
result
}
final def similarity(a: DenseMatrix[Real], b: DenseMatrix[Real]): Real = {
val diff = a - b
val result = MatrixEx.dot(diff, diff) / diff.size
println(f"Similarity: $result%.4g")
if (Real.isNaN(result)) {
throw new UnknownError
}
result
}
final def similarity(a: Tensor, b: Tensor)
: Real = similarity(a.valuesMatrix, b.valuesMatrix)
}
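// Illustrative check (not part of the original file; `outA` and `outB` stand for tensors
// produced by two supposedly equivalent computations). `similarity` is a mean squared
// difference, so smaller values mean the results are closer:
//   assert(TestUtils.similarity(outA, outB) < TestUtils.tolerance1)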
|
bashimao/ltudl
|
demos/src/test/scala/edu/latrobe/blaze/TestUtils.scala
|
Scala
|
apache-2.0
| 1,477 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import cascading.property.ConfigDef.Getter
import cascading.pipe._
import cascading.flow._
import cascading.operation._
import cascading.operation.filter._
import cascading.tuple._
import scala.util.Random
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.immutable.Queue
object RichPipe extends java.io.Serializable {
private val nextPipe = new AtomicInteger(-1)
def apply(p: Pipe): RichPipe = new RichPipe(p)
implicit def toPipe(rp: RichPipe): Pipe = rp.pipe
def getNextName: String = "_pipe_" + nextPipe.incrementAndGet.toString
private[scalding] val FormerNameBitLength = 12
private[scalding] val FormerAssignedPipeNamePattern = "^_pipe_([0-9]+).*$".r
private[scalding] val FromUuidPattern = "^.*[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-([0-9a-f]{12}).*$".r
// grab some bit of the previous pipe name to help walk up the graph across name assignments
private def getFormerNameBit(p: Pipe): String = p.getName match {
case FormerAssignedPipeNamePattern(pipeNumber) => pipeNumber
case FromUuidPattern(lastGroup) => lastGroup /* 12 characters */
case s if s.length > FormerNameBitLength => s.substring(s.length - FormerNameBitLength, s.length)
case s => s
}
/**
* Assign a new, guaranteed-unique name to the pipe.
* @param p a pipe, whose name should be changed
* @return a pipe with a new name which is guaranteed to be new and never re-assigned by this function
*
   * Note: the assigned name includes a few characters from the former name to assist in debugging.
*/
def assignName(p: Pipe): Pipe = new Pipe(getNextName + "-" + getFormerNameBit(p), p)
private val REDUCER_KEY = "mapred.reduce.tasks"
/**
* Gets the underlying config for this pipe and sets the number of reducers
* useful for cascading GroupBy/CoGroup pipes.
*/
def setReducers(p: Pipe, reducers: Int): Pipe = {
if (reducers > 0) {
p.getStepConfigDef()
.setProperty(REDUCER_KEY, reducers.toString)
p.getStepConfigDef()
.setProperty(Config.WithReducersSetExplicitly, "true")
} else if (reducers != -1) {
throw new IllegalArgumentException(s"Number of reducers must be non-negative. Got: ${reducers}")
}
p
}
// A pipe can have more than one description when merged together, so we store them delimited with 255.toChar.
// Cannot use 1.toChar as we get an error if it is not a printable character.
private def encodePipeDescriptions(descriptions: Seq[String]): String = {
descriptions.map(_.replace(255.toChar, ' ')).filter(_.nonEmpty).mkString(255.toChar.toString)
}
private def decodePipeDescriptions(encoding: String): Seq[String] = {
encoding.split(255.toChar).toSeq
}
def getPipeDescriptions(p: Pipe): Seq[String] = {
if (p.getStepConfigDef.isEmpty)
Nil
else {
// We use empty getter so we can get latest config value of Config.PipeDescriptions in the step ConfigDef.
val encodedResult = p.getStepConfigDef.apply(Config.PipeDescriptions, new Getter {
override def update(s: String, s1: String): String = ???
override def get(s: String): String = null
})
Option(encodedResult)
.filterNot(_.isEmpty)
.map(decodePipeDescriptions)
.getOrElse(Nil)
}
}
def setPipeDescriptions(p: Pipe, descriptions: Seq[String]): Pipe = {
p.getStepConfigDef().setProperty(
Config.PipeDescriptions,
encodePipeDescriptions(getPipeDescriptions(p) ++ descriptions))
p
}
def setPipeDescriptionFrom(p: Pipe, ste: Option[StackTraceElement]): Pipe = {
ste.foreach { ste =>
setPipeDescriptions(p, List(ste.toString))
}
p
}
/**
* If there is exactly one previous Pipe, get it, otherwise None
*/
def getSinglePreviousPipe(p: Pipe): Option[Pipe] =
if (p.getPrevious != null && p.getPrevious.length == 1) p.getPrevious.headOption
else None
/**
   * Is the given Pipe a source (it has no previous and is not a Splice)
*/
def isSourcePipe(pipe: Pipe): Boolean =
pipe.getParent == null &&
(pipe.getPrevious == null || pipe.getPrevious.isEmpty) &&
(!pipe.isInstanceOf[Splice])
}
/**
* This is an enrichment-pattern class for cascading.pipe.Pipe.
* The rule is to never use this class directly in input or return types, but
* only to add methods to Pipe.
*/
class RichPipe(val pipe: Pipe) extends java.io.Serializable with JoinAlgorithms {
// We need this for the implicits
import Dsl._
import RichPipe.assignName
/**
* Rename the current pipe
*/
def name(s: String): Pipe = new Pipe(s, pipe)
/**
* Beginning of block with access to expensive nonserializable state. The state object should
   * contain a function release() for resource management purposes.
*/
def using[C <: { def release(): Unit }](bf: => C) = new {
/**
* For pure side effect.
*/
def foreach[A](f: Fields)(fn: (C, A) => Unit)(implicit conv: TupleConverter[A], set: TupleSetter[Unit], flowDef: FlowDef, mode: Mode) = {
conv.assertArityMatches(f)
val newPipe = new Each(pipe, f, new SideEffectMapFunction(bf, fn,
new Function1[C, Unit] with java.io.Serializable {
def apply(c: C): Unit = { c.release() }
},
Fields.NONE, conv, set))
NullSource.writeFrom(newPipe)(flowDef, mode)
newPipe
}
/**
* map with state
*/
def map[A, T](fs: (Fields, Fields))(fn: (C, A) => T)(implicit conv: TupleConverter[A], set: TupleSetter[T]) = {
conv.assertArityMatches(fs._1)
set.assertArityMatches(fs._2)
val mf = new SideEffectMapFunction(bf, fn,
new Function1[C, Unit] with java.io.Serializable {
def apply(c: C): Unit = { c.release() }
},
fs._2, conv, set)
new Each(pipe, fs._1, mf, defaultMode(fs._1, fs._2))
}
/**
* flatMap with state
*/
def flatMap[A, T](fs: (Fields, Fields))(fn: (C, A) => TraversableOnce[T])(implicit conv: TupleConverter[A], set: TupleSetter[T]) = {
conv.assertArityMatches(fs._1)
set.assertArityMatches(fs._2)
val mf = new SideEffectFlatMapFunction(bf, fn,
new Function1[C, Unit] with java.io.Serializable {
def apply(c: C): Unit = { c.release() }
},
fs._2, conv, set)
new Each(pipe, fs._1, mf, defaultMode(fs._1, fs._2))
}
}
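  // Illustrative sketch (not part of the original file; `DbClient` is a hypothetical type
  // mixing in Stateful): one client is built per task and release() runs when the task ends.
  //   pipe.using(new DbClient with Stateful)
  //     .map('userId -> 'userName) { (db: DbClient, id: Long) => db.lookupName(id) }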
/**
* Keep only the given fields, and discard the rest.
* takes any number of parameters as long as we can convert
* them to a fields object
*/
def project(fields: Fields): Pipe =
new Each(pipe, fields, new Identity(fields))
/**
* Discard the given fields, and keep the rest.
* Kind of the opposite of project method.
*/
def discard(f: Fields): Pipe =
new Each(pipe, f, new NoOp, Fields.SWAP)
/**
* Insert a function into the pipeline:
*/
def thenDo[T, U](pfn: (T) => U)(implicit in: (RichPipe) => T): U = pfn(in(this))
/**
* group the Pipe based on fields
*
* builder is typically a block that modifies the given GroupBuilder
* the final OUTPUT of the block is used to schedule the new pipe
* each method in GroupBuilder returns this, so it is recommended
* to chain them and use the default input:
*
* {{{
* _.size.max('f1) etc...
* }}}
*/
def groupBy(f: Fields)(builder: GroupBuilder => GroupBuilder): Pipe =
builder(new GroupBuilder(f)).schedule(pipe.getName, pipe)
/**
* Returns the set of distinct tuples containing the specified fields
*/
def distinct(f: Fields): Pipe =
groupBy(f) { _.size('__uniquecount__) }.project(f)
/**
* Returns the set of unique tuples containing the specified fields. Same as distinct
*/
def unique(f: Fields): Pipe = distinct(f)
/**
* Merge or Concatenate several pipes together with this one:
*/
def ++(that: Pipe): Pipe = {
if (this.pipe == that) {
// Cascading fails on self merge:
// solution by Jack Guo
new Merge(assignName(this.pipe), assignName(new Each(that, new Identity)))
} else {
new Merge(assignName(this.pipe), assignName(that))
}
}
/**
   * Group all tuples down to one reducer (a cascading limitation).
   * This is probably only useful just before setting a tail such as a database
   * tail, so that only one reducer talks to the DB. Kind of a hack.
*/
def groupAll: Pipe = groupAll { _.pass }
/**
* == Warning ==
* This kills parallelism. All the work is sent to one reducer.
*
* Only use this in the case that you truly need all the data on one
* reducer.
*
* Just about the only reasonable case of this method is to reduce all values of a column
* or count all the rows.
*/
def groupAll(gs: GroupBuilder => GroupBuilder) =
map(() -> '__groupAll__) { (u: Unit) => 1 }
.groupBy('__groupAll__) { gs(_).reducers(1) }
.discard('__groupAll__)
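  // Illustrative (field name assumed): count every row of the pipe on a single reducer.
  //   pipe.groupAll { _.size('total) }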
/**
* Force a random shuffle of all the data to exactly n reducers
*/
def shard(n: Int): Pipe = groupRandomly(n) { _.pass }
/**
* Force a random shuffle of all the data to exactly n reducers,
* with a given seed if you need repeatability.
*/
def shard(n: Int, seed: Int): Pipe = groupRandomly(n, seed) { _.pass }
/**
* Like groupAll, but randomly groups data into n reducers.
*
* you can provide a seed for the random number generator
* to get reproducible results
*/
def groupRandomly(n: Int)(gs: GroupBuilder => GroupBuilder): Pipe =
groupRandomlyAux(n, None)(gs)
/**
* like groupRandomly(n : Int) with a given seed in the randomization
*/
def groupRandomly(n: Int, seed: Long)(gs: GroupBuilder => GroupBuilder): Pipe =
groupRandomlyAux(n, Some(seed))(gs)
// achieves the behavior that reducer i gets i_th shard
  // by relying on cascading to use java's hashCode, which hashes ints
// to themselves
protected def groupRandomlyAux(n: Int, optSeed: Option[Long])(gs: GroupBuilder => GroupBuilder): Pipe = {
using(statefulRandom(optSeed))
.map(() -> '__shard__) { (r: Random, _: Unit) => r.nextInt(n) }
.groupBy('__shard__) { gs(_).reducers(n) }
.discard('__shard__)
}
private def statefulRandom(optSeed: Option[Long]): Random with Stateful = {
val random = new Random with Stateful
optSeed.foreach { x => random.setSeed(x) }
random
}
/**
* Put all rows in random order
*
* you can provide a seed for the random number generator
* to get reproducible results
*/
def shuffle(shards: Int): Pipe = groupAndShuffleRandomly(shards) { _.pass }
def shuffle(shards: Int, seed: Long): Pipe = groupAndShuffleRandomly(shards, seed) { _.pass }
/**
   * Like shard, except do some operation in the reducers
*/
def groupAndShuffleRandomly(reducers: Int)(gs: GroupBuilder => GroupBuilder): Pipe =
groupAndShuffleRandomlyAux(reducers, None)(gs)
/**
* Like groupAndShuffleRandomly(reducers : Int) but with a fixed seed.
*/
def groupAndShuffleRandomly(reducers: Int, seed: Long)(gs: GroupBuilder => GroupBuilder): Pipe =
groupAndShuffleRandomlyAux(reducers, Some(seed))(gs)
private def groupAndShuffleRandomlyAux(reducers: Int, optSeed: Option[Long])(gs: GroupBuilder => GroupBuilder): Pipe = {
using(statefulRandom(optSeed))
.map(() -> ('__shuffle__)) { (r: Random, _: Unit) => r.nextDouble() }
.groupRandomlyAux(reducers, optSeed){ g: GroupBuilder =>
gs(g.sortBy('__shuffle__))
}
.discard('__shuffle__)
}
/**
* Adds a field with a constant value.
*
* == Usage ==
* {{{
* insert('a, 1)
* }}}
*/
def insert[A](fs: Fields, value: A)(implicit setter: TupleSetter[A]): Pipe =
map[Unit, A](() -> fs) { _: Unit => value }(implicitly[TupleConverter[Unit]], setter)
/**
* Rename some set of N fields as another set of N fields
*
* == Usage ==
* {{{
* rename('x -> 'z)
* rename(('x,'y) -> ('X,'Y))
* }}}
*
* == Warning ==
* `rename('x,'y)` is interpreted by scala as `rename(Tuple2('x,'y))`
* which then does `rename('x -> 'y)`. This is probably not what is intended
* but the compiler doesn't resolve the ambiguity. YOU MUST CALL THIS WITH
* A TUPLE2! If you don't, expect the unexpected.
*/
def rename(fields: (Fields, Fields)): Pipe = {
val (fromFields, toFields) = fields
val in_arity = fromFields.size
val out_arity = toFields.size
assert(in_arity == out_arity, "Number of field names must match for rename")
new Each(pipe, fromFields, new Identity(toFields), Fields.SWAP)
}
/**
* Keep only items that satisfy this predicate.
*/
def filter[A](f: Fields)(fn: (A) => Boolean)(implicit conv: TupleConverter[A]): Pipe = {
conv.assertArityMatches(f)
new Each(pipe, f, new FilterFunction(fn, conv))
}
/**
* Keep only items that don't satisfy this predicate.
* `filterNot` is equal to negating a `filter` operation.
*
* {{{ filterNot('name) { name: String => name contains "a" } }}}
*
* is the same as:
*
* {{{ filter('name) { name: String => !(name contains "a") } }}}
*/
def filterNot[A](f: Fields)(fn: (A) => Boolean)(implicit conv: TupleConverter[A]): Pipe =
filter[A](f)(!fn(_))
/**
* Text files can have corrupted data. If you use this function and a
* cascading trap you can filter out corrupted data from your pipe.
*/
def verifyTypes[A](f: Fields)(implicit conv: TupleConverter[A]): Pipe = {
pipe.filter(f) { (a: A) => true }
}
/**
* Given a function, partitions the pipe into several groups based on the
* output of the function. Then applies a GroupBuilder function on each of the
* groups.
*
* Example:
* pipe
   *     .mapTo(() -> ('age, 'weight)) { ... }
* .partition('age -> 'isAdult) { _ > 18 } { _.average('weight) }
* pipe now contains the average weights of adults and minors.
*/
def partition[A, R](fs: (Fields, Fields))(fn: (A) => R)(
builder: GroupBuilder => GroupBuilder)(
implicit conv: TupleConverter[A],
ord: Ordering[R],
rset: TupleSetter[R]): Pipe = {
val (fromFields, toFields) = fs
conv.assertArityMatches(fromFields)
rset.assertArityMatches(toFields)
val tmpFields = new Fields("__temp__")
tmpFields.setComparator("__temp__", ord)
map(fromFields -> tmpFields)(fn)(conv, TupleSetter.singleSetter[R])
.groupBy(tmpFields)(builder)
.map[R, R](tmpFields -> toFields){ (r: R) => r }(TupleConverter.singleConverter[R], rset)
.discard(tmpFields)
}
/**
* If you use a map function that does not accept TupleEntry args,
* which is the common case, an implicit conversion in GeneratedConversions
* will convert your function into a `(TupleEntry => T)`. The result type
* T is converted to a cascading Tuple by an implicit `TupleSetter[T]`.
* acceptable T types are primitive types, cascading Tuples of those types,
* or `scala.Tuple(1-22)` of those types.
*
* After the map, the input arguments will be set to the output of the map,
* so following with filter or map is fine without a new using statement if
* you mean to operate on the output.
*
* {{{
* map('data -> 'stuff)
* }}}
*
* * if output equals input, REPLACE is used.
* * if output or input is a subset of the other SWAP is used.
* * otherwise we append the new fields (cascading Fields.ALL is used)
*
* {{{
* mapTo('data -> 'stuff)
* }}}
*
* Only the results (stuff) are kept (cascading Fields.RESULTS)
*
* == Note ==
* Using mapTo is the same as using map followed by a project for
* selecting just the output fields
*/
def map[A, T](fs: (Fields, Fields))(fn: A => T)(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
each(fs)(new MapFunction[A, T](fn, _, conv, setter))
}
def mapTo[A, T](fs: (Fields, Fields))(fn: A => T)(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
eachTo(fs)(new MapFunction[A, T](fn, _, conv, setter))
}
def flatMap[A, T](fs: (Fields, Fields))(fn: A => TraversableOnce[T])(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
each(fs)(new FlatMapFunction[A, T](fn, _, conv, setter))
}
def flatMapTo[A, T](fs: (Fields, Fields))(fn: A => TraversableOnce[T])(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
eachTo(fs)(new FlatMapFunction[A, T](fn, _, conv, setter))
}
/**
* Filters all data that is defined for this partial function and then applies that function
*/
def collect[A, T](fs: (Fields, Fields))(fn: PartialFunction[A, T])(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
pipe.each(fs)(new CollectFunction[A, T](fn, _, conv, setter))
}
def collectTo[A, T](fs: (Fields, Fields))(fn: PartialFunction[A, T])(implicit conv: TupleConverter[A], setter: TupleSetter[T]): Pipe = {
conv.assertArityMatches(fs._1)
setter.assertArityMatches(fs._2)
pipe.eachTo(fs)(new CollectFunction[A, T](fn, _, conv, setter))
}
/**
* the same as
*
* {{{
* flatMap(fs) { it : TraversableOnce[T] => it }
* }}}
*
* Common enough to be useful.
*/
def flatten[T](fs: (Fields, Fields))(implicit conv: TupleConverter[TraversableOnce[T]], setter: TupleSetter[T]): Pipe =
flatMap[TraversableOnce[T], T](fs)({ it: TraversableOnce[T] => it })(conv, setter)
/**
* the same as
*
* {{{
* flatMapTo(fs) { it : TraversableOnce[T] => it }
* }}}
*
* Common enough to be useful.
*/
def flattenTo[T](fs: (Fields, Fields))(implicit conv: TupleConverter[TraversableOnce[T]], setter: TupleSetter[T]): Pipe =
flatMapTo[TraversableOnce[T], T](fs)({ it: TraversableOnce[T] => it })(conv, setter)
/**
* Force a materialization to disk in the flow.
* This is useful before crossWithTiny if you filter just before. Ideally scalding/cascading would
* see this (and may in future versions), but for now it is here to aid in hand-tuning jobs
*/
lazy val forceToDisk: Pipe = new Checkpoint(pipe)
/**
* Convenience method for integrating with existing cascading Functions
*/
def each(fs: (Fields, Fields))(fn: Fields => Function[_]) = {
new Each(pipe, fs._1, fn(fs._2), defaultMode(fs._1, fs._2))
}
/**
* Same as above, but only keep the results field.
*/
def eachTo(fs: (Fields, Fields))(fn: Fields => Function[_]) = {
new Each(pipe, fs._1, fn(fs._2), Fields.RESULTS)
}
/**
* This is an analog of the SQL/Excel unpivot function which converts columns of data
* into rows of data. Only the columns given as input fields are expanded in this way.
* For this operation to be reversible, you need to keep some unique key on each row.
* See GroupBuilder.pivot to reverse this operation assuming you leave behind a grouping key
* == Example ==
* {{{
* pipe.unpivot(('w,'x,'y,'z) -> ('feature, 'value))
* }}}
*
* takes rows like:
* {{{
* key, w, x, y, z
* 1, 2, 3, 4, 5
* 2, 8, 7, 6, 5
* }}}
* to:
* {{{
* key, feature, value
* 1, w, 2
* 1, x, 3
* 1, y, 4
* }}}
* etc...
*/
def unpivot(fieldDef: (Fields, Fields)): Pipe = {
assert(fieldDef._2.size == 2, "Must specify exactly two Field names for the results")
// toKeyValueList comes from TupleConversions
pipe.flatMap(fieldDef) { te: TupleEntry => TupleConverter.KeyValueList(te) }
.discard(fieldDef._1)
}
/**
* Keep at most n elements. This is implemented by keeping
* approximately n/k elements on each of the k mappers or reducers (whichever we wind
* up being scheduled on).
*/
def limit(n: Long): Pipe = new Each(pipe, new Limit(n))
/**
* Sample a fraction of elements. fraction should be between 0.00 (0%) and 1.00 (100%)
* you can provide a seed to get reproducible results
*
*/
def sample(fraction: Double): Pipe = new Each(pipe, new Sample(fraction))
def sample(fraction: Double, seed: Long): Pipe = new Each(pipe, new Sample(seed, fraction))
/**
* Sample fraction of elements with return. fraction should be between 0.00 (0%) and 1.00 (100%)
* you can provide a seed to get reproducible results
*
*/
def sampleWithReplacement(fraction: Double): Pipe = new Each(pipe, new SampleWithReplacement(fraction), Fields.ALL)
def sampleWithReplacement(fraction: Double, seed: Int): Pipe = new Each(pipe, new SampleWithReplacement(fraction, seed), Fields.ALL)
/**
* Print all the tuples that pass to stderr
*/
def debug: Pipe = debug(PipeDebug())
/**
* Print the tuples that pass with the options configured in debugger
* For instance:
* {{{ debug(PipeDebug().toStdOut.printTuplesEvery(100)) }}}
*/
def debug(dbg: PipeDebug): Pipe = dbg(pipe)
/**
* Write all the tuples to the given source and return this Pipe
*/
def write(outsource: Source)(implicit flowDef: FlowDef, mode: Mode) = {
/* This code is to hack around a known Cascading bug that they have decided not to fix. In a graph:
A -> FlatMap -> write(tsv) -> FlatMap
     in the second flatmap, cascading will read from the written tsv when running it. However, Tsv uses toString, which is not a bijection.
     Here we stick an identity function in before the tsv write to force cascading to do any fork/split beforehand.
*/
val writePipe: Pipe = outsource match {
case t: Tsv => new Each(pipe, Fields.ALL, IdentityFunction, Fields.REPLACE)
case _ => pipe
}
outsource.writeFrom(writePipe)(flowDef, mode)
pipe
}
/**
* Adds a trap to the current pipe,
* which will capture all exceptions that occur in this pipe
* and save them to the trapsource given
*
* Traps do not include the original fields in a tuple,
* only the fields seen in an operation.
* Traps also do not include any exception information.
*
* There can only be at most one trap for each pipe.
*/
def addTrap(trapsource: Source)(implicit flowDef: FlowDef, mode: Mode) = {
flowDef.addTrap(pipe, trapsource.createTap(Write)(mode))
pipe
}
/**
   * Divides each value of this field by the sum of all values; assumes without checking that division is supported
   * on this type and that the sum is not zero
   *
   * If those assumptions do not hold, this will throw an exception -- consider checking the sum separately and/or using addTrap
*
* in some cases, crossWithTiny has been broken, the implementation supports a work-around
*/
def normalize(f: Fields, useTiny: Boolean = true): Pipe = {
val total = groupAll { _.sum[Double](f -> '__total_for_normalize__) }
(if (useTiny) {
crossWithTiny(total)
} else {
crossWithSmaller(total)
})
.map(Fields.merge(f, '__total_for_normalize__) -> f) { args: (Double, Double) =>
args._1 / args._2
}
}
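  // Illustrative (field name assumed): rescale 'score so that all scores sum to 1.0.
  //   pipe.normalize('score)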
/**
* Maps the input fields into an output field of type T. For example:
*
* {{{
* pipe.pack[(Int, Int)] (('field1, 'field2) -> 'field3)
* }}}
*
* will pack fields 'field1 and 'field2 to field 'field3, as long as 'field1 and 'field2
   * can be cast into integers. The output field 'field3 will be of tuple `(Int, Int)`
*
*/
def pack[T](fs: (Fields, Fields))(implicit packer: TuplePacker[T], setter: TupleSetter[T]): Pipe = {
val (fromFields, toFields) = fs
assert(toFields.size == 1, "Can only output 1 field in pack")
val conv = packer.newConverter(fromFields)
pipe.map(fs) { input: T => input } (conv, setter)
}
/**
* Same as pack but only the to fields are preserved.
*/
def packTo[T](fs: (Fields, Fields))(implicit packer: TuplePacker[T], setter: TupleSetter[T]): Pipe = {
val (fromFields, toFields) = fs
assert(toFields.size == 1, "Can only output 1 field in pack")
val conv = packer.newConverter(fromFields)
pipe.mapTo(fs) { input: T => input } (conv, setter)
}
/**
* The opposite of pack. Unpacks the input field of type `T` into
* the output fields. For example:
*
* {{{
* pipe.unpack[(Int, Int)] ('field1 -> ('field2, 'field3))
* }}}
*
* will unpack 'field1 into 'field2 and 'field3
*/
def unpack[T](fs: (Fields, Fields))(implicit unpacker: TupleUnpacker[T], conv: TupleConverter[T]): Pipe = {
val (fromFields, toFields) = fs
assert(fromFields.size == 1, "Can only take 1 input field in unpack")
val fields = (fromFields, unpacker.getResultFields(toFields))
val setter = unpacker.newSetter(toFields)
pipe.map(fields) { input: T => input } (conv, setter)
}
/**
* Same as unpack but only the to fields are preserved.
*/
def unpackTo[T](fs: (Fields, Fields))(implicit unpacker: TupleUnpacker[T], conv: TupleConverter[T]): Pipe = {
val (fromFields, toFields) = fs
assert(fromFields.size == 1, "Can only take 1 input field in unpack")
val fields = (fromFields, unpacker.getResultFields(toFields))
val setter = unpacker.newSetter(toFields)
pipe.mapTo(fields) { input: T => input } (conv, setter)
}
/**
* Set of pipes reachable from this pipe (transitive closure of 'Pipe.getPrevious')
*/
def upstreamPipes: Set[Pipe] =
Iterator
.iterate(Seq(pipe))(pipes => for (p <- pipes; prev <- p.getPrevious) yield prev)
.takeWhile(_.length > 0)
.flatten
.toSet
/**
* This finds all the boxed serializations stored in the flow state map for this
* flowdef. We then find all the pipes back in the DAG from this pipe and apply
* those serializations.
*/
private[scalding] def applyFlowConfigProperties(flowDef: FlowDef): Pipe = {
case class ToVisit[T](queue: Queue[T], inQueue: Set[T]) {
def maybeAdd(t: T): ToVisit[T] = if (inQueue(t)) this else {
ToVisit(queue :+ t, inQueue + t)
}
def next: Option[(T, ToVisit[T])] =
if (inQueue.isEmpty) None
else Some((queue.head, ToVisit(queue.tail, inQueue - queue.head)))
}
@annotation.tailrec
def go(p: Pipe, visited: Set[Pipe], toVisit: ToVisit[Pipe]): Set[Pipe] = {
val notSeen: Set[Pipe] = p.getPrevious.filter(i => !visited.contains(i)).toSet
val nextVisited: Set[Pipe] = visited + p
val nextToVisit = notSeen.foldLeft(toVisit) { case (prev, n) => prev.maybeAdd(n) }
nextToVisit.next match {
case Some((h, innerNextToVisit)) => go(h, nextVisited, innerNextToVisit)
case _ => nextVisited
}
}
val allPipes = go(pipe, Set[Pipe](), ToVisit[Pipe](Queue.empty, Set.empty))
FlowStateMap.get(flowDef).foreach { fstm =>
fstm.flowConfigUpdates.foreach {
case (k, v) =>
allPipes.foreach { p =>
p.getStepConfigDef().setProperty(k, v)
}
}
}
pipe
}
}
/**
* A simple trait for releasable resource. Provides noop implementation.
*/
trait Stateful {
def release(): Unit = ()
}
|
tdyas/scalding
|
scalding-core/src/main/scala/com/twitter/scalding/RichPipe.scala
|
Scala
|
apache-2.0
| 27,767 |
package core
import java.util.Properties
import akka.event.Logging
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig}
import akka.actor._
import org.apache.kafka.clients.producer.{ProducerRecord,Callback,RecordMetadata}
import domain.Tweet
class ProducerActor extends Actor {
val log = Logging(context.system, this)
val kafkaBrokers = sys.env("TWEET_PRODUCER_KAFKA_BROKERS")
val kafkaTopic = sys.env("TWEET_PRODUCER_KAFKA_TOPIC")
val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBrokers)
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
val producer = new KafkaProducer[String, String](props)
def receive: Receive = {
case tweet: Tweet =>
val record = new ProducerRecord[String,String](kafkaTopic, tweet.json)
producer.send(record, new Callback {
override def onCompletion(result: RecordMetadata, exception: Exception) {
if (exception != null) {
            log.warning("Failed to send record: {}", exception)
}
}
})
}
}
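// Illustrative wiring sketch (not part of the original file; the ActorSystem name and the
// Tweet value are assumptions): any Tweet sent to the actor is forwarded to the Kafka topic.
//   val system = ActorSystem("twitter-pipeline")
//   val producerRef = system.actorOf(Props[ProducerActor], "kafka-producer")
//   producerRef ! someTweet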
|
cesleem/iot-demo
|
twitter/src/main/scala/core/ProducerActor.scala
|
Scala
|
apache-2.0
| 1,236 |
import sbt._
import sbt.Keys._
object Common {
val akkaV = "2.3.9"
val dependencies = Seq(
// "com.typesafe.akka" %% "akka-actor" % akkaV,
// "com.typesafe.akka" %% "akka-testkit" % akkaV % "test",
"org.apache.spark" %% "spark-core" % "1.5.1" % "provided",
"commons-io" % "commons-io" % "2.4",
// "org.scala-lang.modules" %% "scala-pickling" % "0.10.1",
"org.specs2" %% "specs2-core" % "2.3.11" % "test",
"org.scalatest" %% "scalatest" % "2.2.4" % "test"
// "org.scalacheck" %% "scalacheck" % "1.12.5" % "test"
)
val names: Seq[Setting[_]] = Seq(
version := "1.1",
scalaVersion := "2.11.8",
organization := "chesnais.paul"
)
val settings: Seq[Setting[_]] = names ++ Seq(
libraryDependencies ++= dependencies,
resolvers += "softprops-maven" at "https://dl.bintray.com/content/softprops/maven",
resolvers += "Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots",
scalacOptions ++= Seq(
"-deprecation",
"-encoding", "UTF-8",
"-feature",
"-language:existentials",
"-language:higherKinds",
"-language:implicitConversions",
"-unchecked",
"-Ywarn-dead-code",
"-Ywarn-numeric-widen",
"-Xlint",
"-Ywarn-unused-import"
),
javaOptions ++= Seq("-d64", "-Xms1g", "-Xmx4g")
)
}
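// Illustrative usage in a build definition (project name assumed):
//   lazy val tetris = (project in file("tetris")).settings(Common.settings: _*)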
|
PapaCharlie/TetrisBot
|
tetris/project/Common.scala
|
Scala
|
mit
| 1,461 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import java.io._
import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
import com.esotericsoftware.kryo.io.{Input, Output}
import org.apache.spark.{SparkConf, SparkEnv, SparkException}
import org.apache.spark.internal.config.{BUFFER_PAGESIZE, MEMORY_OFFHEAP_ENABLED}
import org.apache.spark.memory._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical.BroadcastMode
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.types.LongType
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.map.BytesToBytesMap
import org.apache.spark.util.{KnownSizeEstimation, Utils}
/**
* Interface for a hashed relation by some key. Use [[HashedRelation.apply]] to create a concrete
* object.
*/
private[execution] sealed trait HashedRelation extends KnownSizeEstimation {
/**
* Returns matched rows.
*
   * Returns null if there are no matched rows.
*/
def get(key: InternalRow): Iterator[InternalRow]
/**
* Returns matched rows for a key that has only one column with LongType.
*
   * Returns null if there are no matched rows.
*/
def get(key: Long): Iterator[InternalRow] = {
throw new UnsupportedOperationException
}
/**
* Returns the matched single row.
*/
def getValue(key: InternalRow): InternalRow
/**
   * Returns the matched single row for a key that has only one column of LongType.
*/
def getValue(key: Long): InternalRow = {
throw new UnsupportedOperationException
}
/**
* Returns an iterator for key index and matched rows.
*
   * Returns null if there are no matched rows.
*/
def getWithKeyIndex(key: InternalRow): Iterator[ValueRowWithKeyIndex] = {
throw new UnsupportedOperationException
}
/**
* Returns key index and matched single row.
* This is for unique key case.
*
   * Returns null if there are no matched rows.
*/
def getValueWithKeyIndex(key: InternalRow): ValueRowWithKeyIndex = {
throw new UnsupportedOperationException
}
/**
   * Returns an iterator over key indexes and rows of InternalRow type.
*/
def valuesWithKeyIndex(): Iterator[ValueRowWithKeyIndex] = {
throw new UnsupportedOperationException
}
/**
   * Returns the maximum number of allowed key indexes.
*/
def maxNumKeysIndex: Int = {
throw new UnsupportedOperationException
}
/**
* Returns true iff all the keys are unique.
*/
def keyIsUnique: Boolean
/**
* Returns an iterator for keys of InternalRow type.
*/
def keys(): Iterator[InternalRow]
/**
* Returns a read-only copy of this, to be safely used in current thread.
*/
def asReadOnlyCopy(): HashedRelation
/**
* Release any used resources.
*/
def close(): Unit
}
private[execution] object HashedRelation {
/**
* Create a HashedRelation from an Iterator of InternalRow.
*
* @param allowsNullKey Allow NULL keys in HashedRelation.
* This is used for full outer join in `ShuffledHashJoinExec` only.
*/
def apply(
input: Iterator[InternalRow],
key: Seq[Expression],
sizeEstimate: Int = 64,
taskMemoryManager: TaskMemoryManager = null,
isNullAware: Boolean = false,
allowsNullKey: Boolean = false): HashedRelation = {
val mm = Option(taskMemoryManager).getOrElse {
new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0)
}
if (!input.hasNext && !allowsNullKey) {
EmptyHashedRelation
} else if (key.length == 1 && key.head.dataType == LongType && !allowsNullKey) {
// NOTE: LongHashedRelation does not support NULL keys.
LongHashedRelation(input, key, sizeEstimate, mm, isNullAware)
} else {
UnsafeHashedRelation(input, key, sizeEstimate, mm, isNullAware, allowsNullKey)
}
}
}
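// Illustrative sketch (not part of the original file; `rowIter` stands for an assumed
// Iterator[InternalRow]). With a single non-nullable LongType key and null keys disallowed,
// `apply` dispatches to LongHashedRelation; otherwise it builds an UnsafeHashedRelation.
//   val relation = HashedRelation(rowIter, Seq(BoundReference(0, LongType, nullable = false)))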
/**
* A wrapper for key index and value in InternalRow type.
* Designed to be instantiated once per thread and reused.
*/
private[execution] class ValueRowWithKeyIndex {
private var keyIndex: Int = _
private var value: InternalRow = _
/** Updates this ValueRowWithKeyIndex by updating its key index. Returns itself. */
def withNewKeyIndex(newKeyIndex: Int): ValueRowWithKeyIndex = {
keyIndex = newKeyIndex
this
}
/** Updates this ValueRowWithKeyIndex by updating its value. Returns itself. */
def withNewValue(newValue: InternalRow): ValueRowWithKeyIndex = {
value = newValue
this
}
/** Updates this ValueRowWithKeyIndex. Returns itself. */
def update(newKeyIndex: Int, newValue: InternalRow): ValueRowWithKeyIndex = {
keyIndex = newKeyIndex
value = newValue
this
}
def getKeyIndex: Int = {
keyIndex
}
def getValue: InternalRow = {
value
}
}
/**
* A HashedRelation for UnsafeRow, which is backed BytesToBytesMap.
*
* It's serialized in the following format:
* [number of keys]
* [size of key] [size of value] [key bytes] [bytes for value]
*/
private[joins] class UnsafeHashedRelation(
private var numKeys: Int,
private var numFields: Int,
private var binaryMap: BytesToBytesMap)
extends HashedRelation with Externalizable with KryoSerializable {
private[joins] def this() = this(0, 0, null) // Needed for serialization
override def keyIsUnique: Boolean = binaryMap.numKeys() == binaryMap.numValues()
override def asReadOnlyCopy(): UnsafeHashedRelation = {
new UnsafeHashedRelation(numKeys, numFields, binaryMap)
}
override def estimatedSize: Long = binaryMap.getTotalMemoryConsumption
// re-used in get()/getValue()/getWithKeyIndex()/getValueWithKeyIndex()/valuesWithKeyIndex()
var resultRow = new UnsafeRow(numFields)
// re-used in getWithKeyIndex()/getValueWithKeyIndex()/valuesWithKeyIndex()
val valueRowWithKeyIndex = new ValueRowWithKeyIndex
override def get(key: InternalRow): Iterator[InternalRow] = {
val unsafeKey = key.asInstanceOf[UnsafeRow]
val map = binaryMap // avoid the compiler error
val loc = new map.Location // this could be allocated in stack
binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset,
unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode())
if (loc.isDefined) {
new Iterator[UnsafeRow] {
private var _hasNext = true
override def hasNext: Boolean = _hasNext
override def next(): UnsafeRow = {
resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength)
_hasNext = loc.nextValue()
resultRow
}
}
} else {
null
}
}
def getValue(key: InternalRow): InternalRow = {
val unsafeKey = key.asInstanceOf[UnsafeRow]
val map = binaryMap // avoid the compiler error
val loc = new map.Location // this could be allocated in stack
binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset,
unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode())
if (loc.isDefined) {
resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength)
resultRow
} else {
null
}
}
override def getWithKeyIndex(key: InternalRow): Iterator[ValueRowWithKeyIndex] = {
val unsafeKey = key.asInstanceOf[UnsafeRow]
val map = binaryMap // avoid the compiler error
val loc = new map.Location // this could be allocated in stack
binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset,
unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode())
if (loc.isDefined) {
valueRowWithKeyIndex.withNewKeyIndex(loc.getKeyIndex)
new Iterator[ValueRowWithKeyIndex] {
private var _hasNext = true
override def hasNext: Boolean = _hasNext
override def next(): ValueRowWithKeyIndex = {
resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength)
_hasNext = loc.nextValue()
valueRowWithKeyIndex.withNewValue(resultRow)
}
}
} else {
null
}
}
override def getValueWithKeyIndex(key: InternalRow): ValueRowWithKeyIndex = {
val unsafeKey = key.asInstanceOf[UnsafeRow]
val map = binaryMap // avoid the compiler error
val loc = new map.Location // this could be allocated in stack
binaryMap.safeLookup(unsafeKey.getBaseObject, unsafeKey.getBaseOffset,
unsafeKey.getSizeInBytes, loc, unsafeKey.hashCode())
if (loc.isDefined) {
resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength)
valueRowWithKeyIndex.update(loc.getKeyIndex, resultRow)
} else {
null
}
}
override def valuesWithKeyIndex(): Iterator[ValueRowWithKeyIndex] = {
val iter = binaryMap.iteratorWithKeyIndex()
new Iterator[ValueRowWithKeyIndex] {
override def hasNext: Boolean = iter.hasNext
override def next(): ValueRowWithKeyIndex = {
if (!hasNext) {
throw QueryExecutionErrors.endOfIteratorError()
}
val loc = iter.next()
resultRow.pointTo(loc.getValueBase, loc.getValueOffset, loc.getValueLength)
valueRowWithKeyIndex.update(loc.getKeyIndex, resultRow)
}
}
}
override def maxNumKeysIndex: Int = {
binaryMap.maxNumKeysIndex
}
override def keys(): Iterator[InternalRow] = {
val iter = binaryMap.iterator()
new Iterator[InternalRow] {
val unsafeRow = new UnsafeRow(numKeys)
override def hasNext: Boolean = {
iter.hasNext
}
override def next(): InternalRow = {
if (!hasNext) {
throw QueryExecutionErrors.endOfIteratorError()
} else {
val loc = iter.next()
unsafeRow.pointTo(loc.getKeyBase, loc.getKeyOffset, loc.getKeyLength)
unsafeRow
}
}
}
}
override def close(): Unit = {
binaryMap.free()
}
override def writeExternal(out: ObjectOutput): Unit = Utils.tryOrIOException {
write(out.writeInt, out.writeLong, out.write)
}
override def write(kryo: Kryo, out: Output): Unit = Utils.tryOrIOException {
write(out.writeInt, out.writeLong, out.write)
}
private def write(
writeInt: (Int) => Unit,
writeLong: (Long) => Unit,
writeBuffer: (Array[Byte], Int, Int) => Unit) : Unit = {
writeInt(numFields)
// TODO: move these into BytesToBytesMap
writeLong(binaryMap.numKeys())
writeLong(binaryMap.numValues())
var buffer = new Array[Byte](64)
def write(base: Object, offset: Long, length: Int): Unit = {
if (buffer.length < length) {
buffer = new Array[Byte](length)
}
Platform.copyMemory(base, offset, buffer, Platform.BYTE_ARRAY_OFFSET, length)
writeBuffer(buffer, 0, length)
}
val iter = binaryMap.iterator()
while (iter.hasNext) {
val loc = iter.next()
// [key size] [values size] [key bytes] [value bytes]
writeInt(loc.getKeyLength)
writeInt(loc.getValueLength)
write(loc.getKeyBase, loc.getKeyOffset, loc.getKeyLength)
write(loc.getValueBase, loc.getValueOffset, loc.getValueLength)
}
}
override def readExternal(in: ObjectInput): Unit = Utils.tryOrIOException {
read(() => in.readInt(), () => in.readLong(), in.readFully)
}
private def read(
readInt: () => Int,
readLong: () => Long,
readBuffer: (Array[Byte], Int, Int) => Unit): Unit = {
numFields = readInt()
resultRow = new UnsafeRow(numFields)
val nKeys = readLong()
val nValues = readLong()
// This is used in Broadcast, shared by multiple tasks, so we use on-heap memory
// TODO(josh): This needs to be revisited before we merge this patch; making this change now
// so that tests compile:
val taskMemoryManager = new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0)
val pageSizeBytes = Option(SparkEnv.get).map(_.memoryManager.pageSizeBytes)
.getOrElse(new SparkConf().get(BUFFER_PAGESIZE).getOrElse(16L * 1024 * 1024))
// TODO(josh): We won't need this dummy memory manager after future refactorings; revisit
// during code review
binaryMap = new BytesToBytesMap(
taskMemoryManager,
(nKeys * 1.5 + 1).toInt, // reduce hash collision
pageSizeBytes)
var i = 0
var keyBuffer = new Array[Byte](1024)
var valuesBuffer = new Array[Byte](1024)
while (i < nValues) {
val keySize = readInt()
val valuesSize = readInt()
if (keySize > keyBuffer.length) {
keyBuffer = new Array[Byte](keySize)
}
readBuffer(keyBuffer, 0, keySize)
if (valuesSize > valuesBuffer.length) {
valuesBuffer = new Array[Byte](valuesSize)
}
readBuffer(valuesBuffer, 0, valuesSize)
val loc = binaryMap.lookup(keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize)
val putSucceeded = loc.append(keyBuffer, Platform.BYTE_ARRAY_OFFSET, keySize,
valuesBuffer, Platform.BYTE_ARRAY_OFFSET, valuesSize)
if (!putSucceeded) {
binaryMap.free()
throw QueryExecutionErrors.cannotAllocateMemoryToGrowBytesToBytesMapError()
}
i += 1
}
}
override def read(kryo: Kryo, in: Input): Unit = Utils.tryOrIOException {
read(() => in.readInt(), () => in.readLong(), in.readBytes)
}
}
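// A hedged sketch of the on-the-wire layout produced by write()/read() above, expressed
// against java.io.DataOutputStream instead of the real Externalizable/Kryo plumbing.
// The field order mirrors the implementation: numFields, numKeys, numValues, then
// (key size, value size, key bytes, value bytes) for every pair. Purely illustrative;
// the object and method names are made up.
private[joins] object UnsafeHashedRelationFormatSketch {
  def writePairs(
      out: java.io.DataOutputStream,
      numFields: Int,
      numKeys: Long,
      pairs: Seq[(Array[Byte], Array[Byte])]): Unit = {
    out.writeInt(numFields)
    out.writeLong(numKeys)            // number of distinct keys
    out.writeLong(pairs.size.toLong)  // number of (key, value) pairs
    pairs.foreach { case (keyBytes, valueBytes) =>
      out.writeInt(keyBytes.length)   // [key size]
      out.writeInt(valueBytes.length) // [value size]
      out.write(keyBytes)             // [key bytes]
      out.write(valueBytes)           // [value bytes]
    }
  }
}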
private[joins] object UnsafeHashedRelation {
def apply(
input: Iterator[InternalRow],
key: Seq[Expression],
sizeEstimate: Int,
taskMemoryManager: TaskMemoryManager,
isNullAware: Boolean = false,
allowsNullKey: Boolean = false): HashedRelation = {
require(!(isNullAware && allowsNullKey),
"isNullAware and allowsNullKey cannot be enabled at same time")
val pageSizeBytes = Option(SparkEnv.get).map(_.memoryManager.pageSizeBytes)
.getOrElse(new SparkConf().get(BUFFER_PAGESIZE).getOrElse(16L * 1024 * 1024))
val binaryMap = new BytesToBytesMap(
taskMemoryManager,
      // Only 70% of the slots can be used before growing; extra capacity helps to reduce collisions
(sizeEstimate * 1.5 + 1).toInt,
pageSizeBytes)
// Create a mapping of buildKeys -> rows
val keyGenerator = UnsafeProjection.create(key)
var numFields = 0
while (input.hasNext) {
val row = input.next().asInstanceOf[UnsafeRow]
numFields = row.numFields()
val key = keyGenerator(row)
if (!key.anyNull || allowsNullKey) {
val loc = binaryMap.lookup(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes)
val success = loc.append(
key.getBaseObject, key.getBaseOffset, key.getSizeInBytes,
row.getBaseObject, row.getBaseOffset, row.getSizeInBytes)
if (!success) {
binaryMap.free()
throw QueryExecutionErrors.cannotAcquireMemoryToBuildUnsafeHashedRelationError()
}
} else if (isNullAware) {
return HashedRelationWithAllNullKeys
}
}
new UnsafeHashedRelation(key.size, numFields, binaryMap)
}
}
/**
* An append-only hash map mapping from key of Long to UnsafeRow.
*
* The underlying bytes of all values (UnsafeRows) are packed together as a single byte array
* (`page`) in this format:
*
 *  [bytes of row1][address1][bytes of row2][address2] ...
*
 *  address1 (8 bytes) is the offset and size of the next value for the same key as row1; any key
 *  could have multiple values. The address at the end of the last value for every key is 0.
*
* The keys and addresses of their values could be stored in two modes:
*
* 1) sparse mode: the keys and addresses are stored in `array` as:
*
* [key1][address1][key2][address2]...[]
*
 * address1 (Long) is the offset (in `page`) and size of the value for key1. The position of key1
 * is determined by a hash of key1 (see `firstSlot`), and linear probing over the slots
 * (see `nextSlot`) is used to resolve hash collisions.
*
* 2) dense mode: all the addresses are packed into a single array of long, as:
*
* [address1] [address2] ...
*
* address1 (Long) is the offset (in `page`) and size of the value for key1, the position is
* determined by `key1 - minKey`.
*
 * The map is created in sparse mode, then key-value pairs can be appended to it. Once appending
 * is finished, the caller can call optimize() to try to turn the map into dense mode, which is
 * faster to probe.
*
* see http://java-performance.info/implementing-world-fastest-java-int-to-int-hash-map/
*/
private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, capacity: Int)
extends MemoryConsumer(mm, MemoryMode.ON_HEAP) with Externalizable with KryoSerializable {
// Whether the keys are stored in dense mode or not.
private var isDense = false
// The minimum key
private var minKey = Long.MaxValue
// The maximum key
private var maxKey = Long.MinValue
// The array to store the key and offset of UnsafeRow in the page.
//
  // Sparse mode: [key1] [offset1 | size1] [key2] [offset2 | size2] ...
// Dense mode: [offset1 | size1] [offset2 | size2]
private var array: Array[Long] = null
private var mask: Int = 0
// The page to store all bytes of UnsafeRow and the pointer to next rows.
// [row1][pointer1] [row2][pointer2]
private var page: Array[Long] = null
// Current write cursor in the page.
private var cursor: Long = Platform.LONG_ARRAY_OFFSET
// The number of bits for size in address
private val SIZE_BITS = 28
private val SIZE_MASK = 0xfffffff
// The total number of values of all keys.
private var numValues = 0L
// The number of unique keys.
private var numKeys = 0L
// needed by serializer
def this() = {
this(
new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0),
0)
}
private def ensureAcquireMemory(size: Long): Unit = {
// do not support spilling
val got = acquireMemory(size)
if (got < size) {
freeMemory(got)
throw QueryExecutionErrors.cannotAcquireMemoryToBuildLongHashedRelationError(size, got)
}
}
private def init(): Unit = {
if (mm != null) {
require(capacity < 512000000, "Cannot broadcast 512 million or more rows")
var n = 1
while (n < capacity) n *= 2
ensureAcquireMemory(n * 2L * 8 + (1 << 20))
array = new Array[Long](n * 2)
mask = n * 2 - 2
page = new Array[Long](1 << 17) // 1M bytes
}
}
init()
def spill(size: Long, trigger: MemoryConsumer): Long = 0L
/**
* Returns whether all the keys are unique.
*/
def keyIsUnique: Boolean = numKeys == numValues
/**
* Returns total memory consumption.
*/
def getTotalMemoryConsumption: Long = array.length * 8L + page.length * 8L
/**
   * Returns the first slot of the array that stores the keys (sparse mode).
*/
private def firstSlot(key: Long): Int = {
val h = key * 0x9E3779B9L
(h ^ (h >> 32)).toInt & mask
}
/**
* Returns the next probe in the array.
*/
private def nextSlot(pos: Int): Int = (pos + 2) & mask
private[this] def toAddress(offset: Long, size: Int): Long = {
((offset - Platform.LONG_ARRAY_OFFSET) << SIZE_BITS) | size
}
private[this] def toOffset(address: Long): Long = {
(address >>> SIZE_BITS) + Platform.LONG_ARRAY_OFFSET
}
private[this] def toSize(address: Long): Int = {
(address & SIZE_MASK).toInt
}
private def getRow(address: Long, resultRow: UnsafeRow): UnsafeRow = {
resultRow.pointTo(page, toOffset(address), toSize(address))
resultRow
}
/**
* Returns the single UnsafeRow for given key, or null if not found.
*/
def getValue(key: Long, resultRow: UnsafeRow): UnsafeRow = {
if (isDense) {
if (key >= minKey && key <= maxKey) {
val value = array((key - minKey).toInt)
if (value > 0) {
return getRow(value, resultRow)
}
}
} else {
var pos = firstSlot(key)
while (array(pos + 1) != 0) {
if (array(pos) == key) {
return getRow(array(pos + 1), resultRow)
}
pos = nextSlot(pos)
}
}
null
}
/**
* Returns an iterator of UnsafeRow for multiple linked values.
*/
private def valueIter(address: Long, resultRow: UnsafeRow): Iterator[UnsafeRow] = {
new Iterator[UnsafeRow] {
var addr = address
override def hasNext: Boolean = addr != 0
override def next(): UnsafeRow = {
val offset = toOffset(addr)
val size = toSize(addr)
resultRow.pointTo(page, offset, size)
addr = Platform.getLong(page, offset + size)
resultRow
}
}
}
/**
* Returns an iterator for all the values for the given key, or null if no value found.
*/
def get(key: Long, resultRow: UnsafeRow): Iterator[UnsafeRow] = {
if (isDense) {
if (key >= minKey && key <= maxKey) {
val value = array((key - minKey).toInt)
if (value > 0) {
return valueIter(value, resultRow)
}
}
} else {
var pos = firstSlot(key)
while (array(pos + 1) != 0) {
if (array(pos) == key) {
return valueIter(array(pos + 1), resultRow)
}
pos = nextSlot(pos)
}
}
null
}
/**
   * Returns an iterator over all keys, in both sparse and dense mode.
*/
def keys(): Iterator[InternalRow] = {
val row = new GenericInternalRow(1)
// a) in dense mode the array stores the address
// => (k, v) = (minKey + index, array(index))
// b) in sparse mode the array stores both the key and the address
// => (k, v) = (array(index), array(index+1))
new Iterator[InternalRow] {
// cursor that indicates the position of the next key which was not read by a next() call
var pos = 0
      // when we iterate in sparse mode we need to jump two positions at a time
val step = if (isDense) 0 else 1
override def hasNext: Boolean = {
// go to the next key if the current key slot is empty
while (pos + step < array.length) {
if (array(pos + step) > 0) {
return true
}
pos += step + 1
}
false
}
override def next(): InternalRow = {
if (!hasNext) {
throw QueryExecutionErrors.endOfIteratorError()
} else {
// the key is retrieved based on the map mode
val ret = if (isDense) minKey + pos else array(pos)
// advance the cursor to the next index
pos += step + 1
row.setLong(0, ret)
row
}
}
}
}
/**
* Appends the key and row into this map.
*/
def append(key: Long, row: UnsafeRow): Unit = {
val sizeInBytes = row.getSizeInBytes
if (sizeInBytes >= (1 << SIZE_BITS)) {
throw QueryExecutionErrors.rowLargerThan256MUnsupportedError()
}
if (key < minKey) {
minKey = key
}
if (key > maxKey) {
maxKey = key
}
grow(row.getSizeInBytes)
// copy the bytes of UnsafeRow
val offset = cursor
Platform.copyMemory(row.getBaseObject, row.getBaseOffset, page, cursor, row.getSizeInBytes)
cursor += row.getSizeInBytes
Platform.putLong(page, cursor, 0)
cursor += 8
numValues += 1
updateIndex(key, toAddress(offset, row.getSizeInBytes))
}
/**
* Update the address in array for given key.
*/
private def updateIndex(key: Long, address: Long): Unit = {
var pos = firstSlot(key)
assert(numKeys < array.length / 2)
while (array(pos) != key && array(pos + 1) != 0) {
pos = nextSlot(pos)
}
if (array(pos + 1) == 0) {
// this is the first value for this key, put the address in array.
array(pos) = key
array(pos + 1) = address
numKeys += 1
if (numKeys * 4 > array.length) {
// reach half of the capacity
if (array.length < (1 << 30)) {
// Cannot allocate an array with 2G elements
growArray()
} else if (numKeys > array.length / 2 * 0.75) {
// The fill ratio should be less than 0.75
throw QueryExecutionErrors.cannotBuildHashedRelationWithUniqueKeysExceededError()
}
}
} else {
// there are some values for this key, put the address in the front of them.
val pointer = toOffset(address) + toSize(address)
Platform.putLong(page, pointer, array(pos + 1))
array(pos + 1) = address
}
}
private def grow(inputRowSize: Int): Unit = {
    // There are 8 bytes for the pointer to the next value
val neededNumWords = (cursor - Platform.LONG_ARRAY_OFFSET + 8 + inputRowSize + 7) / 8
if (neededNumWords > page.length) {
if (neededNumWords > (1 << 30)) {
throw QueryExecutionErrors.cannotBuildHashedRelationLargerThan8GError()
}
val newNumWords = math.max(neededNumWords, math.min(page.length * 2, 1 << 30))
ensureAcquireMemory(newNumWords * 8L)
val newPage = new Array[Long](newNumWords.toInt)
Platform.copyMemory(page, Platform.LONG_ARRAY_OFFSET, newPage, Platform.LONG_ARRAY_OFFSET,
cursor - Platform.LONG_ARRAY_OFFSET)
val used = page.length
page = newPage
freeMemory(used * 8L)
}
}
private def growArray(): Unit = {
var old_array = array
val n = array.length
numKeys = 0
ensureAcquireMemory(n * 2 * 8L)
array = new Array[Long](n * 2)
mask = n * 2 - 2
var i = 0
while (i < old_array.length) {
if (old_array(i + 1) > 0) {
updateIndex(old_array(i), old_array(i + 1))
}
i += 2
}
old_array = null // release the reference to old array
freeMemory(n * 8L)
}
/**
* Try to turn the map into dense mode, which is faster to probe.
*/
def optimize(): Unit = {
val range = maxKey - minKey
// Convert to dense mode if it does not require more memory or could fit within L1 cache
// SPARK-16740: Make sure range doesn't overflow if minKey has a large negative value
if (range >= 0 && (range < array.length || range < 1024)) {
try {
ensureAcquireMemory((range + 1) * 8L)
} catch {
case e: SparkException =>
          // there is not enough memory to convert
return
}
val denseArray = new Array[Long]((range + 1).toInt)
var i = 0
while (i < array.length) {
if (array(i + 1) > 0) {
val idx = (array(i) - minKey).toInt
denseArray(idx) = array(i + 1)
}
i += 2
}
val old_length = array.length
array = denseArray
isDense = true
freeMemory(old_length * 8L)
}
}
/**
* Free all the memory acquired by this map.
*/
def free(): Unit = {
if (page != null) {
freeMemory(page.length * 8L)
page = null
}
if (array != null) {
freeMemory(array.length * 8L)
array = null
}
}
private def writeLongArray(
writeBuffer: (Array[Byte], Int, Int) => Unit,
arr: Array[Long],
len: Int): Unit = {
val buffer = new Array[Byte](4 << 10)
var offset: Long = Platform.LONG_ARRAY_OFFSET
val end = len * 8L + Platform.LONG_ARRAY_OFFSET
while (offset < end) {
val size = Math.min(buffer.length, end - offset)
Platform.copyMemory(arr, offset, buffer, Platform.BYTE_ARRAY_OFFSET, size)
writeBuffer(buffer, 0, size.toInt)
offset += size
}
}
private def write(
writeBoolean: (Boolean) => Unit,
writeLong: (Long) => Unit,
writeBuffer: (Array[Byte], Int, Int) => Unit): Unit = {
writeBoolean(isDense)
writeLong(minKey)
writeLong(maxKey)
writeLong(numKeys)
writeLong(numValues)
writeLong(array.length)
writeLongArray(writeBuffer, array, array.length)
val used = ((cursor - Platform.LONG_ARRAY_OFFSET) / 8).toInt
writeLong(used)
writeLongArray(writeBuffer, page, used)
}
override def writeExternal(output: ObjectOutput): Unit = {
write(output.writeBoolean, output.writeLong, output.write)
}
override def write(kryo: Kryo, out: Output): Unit = {
write(out.writeBoolean, out.writeLong, out.write)
}
private def readLongArray(
readBuffer: (Array[Byte], Int, Int) => Unit,
length: Int): Array[Long] = {
val array = new Array[Long](length)
val buffer = new Array[Byte](4 << 10)
var offset: Long = Platform.LONG_ARRAY_OFFSET
val end = length * 8L + Platform.LONG_ARRAY_OFFSET
while (offset < end) {
val size = Math.min(buffer.length, end - offset)
readBuffer(buffer, 0, size.toInt)
Platform.copyMemory(buffer, Platform.BYTE_ARRAY_OFFSET, array, offset, size)
offset += size
}
array
}
private def read(
readBoolean: () => Boolean,
readLong: () => Long,
readBuffer: (Array[Byte], Int, Int) => Unit): Unit = {
isDense = readBoolean()
minKey = readLong()
maxKey = readLong()
numKeys = readLong()
numValues = readLong()
val length = readLong().toInt
mask = length - 2
array = readLongArray(readBuffer, length)
val pageLength = readLong().toInt
page = readLongArray(readBuffer, pageLength)
// Restore cursor variable to make this map able to be serialized again on executors.
cursor = pageLength * 8 + Platform.LONG_ARRAY_OFFSET
}
override def readExternal(in: ObjectInput): Unit = {
read(() => in.readBoolean(), () => in.readLong(), in.readFully)
}
override def read(kryo: Kryo, in: Input): Unit = {
read(() => in.readBoolean(), () => in.readLong(), in.readBytes)
}
}
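// A self-contained sketch of the address encoding used by LongToUnsafeRowMap above: the
// upper bits of a Long hold the offset into `page`, the low SIZE_BITS (28) bits hold the
// row size, mirroring toAddress/toOffset/toSize (minus the Platform.LONG_ARRAY_OFFSET
// bookkeeping). Illustrative only; the object name is made up.
private[execution] object LongToUnsafeRowMapAddressSketch {
  private val SIZE_BITS = 28
  private val SIZE_MASK = 0xfffffff
  def pack(offset: Long, size: Int): Long = (offset << SIZE_BITS) | size
  def offsetOf(address: Long): Long = address >>> SIZE_BITS
  def sizeOf(address: Long): Int = (address & SIZE_MASK).toInt
  // Example: pack(3L, 40) encodes a 40-byte row at offset 3, and offsetOf/sizeOf recover
  // both fields. A row of (1 << SIZE_BITS) bytes or more would not fit in the size field,
  // which is why append() rejects rows of 256MB and larger.
}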
class LongHashedRelation(
private var nFields: Int,
private var map: LongToUnsafeRowMap) extends HashedRelation with Externalizable {
private var resultRow: UnsafeRow = new UnsafeRow(nFields)
// Needed for serialization (it is public to make Java serialization work)
def this() = this(0, null)
override def asReadOnlyCopy(): LongHashedRelation = new LongHashedRelation(nFields, map)
override def estimatedSize: Long = map.getTotalMemoryConsumption
override def get(key: InternalRow): Iterator[InternalRow] = {
if (key.isNullAt(0)) {
null
} else {
get(key.getLong(0))
}
}
override def getValue(key: InternalRow): InternalRow = {
if (key.isNullAt(0)) {
null
} else {
getValue(key.getLong(0))
}
}
override def get(key: Long): Iterator[InternalRow] = map.get(key, resultRow)
override def getValue(key: Long): InternalRow = map.getValue(key, resultRow)
override def keyIsUnique: Boolean = map.keyIsUnique
override def close(): Unit = {
map.free()
}
override def writeExternal(out: ObjectOutput): Unit = {
out.writeInt(nFields)
out.writeObject(map)
}
override def readExternal(in: ObjectInput): Unit = {
nFields = in.readInt()
resultRow = new UnsafeRow(nFields)
map = in.readObject().asInstanceOf[LongToUnsafeRowMap]
}
/**
* Returns an iterator for keys of InternalRow type.
*/
override def keys(): Iterator[InternalRow] = map.keys()
override def getWithKeyIndex(key: InternalRow): Iterator[ValueRowWithKeyIndex] = {
throw new UnsupportedOperationException
}
override def getValueWithKeyIndex(key: InternalRow): ValueRowWithKeyIndex = {
throw new UnsupportedOperationException
}
override def valuesWithKeyIndex(): Iterator[ValueRowWithKeyIndex] = {
throw new UnsupportedOperationException
}
override def maxNumKeysIndex: Int = {
throw new UnsupportedOperationException
}
}
/**
* Create hashed relation with key that is long.
*/
private[joins] object LongHashedRelation {
def apply(
input: Iterator[InternalRow],
key: Seq[Expression],
sizeEstimate: Int,
taskMemoryManager: TaskMemoryManager,
isNullAware: Boolean = false): HashedRelation = {
val map = new LongToUnsafeRowMap(taskMemoryManager, sizeEstimate)
val keyGenerator = UnsafeProjection.create(key)
// Create a mapping of key -> rows
var numFields = 0
while (input.hasNext) {
val unsafeRow = input.next().asInstanceOf[UnsafeRow]
numFields = unsafeRow.numFields()
val rowKey = keyGenerator(unsafeRow)
if (!rowKey.isNullAt(0)) {
val key = rowKey.getLong(0)
map.append(key, unsafeRow)
} else if (isNullAware) {
return HashedRelationWithAllNullKeys
}
}
map.optimize()
new LongHashedRelation(numFields, map)
}
}
/**
 * A special HashedRelation indicating that it's built from an empty input: Iterator[InternalRow].
 * get & getValue will return null, just like an
 * empty LongHashedRelation or an empty UnsafeHashedRelation does.
*/
case object EmptyHashedRelation extends HashedRelation {
override def get(key: Long): Iterator[InternalRow] = null
override def get(key: InternalRow): Iterator[InternalRow] = null
override def getValue(key: Long): InternalRow = null
override def getValue(key: InternalRow): InternalRow = null
override def asReadOnlyCopy(): EmptyHashedRelation.type = this
override def keyIsUnique: Boolean = true
override def keys(): Iterator[InternalRow] = {
Iterator.empty
}
override def close(): Unit = {}
override def estimatedSize: Long = 0
}
/**
 * A special HashedRelation indicating that it's built from a non-empty input: Iterator[InternalRow]
 * in which all the keys are null.
*/
case object HashedRelationWithAllNullKeys extends HashedRelation {
override def get(key: InternalRow): Iterator[InternalRow] = {
throw new UnsupportedOperationException
}
override def getValue(key: InternalRow): InternalRow = {
throw new UnsupportedOperationException
}
override def asReadOnlyCopy(): HashedRelationWithAllNullKeys.type = this
override def keyIsUnique: Boolean = true
override def keys(): Iterator[InternalRow] = {
throw new UnsupportedOperationException
}
override def close(): Unit = {}
override def estimatedSize: Long = 0
}
/** The HashedRelationBroadcastMode requires that rows are broadcast as a HashedRelation. */
case class HashedRelationBroadcastMode(key: Seq[Expression], isNullAware: Boolean = false)
extends BroadcastMode {
override def transform(rows: Array[InternalRow]): HashedRelation = {
transform(rows.iterator, Some(rows.length))
}
override def transform(
rows: Iterator[InternalRow],
sizeHint: Option[Long]): HashedRelation = {
sizeHint match {
case Some(numRows) =>
HashedRelation(rows, canonicalized.key, numRows.toInt, isNullAware = isNullAware)
case None =>
HashedRelation(rows, canonicalized.key, isNullAware = isNullAware)
}
}
override lazy val canonicalized: HashedRelationBroadcastMode = {
this.copy(key = key.map(_.canonicalized))
}
}
|
BryanCutler/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala
|
Scala
|
apache-2.0
| 35,671 |
package codechicken.microblock
import net.minecraft.item.crafting.IRecipe
import net.minecraft.world.World
import net.minecraft.inventory.InventoryCrafting
import net.minecraft.item.ItemStack
import codechicken.microblock.handler.MicroblockProxy._
import net.minecraftforge.oredict.RecipeSorter
object MicroRecipe extends IRecipe
{
RecipeSorter.register("fmp:micro", getClass, RecipeSorter.Category.SHAPED, "after:forge:shapelessore")
def getRecipeOutput = ItemMicroPart.create(1, 1, "tile.stone")
def getRecipeSize = 9
def matches(icraft:InventoryCrafting, world:World) = getCraftingResult(icraft) != null
def getCraftingResult(icraft:InventoryCrafting):ItemStack =
{
var res = getHollowResult(icraft)
if(res != null) return res
res = getGluingResult(icraft)
if(res != null) return res
res = getThinningResult(icraft)
if(res != null) return res
res = getSplittingResult(icraft)
if(res != null) return res
res = getHollowFillResult(icraft)
return res
}
def create(amount:Int, mcrClass:Int, size:Int, material:Int):ItemStack =
{
if(size == 8)
{
val item = MicroMaterialRegistry.getMaterial(material).getItem.copy
item.stackSize = amount
return item
}
return ItemMicroPart.create(amount, mcrClass<<8|size, MicroMaterialRegistry.materialName(material))
}
def microMaterial(item:ItemStack) =
if(item.getItem == itemMicro)
ItemMicroPart.getMaterialID(item)
else
findMaterial(item)
def microClass(item:ItemStack) =
if(item.getItem == itemMicro)
item.getItemDamage >> 8
else
0
def microSize(item:ItemStack) =
if(item.getItem == itemMicro)
item.getItemDamage & 0xFF
else
8
def getHollowResult(icraft:InventoryCrafting):ItemStack =
{
if(icraft.getStackInRowAndColumn(1, 1) != null) return null
val first = icraft.getStackInRowAndColumn(0, 0)
if(first == null || first.getItem != itemMicro || microClass(first) != 0) return null
val size = microSize(first)
val material = microMaterial(first)
for(i <- 1 to 8 if i != 4)
{
val item = icraft.getStackInSlot(i)
if(item == null || item.getItem != itemMicro ||
microMaterial(item) != material || item.getItemDamage != first.getItemDamage)
return null
}
return create(8, 1, size, material)
}
def getGluingResult(icraft:InventoryCrafting):ItemStack =
{
var size = 0
var count = 0
var smallest = 0
var mcrClass = 0
var material = 0
for(i <- 0 until 9)
{
val item = icraft.getStackInSlot(i)
if(item != null)
{
if(item.getItem != itemMicro) return null
if(count == 0)
{
size = microSize(item)
mcrClass = microClass(item)
material = microMaterial(item)
count = 1
smallest = size
}
else if(microClass(item) != mcrClass || microMaterial(item) != material) return null
else if(mcrClass >= 2 && microSize(item) != smallest) return null
else
{
smallest= Math.min(smallest, microSize(item))
count+=1
size+=microSize(item)
}
}
}
if(count <= 1) return null
mcrClass match {
case 3 => count match {
case 2 => create(1, 0, smallest, material)
case _ => null
}
case 2 => count match {
case 2 => create(1, 3, smallest, material)
case 4 => create(1, 0, smallest, material)
case _ => null
}
case 1|0 =>
val base = Seq(1, 2, 4).find(s => (s&size) != 0)
if(base.isEmpty)
create(size/8, 0, 8, material)
else if(base.get <= smallest)
null
else
create(size/base.get, mcrClass, base.get, material)
case _ => null
}
}
def getSaw(icraft:InventoryCrafting):(Saw, Int, Int) =
{
for(r <- 0 until 3)
for(c <- 0 until 3)
{
val item = icraft.getStackInRowAndColumn(c, r)
if(item != null && item.getItem.isInstanceOf[Saw])
return (item.getItem.asInstanceOf[Saw], r, c)
}
return (null, 0, 0)
}
def canCut(saw:Saw, sawItem:ItemStack, material:Int):Boolean = {
val sawStrength = saw.getCuttingStrength(sawItem)
val matStrength = MicroMaterialRegistry.getMaterial(material).getCutterStrength
return sawStrength >= matStrength || sawStrength == MicroMaterialRegistry.getMaxCuttingStrength
}
def getThinningResult(icraft:InventoryCrafting):ItemStack =
{
val (saw, row, col) = getSaw(icraft)
if(saw == null)
return null
val item = icraft.getStackInRowAndColumn(col, row+1)
if(item == null)
return null
val size = microSize(item)
val material = microMaterial(item)
val mcrClass = microClass(item)
if(size == 1 || material < 0 || !canCut(saw, icraft.getStackInRowAndColumn(col, row), material))
return null
for(r <- 0 until 3)
for(c <- 0 until 3)
if((c != col || r != row && r != row+1) &&
icraft.getStackInRowAndColumn(c, r) != null)
return null
return create(2, mcrClass, size/2, material)
}
def findMaterial(item:ItemStack):Int =
MicroMaterialRegistry.getIdMap.find{m => val mitem = m._2.getItem
item.getItem == mitem.getItem &&
item.getItemDamage == mitem.getItemDamage &&
ItemStack.areItemStackTagsEqual(item, mitem)} match {
case None => -1
case Some((name, m)) => MicroMaterialRegistry.materialID(name)
}
val splitMap = Map(0 -> 3, 1 -> 3, 3 -> 2)
def getSplittingResult(icraft:InventoryCrafting):ItemStack =
{
val (saw, row, col) = getSaw(icraft)
if(saw == null) return null
val item = icraft.getStackInRowAndColumn(col+1, row)
if(item == null || item.getItem != itemMicro) return null
val mcrClass = microClass(item)
val material = microMaterial(item)
if(!canCut(saw, icraft.getStackInRowAndColumn(col, row), material)) return null
val split = splitMap.get(mcrClass)
if(split.isEmpty)return null
for(r <- 0 until 3)
for(c <- 0 until 3)
if((r != row || c != col && c != col+1) &&
icraft.getStackInRowAndColumn(c, r) != null)
return null
return create(2, split.get, microSize(item), material)
}
def getHollowFillResult(icraft:InventoryCrafting):ItemStack =
{
var cover:ItemStack = null
for(i <- 0 until 9)
{
val item = icraft.getStackInSlot(i)
if(item != null)
{
if(item.getItem != itemMicro || cover != null || microClass(item) != 1) return null
else cover = item
}
}
if(cover == null) return null
return create(1, 0, microSize(cover), microMaterial(cover))
}
}
|
Chicken-Bones/ForgeMultipart
|
src/codechicken/microblock/MicroRecipe.scala
|
Scala
|
lgpl-2.1
| 7,891 |
package com.github.novamage.svalidator.binding.binders.typed
import com.github.novamage.svalidator.binding.binders.{JsonTypedBinder, TypedBinder}
import com.github.novamage.svalidator.binding.{BindingConfig, BindingFailure, BindingPass}
import io.circe.Json
import testUtils.Observes
class StringBinderSpecs extends Observes {
describe("when testing the binding of a class with a simple constructor with a string argument") {
val fieldName = "someStringFieldName"
val metadata = mock[Map[String, Any]]
describe("and the values map method of binding is used") {
val sut: TypedBinder[String] = new StringBinder(BindingConfig.defaultConfig)
describe("and the argument is not present in the values map") {
val result = sut.bind(fieldName, Map("aDifferentField" -> List("someValue")), metadata)
it("should have returned a Binding Failure with a field error for the string field") {
result.fieldErrors.filter(_.fieldName == fieldName) should have size 1
val error = result.fieldErrors.head
error.fieldName should equal(fieldName)
error.messageParts should equal(BindingConfig.defaultConfig.languageConfig.invalidNonEmptyTextMessage(fieldName))
result.asInstanceOf[BindingFailure[_]].cause.get.isInstanceOf[NoSuchElementException] should be(true)
}
}
describe("and the argument is present in the values map") {
describe("and the argument is an empty string") {
val result = sut.bind(fieldName, Map(fieldName -> List("")), metadata)
it("should have returned a Binding Failure with a field error for the string field") {
result.fieldErrors.filter(_.fieldName == fieldName) should have size 1
val error = result.fieldErrors.head
error.fieldName should equal(fieldName)
error.messageParts should equal(BindingConfig.defaultConfig.languageConfig.invalidNonEmptyTextMessage(fieldName))
result.asInstanceOf[BindingFailure[_]].cause.get.isInstanceOf[NoSuchElementException] should be(true)
}
}
describe("and the argument is a whitespace string") {
val result = sut.bind(fieldName, Map(fieldName -> List(" ")), metadata)
it("should have returned a Binding Failure with a field error for the string field") {
result.fieldErrors.filter(_.fieldName == fieldName) should have size 1
val error = result.fieldErrors.head
error.fieldName should equal(fieldName)
error.messageParts should equal(BindingConfig.defaultConfig.languageConfig.invalidNonEmptyTextMessage(fieldName))
result.asInstanceOf[BindingFailure[_]].cause.get.isInstanceOf[NoSuchElementException] should be(true)
}
}
describe("and the valueGetter is a non-whitespace string with spaces on the edges") {
val fieldValue = " someValue "
val result = sut.bind(fieldName, Map(fieldName -> List(fieldValue)), metadata)
it("should have bound the property including its spaces properly") {
result should equal(BindingPass(fieldValue))
}
}
}
}
describe("and the json method of binding is used") {
val sut: JsonTypedBinder[String] = new StringBinder(BindingConfig.defaultConfig)
describe("and the argument is not present in the values map") {
val json = Json.obj("aDifferentField" -> Json.fromString("someValue"))
val result = sut.bindJson(json.hcursor.downField(fieldName), Some(fieldName), metadata)
it("should have returned a Binding Failure with a field error for the string field") {
result.fieldErrors.filter(_.fieldName == fieldName) should have size 1
val error = result.fieldErrors.head
error.fieldName should equal(fieldName)
error.messageParts should equal(BindingConfig.defaultConfig.languageConfig.invalidNonEmptyTextMessage(fieldName))
result.asInstanceOf[BindingFailure[_]].cause.get.isInstanceOf[NoSuchElementException] should be(true)
}
}
describe("and the argument is present in the values map") {
describe("and the argument is an empty string") {
val json = Json.obj(fieldName -> Json.fromString(""))
val result = sut.bindJson(json.hcursor.downField(fieldName), Some(fieldName), metadata)
it("should have returned a Binding Failure with a field error for the string field") {
result.fieldErrors.filter(_.fieldName == fieldName) should have size 1
val error = result.fieldErrors.head
error.fieldName should equal(fieldName)
error.messageParts should equal(BindingConfig.defaultConfig.languageConfig.invalidNonEmptyTextMessage(fieldName))
result.asInstanceOf[BindingFailure[_]].cause.get.isInstanceOf[NoSuchElementException] should be(true)
}
}
describe("and the argument is a whitespace string") {
val json = Json.obj(fieldName -> Json.fromString(" "))
val result = sut.bindJson(json.hcursor.downField(fieldName), Some(fieldName), metadata)
it("should have returned a Binding Failure with a field error for the string field") {
result.fieldErrors.filter(_.fieldName == fieldName) should have size 1
val error = result.fieldErrors.head
error.fieldName should equal(fieldName)
error.messageParts should equal(BindingConfig.defaultConfig.languageConfig.invalidNonEmptyTextMessage(fieldName))
result.asInstanceOf[BindingFailure[_]].cause.get.isInstanceOf[NoSuchElementException] should be(true)
}
}
describe("and the valueGetter is a non-whitespace string with spaces on the edges") {
val fieldValue = " someValue "
val json = Json.obj(fieldName -> Json.fromString(fieldValue))
val result = sut.bindJson(json.hcursor.downField(fieldName), Some(fieldName), metadata)
it("should have bound the property including its spaces properly") {
result should equal(BindingPass(fieldValue))
}
}
}
}
}
}
|
NovaMage/SValidator
|
src/test/scala/com/github/novamage/svalidator/binding/binders/typed/StringBinderSpecs.scala
|
Scala
|
mit
| 6,239 |
package circumflex
package orm
import core._
import java.io.File
import java.lang.reflect.Modifier
import java.net.URL
/*!# Exporting Database Schema
The `DDLUnit` class provides an API for creating and dropping the database schema.
It arranges database objects in the correct order (preliminary auxiliary objects,
tables, constraints, auxiliary database objects) and offers configurable logging.
*/
class DDLUnit {
protected var _schemata: Seq[Schema] = Nil
def schemata = _schemata
protected var _tables: Seq[Table[_, _]] = Nil
def tables = _tables
protected var _views: Seq[View[_, _]] = Nil
def views = _views
protected var _constraints: Seq[Constraint] = Nil
def constraints = _constraints
protected var _preAux: Seq[SchemaObject] = Nil
def preAux = _preAux
protected var _postAux: Seq[SchemaObject] = Nil
def postAux = _postAux
protected var _msgs: Seq[Msg] = Nil
def messages = _msgs
def msgsArray: Array[Msg] = messages.toArray
def this(objects: SchemaObject*) = {
this()
add(objects: _*)
}
def resetMsgs(): this.type = {
_msgs = Nil
this
}
def clear() = {
_schemata = Nil
_tables = Nil
_views = Nil
_constraints = Nil
_preAux = Nil
_postAux = Nil
resetMsgs()
}
def add(objects: SchemaObject*): this.type = {
objects.foreach(addObject(_))
this
}
def addObject(obj: SchemaObject): this.type = {
def processRelation(r: Relation[_, _]) {
addObject(r.schema)
r.preAux.foreach(o =>
if (!_preAux.contains(o)) _preAux ++= List(o))
r.postAux.foreach(o => addObject(o))
}
obj match {
case t: Table[_, _] => if (!_tables.contains(t)) {
_tables ++= List(t)
t.constraints.foreach(c => addObject(c))
t.indexes.foreach(i => addObject(i))
processRelation(t)
}
case v: View[_, _] => if (!_views.contains(v)) {
_views ++= List(v)
processRelation(v)
}
case c: Constraint => if (!_constraints.contains(c))
_constraints ++= List(c)
case s: Schema => if (!_schemata.contains(s))
_schemata ++= List(s)
case o => if (!_postAux.contains(o))
_postAux ++= List(o)
}
this
}
protected def dropObjects(objects: Seq[SchemaObject]) {
if (ormConf.readOnly)
throw new CircumflexException(
"Read-only configuration does not allow DDL statements.")
objects.reverse.foreach { o =>
tx.execute(o.sqlDrop, { st =>
st.executeUpdate()
ORM_LOG.debug(ormConf.prefix(": ") + o.sqlDrop)
_msgs ++= List(new Msg(
"orm.ddl.info",
"status" -> ("DROP " + o.objectName + ": OK"),
"sql" -> o.sqlDrop))
}, { e =>
_msgs ++= List(new Msg(
"orm.ddl.info",
"status" -> ("DROP " + o.objectName + ": " + e.getMessage),
"sql" -> o.sqlDrop,
"error" -> e.getMessage))
})
}
}
protected def createObjects(objects: Seq[SchemaObject]) {
if (ormConf.readOnly)
throw new CircumflexException(
"Read-only configuration does not allow DDL statements.")
objects.foreach { o =>
tx.execute(o.sqlCreate, { st =>
st.executeUpdate()
ORM_LOG.debug(ormConf.prefix(": ") + o.sqlCreate)
_msgs ++= List(new Msg(
"orm.ddl.info",
"status" -> ("CREATE " + o.objectName + ": OK"),
"sql" -> o.sqlCreate))
}, { e =>
_msgs ++= List(new Msg(
"orm.ddl.error",
"status" -> ("CREATE " + o.objectName + ": " + e.getMessage),
"sql" -> o.sqlCreate,
"error" -> e.getMessage))
})
}
}
def DROP(): this.type = {
resetMsgs()
_drop()
this
}
def _drop() {
tx.execute({ conn =>
      // We will commit every successful statement
val autoCommit = conn.getAutoCommit
conn.setAutoCommit(true)
// Execute a script
dropObjects(postAux)
dropObjects(views)
if (ormConf.dialect.supportsDropConstraints)
dropObjects(constraints)
dropObjects(tables)
dropObjects(preAux)
if (ormConf.dialect.supportsSchema)
dropObjects(schemata)
// Restore auto-commit
conn.setAutoCommit(autoCommit)
}, { throw _ })
}
def CREATE(): this.type = {
resetMsgs()
_create()
this
}
def _create() {
tx.execute({ conn =>
      // We will commit every successful statement
val autoCommit = conn.getAutoCommit
conn.setAutoCommit(true)
// Execute a script
if (ormConf.dialect.supportsSchema)
createObjects(schemata)
createObjects(preAux)
createObjects(tables)
createObjects(constraints)
createObjects(views)
createObjects(postAux)
// Restore auto-commit
conn.setAutoCommit(autoCommit)
}, { throw _ })
}
def DROP_CREATE(): this.type = {
resetMsgs()
_drop()
_create()
this
}
def close() {
tx.close()
ormConf.connectionProvider.close()
}
def objectsCount: Int = schemata.size +
tables.size +
constraints.size +
views.size +
preAux.size +
postAux.size
def addPackage(urls: Iterable[URL], pkg: String) {
add(DDLUnit.searchObjects(urls, pkg): _*)
}
override def toString: String = {
var result = "Circumflex DDL Unit: "
if (messages.size == 0) {
result += objectsCount + " objects in queue."
} else {
val infoCount = messages.filter(_.key == "orm.ddl.info").size
val errorsCount = messages.filter(_.key == "orm.ddl.error").size
result += infoCount + " successful statements, " + errorsCount + " errors."
}
result
}
}
/*!# Building Schema from Sources
The `DDLUnit` singleton can inspect your compiled classes to find the relations and build
the schema from them. The usage is pretty simple:
DDLUnit.fromClasspath().CREATE()
You can also specify package prefix for searching (using either slashes or dots as delimiters):
DDLUnit.fromClasspath("com.myapp.model").CREATE()
By default, the compiled classes are being searched in `target/classes` and `target/test-classes`
directories relative to your project's root. You can override this setting using `cx.build.outputDirs`
configuration parameter (paths are split from String using `File.pathSeparator`, i.e. colon ":" in UNIX
and ";" in Windows).
*/
object DDLUnit {
def outputDirs: Iterable[File] = cx.get("cx.build.outputDirs") match {
case Some(i: Iterable[File]) => i
case Some(s: String) => s.split(File.pathSeparator).map(s => new File(s))
case _ => List(new File("target/classes"), new File("target/test-classes"))
}
def instantiateObject(c: Class[_]): Option[SchemaObject] =
try {
var so: SchemaObject = null
// Try to treat it as a singleton
try {
val module = c.getField("MODULE$")
if (isSchemaObjectType(module.getType))
so = module.get(null).asInstanceOf[SchemaObject]
} catch {
case e: NoSuchFieldException =>
// Try to instantiate it as a POJO
if (isSchemaObjectType(c))
so = c.newInstance.asInstanceOf[SchemaObject]
}
if (so != null)
Some(so)
else None
} catch {
case e: Exception =>
// Omit non-schema classes silently
None
}
def searchObjects(urls: Iterable[URL],
pkgPrefix: String = ""): Seq[SchemaObject] =
urls.toSeq
.flatMap(url => cx.searchClasses(url, _.startsWith(pkgPrefix)))
.distinct
.flatMap(cl => instantiateObject(cl))
def fromClasspath(urls: Iterable[URL],
pkgPrefix: String = ""): DDLUnit = {
val ddl = new DDLUnit(searchObjects(urls, pkgPrefix): _*)
ORM_LOG.debug("Lookup complete, " + ddl.objectsCount + " objects found.")
ddl
}
protected def isSchemaObjectType(c: Class[_]): Boolean =
classOf[SchemaObject].isAssignableFrom(c) &&
!Modifier.isAbstract(c.getModifiers) &&
!Modifier.isInterface(c.getModifiers)
}
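/*! A short usage sketch (`UserTable` and `OrderTable` are hypothetical stand-ins for your
own `SchemaObject` instances; only the `DDLUnit` API shown above is real):

    val ddl = new DDLUnit(UserTable, OrderTable)
    ddl.CREATE()      // creates schemata, pre-aux objects, tables, constraints, views, post-aux objects
    println(ddl)      // reports the number of successful statements and errors
    ddl.DROP_CREATE() // drops and re-creates everything in the correct order
    ddl.close()
*/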
|
inca/circumflex
|
orm/src/main/scala/ddl.scala
|
Scala
|
bsd-2-clause
| 8,053 |
package com.verizon.bda.trapezium.dal.solr
import com.verizon.bda.trapezium.dal.exceptions.SolrOpsException
import org.apache.spark.SparkContext
import scala.collection.mutable.ListBuffer
class SolrOpsLocalApi(solrMap: Map[String, String], sparkContext: SparkContext)
extends SolrOpsLocal(solrMap: Map[String, String]) {
override def getHostToFileMap(): Map[String, ListBuffer[(String, String)]] = {
log.info("inside SolrOpsLocal.getHostToFileMap")
try {
val isRunning = PostZipDataAPI.isApiRunningOnAllMachines(coreMap, solrMap)
if (isRunning) {
PostZipDataAPI.postDataViaHTTP(sparkContext, solrMap, hdfsIndexFilePath,
coreMap, collectionName)
} else {
throw SolrOpsException(s"could not create collection :$collectionName")
}
}
catch {
case e: Exception => {
rollBackCollections(collectionName)
log.error(s"could not create collection ${collectionName}", e)
null
}
}
}
override def deleteOldCollections(oldCollection: String): Unit = {
if (oldCollection != null) {
deleteCollection(oldCollection, false)
PostZipDataAPI.deleteDirectoryViaHTTP(oldCollection, coreMap, solrMap)
}
}
}
|
Verizon/trapezium
|
dal/src/main/scala/com/verizon/bda/trapezium/dal/solr/SolrOpsLocalApi.scala
|
Scala
|
apache-2.0
| 1,226 |
package monocle.function
object all extends GenericOptics
trait GenericOptics
extends AtFunctions
with ConsFunctions
with Cons1Functions
with CurryFunctions
with EachFunctions
with EmptyFunctions
with Field1Functions
with Field2Functions
with Field3Functions
with Field4Functions
with Field5Functions
with Field6Functions
with FilterIndexFunctions
with IndexFunctions
with ReverseFunctions
with SnocFunctions
with Snoc1Functions
|
NightRa/Monocle
|
core/src/main/scala/monocle/function/All.scala
|
Scala
|
mit
| 512 |
import scala.tools.partest._
import java.io._
import scala.tools.nsc._
import scala.tools.cmd.CommandLineParser
import scala.tools.nsc.doc.{Settings, DocFactory}
import scala.tools.nsc.reporters.ConsoleReporter
object Test extends DirectTest {
override def extraSettings: String = "-usejavacp -Xprint:parser -Yrangepos -Ystop-after:parser -d " + testOutput.path
override def code = """
// SI-5527
object UselessComments {
var z = 0
def test1 = {
/** Some comment here */
object Maybe {
/** Some comment inside */
def nothing() = ()
}
}
def test2 = {
var x = 4
if (true) {
/** Testing 123 */
x = 5
val y = 6
}
}
def test3 = {
if (true)
z = 3
/** Calculate this result. */
val t = 4
for (i <- 0 to 4)
println(i)
}
val test4 = ('a') match {
/** Another digit is a giveaway. */
case '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' =>
true
case _ =>
false
}
def test5 {
/** @martin is this right? It shouldn't flag me as scaladoc. */
if (true) ???
}
def test6 = {
/** Document this crucial constant for posterity.
* Don't forget to dedoc this comment if you refactor to a local.
* @author Paul Phillips
*/
val u = 4
for (i <- 0 to u)
println(i)
}
def test7 = {
/** Some standard tags are tolerated locally and shouldn't trigger a warning.
* @note Don't change this unless you know what you're doing. This means you.
*/
val u = 4
for (i <- 0 to u)
println(i)
}
def test8 = {
/*************************\
* Fancy ASCII Art Block *
* @author som-snytt *
\*************************/
// this is just a local
val z = "fancy"
z replace ("fanc", "arts")
}
def test9 = {
val i = 10 */** Important!
* We have to multiply here!
* @author community
* @see SI-1234
*/
10
assert(i == 100)
}
}
/** comments that we should keep */
object UsefulComments {
/** class A */
class A {
/** f */
def f(i: Int) = i
/** v */
val v = 1
/** u */
var u = 2
}
/** trait B */
trait B {
/** T */
type T
/** f */
def f(i: Int)
/** v */
val v = 1
/** u */
var u = 2
}
/** object C */
object C {
/** f */
def f(i: Int) = i
/** v */
val v = 1
/** u */
var u = 2
}
/** class D */
@deprecated("use ... instead", "2.10.0")
class D
/** Get the simple value.
* @return the default value
*/
// an intervening line comment
/* I had more to say, but didn't want to pollute the scaladoc. */
def value: Int = 7
}
""".trim
override def show(): Unit = {
// redirect err to out, for logging
val prevErr = System.err
System.setErr(System.out)
compile()
System.setErr(prevErr)
}
override def newCompiler(args: String*): Global = {
// we want the Scaladoc compiler here, because it keeps DocDef nodes in the tree
val settings = new Settings(_ => ())
val command = new ScalaDoc.Command((CommandLineParser tokenize extraSettings) ++ args.toList, settings)
new DocFactory(new ConsoleReporter(settings), settings).compiler
}
override def isDebug = false // so we don't get the newSettings warning
}
|
felixmulder/scala
|
test/scaladoc/run/t5527.scala
|
Scala
|
bsd-3-clause
| 3,842 |
package info.raack.sailingcruisechecker
import java.time.LocalDate
import javax.annotation.PostConstruct
import javax.inject.Inject
import javax.inject.Named
import org.log4s.getLogger
// mark a class with @Named to create an instance of it in the object graph
@Named
// use @Inject() followed by constructor arguments to have Spring wire in instances of these classes
class SailingCruiseChecker @Inject() (val http: Http, val sampleProcessor: SampleProcessor) {
private[this] val logger = getLogger
@PostConstruct
def run() {
val now = LocalDate.now
(0 until 20).map(now.plusDays(_)).foreach(date => {
val url = getURLForDate(date)
val pageContent = http.get(url)
if (containsDays(pageContent)) {
logger.info(s"${date} has openings - ${url}")
}
})
}
private
def getURLForDate(date: LocalDate): String = {
f"http://sailsfbay.com/Kalendar/eventdetails.cfm?EventID=${date.getMonth.getValue}%02d-${date.getDayOfMonth}%02d-${date.getYear}"
}
private
def containsDays(pageContent: String): Boolean = {
!pageContent.contains("this cruise is fully booked")
}
}
|
dinoboy197/sailing-cruise-watcher
|
src/main/scala/info/raack/sailingcruisechecker/SailingCruiseChecker.scala
|
Scala
|
agpl-3.0
| 1,134 |
/*
* Copyright 2012-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.directive.std
import cats.syntax.all._
import laika.ast.{DocumentCursor, InvalidSpan, TemplateElement, TemplateSpan, TemplateSpanSequence}
import laika.config.{ArrayValue, BooleanValue, ConfigValue, Key, NullValue, ObjectValue, StringValue}
import laika.directive.Templates
import laika.rewrite.TemplateRewriter
import scala.annotation.tailrec
/** Provides the implementation for the standard control flow directives included in Laika.
*
* These include:
*
* - `for`: Accesses a value from the context and sets it as the reference context for its
* body elements, executing the body if the referenced value is non-empty and executing
* it multiple times when it is a collection.
* - `if`: Accesses a value from the context and processes the body element only when
* it is a value recognized as true.
*
* For full documentation see the section about
* [[https://planet42.github.io/Laika/07-reference/01-standard-directives.html#conditionals-and-loops Conditionals and Loops]]
* in the manual.
*
* @author Jens Halm
*/
object ControlFlowDirectives {
/** Implementation of the `for` directive for templates.
*/
lazy val templateFor: Templates.Directive = Templates.create("for") {
import Templates.dsl._
val emptyValues = Set[ConfigValue](StringValue(""), BooleanValue(false), NullValue)
case class Empty (spans: Seq[TemplateSpan])
val emptySeparator = Templates.separator("empty", max = 1)(parsedBody.map(Empty.apply))
(attribute(0).as[String], separatedBody(Seq(emptySeparator)), cursor, Templates.dsl.source).mapN { (ref, multipart, cursor, source) =>
def rewrite (spans: Seq[TemplateSpan], childCursor: DocumentCursor): TemplateSpanSequence =
TemplateSpanSequence(spans).rewriteChildren(TemplateRewriter.rewriteRules(childCursor))
def rewriteContent (value: ConfigValue): TemplateSpanSequence = rewrite(multipart.mainBody, cursor.withReferenceContext(value))
def rewriteFallback = multipart.children.headOption.map(_.spans).map(rewrite(_, cursor)).getOrElse(TemplateSpanSequence.empty)
cursor.resolveReference(Key.parse(ref)) match {
case Right(Some(o: ObjectValue)) => rewriteContent(o)
case Right(Some(a: ArrayValue)) if a.isEmpty => rewriteFallback
case Right(Some(a: ArrayValue)) => TemplateSpanSequence(a.values.map(rewriteContent))
case Right(Some(simpleValue)) if emptyValues(simpleValue) => rewriteFallback
case Right(Some(simpleValue)) => rewriteContent(simpleValue)
case Right(None) => rewriteFallback
case Left(error) => TemplateElement(InvalidSpan(s"Error retrieving reference '$ref': ${error.message}", source))
}
}
}
/** Implementation of the `if` directive for templates.
*/
lazy val templateIf: Templates.Directive = Templates.create("if") {
import Templates.dsl._
val trueStrings = Set("true","yes","on","enabled")
sealed trait IfSeparator extends Product with Serializable
case class ElseIf (ref: String, body: Seq[TemplateSpan]) extends IfSeparator
case class Else (body: Seq[TemplateSpan]) extends IfSeparator
val elseIfSep = Templates.separator("elseIf") {
(attribute(0).as[String], parsedBody).mapN(ElseIf.apply)
}
val elseSep = Templates.separator("else", max = 1) {
parsedBody.map(Else.apply)
}
val multipartBody = separatedBody(Seq(elseIfSep, elseSep))
(attribute(0).as[String], multipartBody, cursor).mapN { (path, multipart, cursor) =>
def rewrite (spans: Seq[TemplateSpan]): TemplateSpanSequence =
TemplateSpanSequence(spans).rewriteChildren(TemplateRewriter.rewriteRules(cursor))
def rewriteFallback = multipart.children
.collectFirst { case e: Else => e }
.map(_.body).map(rewrite)
.getOrElse(TemplateSpanSequence.empty)
@tailrec
def process (parts: Seq[ElseIf]): TemplateSpanSequence =
if (parts.isEmpty) rewriteFallback
else cursor.resolveReference(Key.parse(parts.head.ref)) match {
case Right(Some(BooleanValue(true))) => rewrite(parts.head.body)
case Right(Some(StringValue(s))) if trueStrings(s) => rewrite(parts.head.body)
case Right(Some(a: ArrayValue)) if !a.isEmpty => rewrite(parts.head.body)
case _ => process(parts.tail)
}
val alternatives = ElseIf(path, multipart.mainBody) +: multipart.collect[ElseIf]
process(alternatives)
}
}
}
|
planet42/Laika
|
core/shared/src/main/scala/laika/directive/std/ControlFlowDirectives.scala
|
Scala
|
apache-2.0
| 5,224 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.optimize
import cc.factorie.model.{WeightsMap, WeightsSet}
import cc.factorie.util.FastLogging
/**
* A conjugate gradient optimizer. Should not be used unless you know you want it because LBFGS is often better.
* @param initialStepSize The initial step size. Not too important because line search is performed.
* @author Andrew McCallum, Alexandre Passos
*/
class ConjugateGradient(val initialStepSize: Double = 1.0) extends GradientOptimizer with FastLogging {
private var _isConverged = false
def isConverged = _isConverged
var tolerance = 0.0001
var gradientTolerance = 0.001
var maxIterations = 1000
val eps = 1.0e-10 // a small number to rectify the special case of converging to exactly zero function value
// The state of a conjugate gradient search
//var fp = 0.0
var oldValue = 0.0
var gg = 0.0
var gam = 0.0
var dgg = 0.0
var stepSize = 0.0
var xi: WeightsMap = null
var g: WeightsMap = null
var h: WeightsMap = null
var iterations = 0
var lineOptimizer: BackTrackLineOptimizer = null
def reset(): Unit = {
xi = null
_isConverged = false
}
def initializeWeights(weights: WeightsSet): Unit = { }
def finalizeWeights(weights: WeightsSet): Unit = { }
def step(weights:WeightsSet, gradient:WeightsMap, value:Double): Unit = {
if (_isConverged) return
// If this is our first time in, then initialize
if (xi eq null) {
xi = gradient.copy
g = xi.copy
h = xi.copy
stepSize = initialStepSize
}
// Take a step in the current search direction, xi
if (lineOptimizer eq null) lineOptimizer = new BackTrackLineOptimizer(gradient, xi.copy, stepSize)
lineOptimizer.step(weights, xi, value)
// If the lineOptimizer has not yet converged, then don't yet do any of the ConjugateGradient-specific things below
if (lineOptimizer.isConverged){
lineOptimizer = null // So we create a new one next time around
xi = gradient.copy
// This termination provided by "Numeric Recipes in C".
if (2.0 * math.abs(value - oldValue) <= tolerance * (math.abs(value) + math.abs(oldValue) + eps)) {
logger.info("ConjugateGradient converged: old value="+oldValue+" new value="+value+" tolerance="+tolerance)
_isConverged = true
return
}
      // This termination criterion is from McCallum.
if (xi.twoNorm < gradientTolerance) {
logger.info("ConjugateGradient converged: maximum gradient component: "+xi.twoNorm+" less than "+tolerance)
_isConverged = true
return
}
oldValue = value
// compute gamma, new g and new h
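      // This is the Polak-Ribiere form of the update: gam = xi.(xi - g) / (g.g), where xi holds the
      // new gradient and g the previous one; the new search direction then becomes h = xi + gam * h.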
{
dgg = 0.0
gg = 0.0
val xia = xi.toArray
val ga = g.toArray
var i = 0
while (i < ga.length) {
gg += ga(i) * ga(i) // previous gradient
dgg += xia(i) * (xia(i) - ga(i)) // current gradient
i += 1
}
gam = dgg / gg
g.keys.foreach(k => g(k) := xi(k))
h.keys.foreach(k => h(k) *= gam)
h += g
assert(!h.containsNaN())
}
/* gdruck: If using the BackTrackLineSearch, then the search stops whenever
a step is found that increases the value significantly (according
to a threshold from Numerical Recipes). ConjugateGradient
assumes that line maximization finds something close
to the maximum in that direction. In tests, sometimes the
direction suggested by CG points downhill. Consequently, here I am
setting the search direction to the gradient if the slope is
negative or 0. */
// TODO Implement GradientBracketLineMaximizer (used in Numerical Recipes) which should avoid this problem!
if (xi.dot(h) > 0) xi := h else h := xi
iterations += 1
lineOptimizer = new BackTrackLineOptimizer(gradient, xi.copy, stepSize)
lineOptimizer.step(weights, xi, value)
}
}
}
|
Craigacp/factorie
|
src/main/scala/cc/factorie/optimize/ConjugateGradient.scala
|
Scala
|
apache-2.0
| 4,604 |
package org.jetbrains.plugins.scala
package lang
package completion
package handlers
import com.intellij.codeInsight.completion.{InsertHandler, InsertionContext}
import com.intellij.codeInsight.lookup.LookupElement
import com.intellij.psi.util.PsiTreeUtil.getParentOfType
import com.intellij.psi.{PsiClass, PsiDocumentManager, PsiFile}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReference
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSimpleTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScObject}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createReferenceFromText
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.overrideImplement.ScalaOIUtil.{getMembersToImplement, runAction}
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings
/**
* @author Alexander Podkhalyuzin
*/
final class ScalaConstructorInsertHandler(typeParametersEvaluator: (ScType => String) => String,
hasSubstitutionProblem: Boolean,
isInterface: Boolean,
isRenamed: Boolean,
isPrefixCompletion: Boolean) extends InsertHandler[LookupElement] {
override def handleInsert(context: InsertionContext,
element: LookupElement): Unit = {
val InsertionContextExt(editor, document, file, project) = context
    if (context.getCompletionChar == '(' || context.getCompletionChar == '[') {
      context.setAddCompletionChar(false)
    }
val startOffset = context.getStartOffset
val lookupStringLength = element.getLookupString.length
var endOffset = startOffset + lookupStringLength
val model = editor.getCaretModel
element.getPsiElement match {
case _: ScObject =>
if (context.getCompletionChar != '.') {
document.insertString(endOffset, ".")
endOffset += 1
model.moveToOffset(endOffset)
context.scheduleAutoPopup()
}
case clazz: PsiClass =>
var hasNonEmptyParams = false
clazz match {
case c: ScClass =>
c.constructor match {
case Some(constr) if constr.parameters.nonEmpty => hasNonEmptyParams = true
case _ =>
}
c.secondaryConstructors.foreach(fun => if (fun.parameters.nonEmpty) hasNonEmptyParams = true)
case _ =>
clazz.getConstructors.foreach(meth => if (meth.getParameterList.getParametersCount > 0) hasNonEmptyParams = true)
}
if (context.getCompletionChar == '(') hasNonEmptyParams = true
if (hasSubstitutionProblem) {
document.insertString(endOffset, "[]")
endOffset += 2
model.moveToOffset(endOffset - 1)
} else {
val text = typeParametersEvaluator(_.canonicalText)
document.insertString(endOffset, text)
endOffset += text.length
model.moveToOffset(endOffset)
}
if (hasNonEmptyParams) {
document.insertString(endOffset, "()")
endOffset += 2
if (!hasSubstitutionProblem)
model.moveToOffset(endOffset - 1)
}
if (isInterface) {
onDefinition(file, startOffset) { newTemplateDef =>
val (openBlock, closeBlock) = generateBlock(newTemplateDef)
document.insertString(endOffset, openBlock)
endOffset += openBlock.length
if (!hasSubstitutionProblem)
model.moveToOffset(endOffset)
document.insertString(endOffset, closeBlock)
endOffset += closeBlock.length
}
}
PsiDocumentManager.getInstance(project).commitDocument(document)
onDefinition(file, endOffset - 1) {
newTemplateDefinition =>
newTemplateDefinition.extendsBlock.templateParents.toSeq.flatMap(_.typeElements) match {
case Seq(ScSimpleTypeElement.unwrapped(reference)) if !isRenamed =>
simplifyReference(clazz, reference).bindToElement(clazz)
case _ =>
}
ScalaPsiUtil.adjustTypes(newTemplateDefinition)
}
if (isInterface && !hasSubstitutionProblem) {
context.setLaterRunnable(() => {
onDefinition(file, model.getOffset - 1) { newTemplateDefinition =>
val members = getMembersToImplement(newTemplateDefinition)
ScalaApplicationSettings.getInstance().SPECIFY_RETURN_TYPE_EXPLICITLY =
ScalaApplicationSettings.ReturnTypeLevel.BY_CODE_STYLE
runAction(
members,
isImplement = true,
newTemplateDefinition
)(project, editor)
}
})
}
}
}
private def simplifyReference(`class`: PsiClass,
reference: ScStableCodeReference): ScStableCodeReference =
if (isPrefixCompletion) {
// TODO unify with ScalaLookupItem
val name = `class`.qualifiedName
.split('.')
.takeRight(2)
.mkString(".")
reference.replace {
createReferenceFromText(name)(`class`)
}.asInstanceOf[ScStableCodeReference]
} else {
reference
}
private def onDefinition(file: PsiFile, offset: Int)
(action: ScNewTemplateDefinition => Unit): Unit = {
val element = file.findElementAt(offset) match {
case e if e.isWhitespace => e.getPrevNonEmptyLeaf
case e => e
}
getParentOfType(element, classOf[ScNewTemplateDefinition]) match {
case null =>
case newTemplateDefinition => action(newTemplateDefinition)
}
}
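  // Decides which block delimiters to insert after the constructor call: the classic ` {` / `}`
  // pair, or a bare `:` (with no closing token) when the file uses Scala 3 indentation-based
  // syntax and there are members to implement.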
private def generateBlock(newTemplateDefinition: ScNewTemplateDefinition): (String, String) = {
val defaultBlock = (" {", "}")
val file = newTemplateDefinition.containingFile.getOrElse(return defaultBlock)
val useIndentationBasedSyntax = file.useIndentationBasedSyntax
if (!useIndentationBasedSyntax || getMembersToImplement(newTemplateDefinition).isEmpty)
defaultBlock
else (":", "")
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/completion/handlers/ScalaConstructorInsertHandler.scala
|
Scala
|
apache-2.0
| 6,482 |
package ru.pavkin.todoist.api.core.dto
import java.util.UUID
case class RawTempIdCommand[A](`type`: String, uuid: UUID, args: A, temp_id: UUID)
|
vpavkin/todoist-api-scala
|
core/src/main/scala/ru/pavkin/todoist/api/core/dto/RawTempIdCommand.scala
|
Scala
|
mit
| 146 |
package com.databricks.spark.sql.perf.mllib.feature
import org.apache.spark.ml
import org.apache.spark.ml.PipelineStage
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions.{col, split}
import com.databricks.spark.sql.perf.mllib.{BenchmarkAlgorithm, MLBenchContext, TestFromTraining}
import com.databricks.spark.sql.perf.mllib.OptionImplicits._
import com.databricks.spark.sql.perf.mllib.data.DataGenerator
/** Object for testing Word2Vec performance */
object Word2Vec extends BenchmarkAlgorithm with TestFromTraining {
override def trainingDataSet(ctx: MLBenchContext): DataFrame = {
import ctx.params._
val df = DataGenerator.generateDoc(
ctx.sqlContext,
numExamples,
ctx.seed(),
numPartitions,
vocabSize,
docLength,
"text"
)
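    // ml.feature.Word2Vec expects an array-of-strings input column, so split the generated text on spaces.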
df.select(split(col("text"), " ").as("text"))
}
override def getPipelineStage(ctx: MLBenchContext): PipelineStage = {
new ml.feature.Word2Vec().setInputCol("text")
}
}
|
wayblink/Naive
|
spark/spark-sql-perf-2.2/src/main/scala/com/databricks/spark/sql/perf/mllib/feature/Word2Vec.scala
|
Scala
|
mit
| 997 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.expressions.aggregate.{Complete, AggregateExpression2, AggregateFunction2}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.trees.TreeNodeRef
import org.apache.spark.sql.catalyst.{SimpleCatalystConf, CatalystConf}
import org.apache.spark.sql.types._
/**
* A trivial [[Analyzer]] with an [[EmptyCatalog]] and [[EmptyFunctionRegistry]]. Used for testing
* when all relations are already filled in and the analyzer needs only to resolve attribute
* references.
*/
object SimpleAnalyzer
extends Analyzer(EmptyCatalog, EmptyFunctionRegistry, new SimpleCatalystConf(true))
/**
* Provides a logical query plan analyzer, which translates [[UnresolvedAttribute]]s and
* [[UnresolvedRelation]]s into fully typed objects using information in a schema [[Catalog]] and
* a [[FunctionRegistry]].
*/
class Analyzer(
catalog: Catalog,
registry: FunctionRegistry,
conf: CatalystConf,
maxIterations: Int = 100)
extends RuleExecutor[LogicalPlan] with CheckAnalysis {
def resolver: Resolver = {
if (conf.caseSensitiveAnalysis) {
caseSensitiveResolution
} else {
caseInsensitiveResolution
}
}
val fixedPoint = FixedPoint(maxIterations)
/**
* Override to provide additional rules for the "Resolution" batch.
*/
val extendedResolutionRules: Seq[Rule[LogicalPlan]] = Nil
lazy val batches: Seq[Batch] = Seq(
Batch("Substitution", fixedPoint,
CTESubstitution ::
WindowsSubstitution ::
Nil : _*),
Batch("Resolution", fixedPoint,
ResolveRelations ::
ResolveReferences ::
ResolveGroupingAnalytics ::
ResolveSortReferences ::
ResolveGenerate ::
ResolveFunctions ::
ResolveAliases ::
ExtractWindowExpressions ::
GlobalAggregates ::
ResolveAggregateFunctions ::
HiveTypeCoercion.typeCoercionRules ++
extendedResolutionRules : _*),
Batch("Nondeterministic", Once,
PullOutNondeterministic),
Batch("Cleanup", fixedPoint,
CleanupAliases)
)
/**
* Substitute child plan with cte definitions
*/
object CTESubstitution extends Rule[LogicalPlan] {
// TODO allow subquery to define CTE
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case With(child, relations) => substituteCTE(child, relations)
case other => other
}
def substituteCTE(plan: LogicalPlan, cteRelations: Map[String, LogicalPlan]): LogicalPlan = {
plan transform {
      // In Hive, if the same table name exists in both a database and a CTE definition,
      // Hive will use the table in the database, not the CTE one.
      // Balancing reasonableness against implementation complexity,
      // we use the CTE definition first here, checking the table name only and ignoring the database name;
// see https://github.com/apache/spark/pull/4929#discussion_r27186638 for more info
case u : UnresolvedRelation =>
val substituted = cteRelations.get(u.tableIdentifier.table).map { relation =>
val withAlias = u.alias.map(Subquery(_, relation))
withAlias.getOrElse(relation)
}
substituted.getOrElse(u)
}
}
}
/**
* Substitute child plan with WindowSpecDefinitions.
*/
object WindowsSubstitution extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
// Lookup WindowSpecDefinitions. This rule works with unresolved children.
case WithWindowDefinition(windowDefinitions, child) =>
child.transform {
case plan => plan.transformExpressions {
case UnresolvedWindowExpression(c, WindowSpecReference(windowName)) =>
val errorMessage =
s"Window specification $windowName is not defined in the WINDOW clause."
val windowSpecDefinition =
windowDefinitions
.get(windowName)
.getOrElse(failAnalysis(errorMessage))
WindowExpression(c, windowSpecDefinition)
}
}
}
}
/**
* Replaces [[UnresolvedAlias]]s with concrete aliases.
*/
object ResolveAliases extends Rule[LogicalPlan] {
private def assignAliases(exprs: Seq[NamedExpression]) = {
exprs.zipWithIndex.map {
case (expr, i) =>
expr transform {
case u @ UnresolvedAlias(child) => child match {
case ne: NamedExpression => ne
case e if !e.resolved => u
case g: Generator if g.elementTypes.size > 1 => MultiAlias(g, Nil)
case c @ Cast(ne: NamedExpression, _) => Alias(c, ne.name)()
case other => Alias(other, s"_c$i")()
}
}
}.asInstanceOf[Seq[NamedExpression]]
}
private def hasUnresolvedAlias(exprs: Seq[NamedExpression]) =
exprs.exists(_.find(_.isInstanceOf[UnresolvedAlias]).isDefined)
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case Aggregate(groups, aggs, child) if child.resolved && hasUnresolvedAlias(aggs) =>
Aggregate(groups, assignAliases(aggs), child)
case g: GroupingAnalytics if g.child.resolved && hasUnresolvedAlias(g.aggregations) =>
g.withNewAggs(assignAliases(g.aggregations))
case Project(projectList, child) if child.resolved && hasUnresolvedAlias(projectList) =>
Project(assignAliases(projectList), child)
}
}
object ResolveGroupingAnalytics extends Rule[LogicalPlan] {
/*
* GROUP BY a, b, c WITH ROLLUP
* is equivalent to
* GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (a), ( ) ).
* Group Count: N + 1 (N is the number of group expressions)
*
   * We need to get all of its subsets for the rule described above; each subset is
   * represented as a bit mask.
*/
def bitmasks(r: Rollup): Seq[Int] = {
Seq.tabulate(r.groupByExprs.length + 1)(idx => {(1 << idx) - 1})
}
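    // Illustrative example: for GROUP BY a, b, c WITH ROLLUP this yields Seq(0, 1, 3, 7),
    // i.e. the masks for ( ), (a), (a, b) and (a, b, c).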
/*
* GROUP BY a, b, c WITH CUBE
* is equivalent to
* GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (b, c), (a, c), (a), (b), (c), ( ) ).
* Group Count: 2 ^ N (N is the number of group expressions)
*
   * We need to get all of its subsets for a given GROUP BY expression; the subsets are
   * represented as bit masks.
*/
def bitmasks(c: Cube): Seq[Int] = {
Seq.tabulate(1 << c.groupByExprs.length)(i => i)
}
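    // Illustrative example: for GROUP BY a, b, c WITH CUBE this yields Seq(0, 1, ..., 7),
    // one mask per subset of (a, b, c).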
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case a if !a.childrenResolved => a // be sure all of the children are resolved.
case a: Cube =>
GroupingSets(bitmasks(a), a.groupByExprs, a.child, a.aggregations)
case a: Rollup =>
GroupingSets(bitmasks(a), a.groupByExprs, a.child, a.aggregations)
case x: GroupingSets =>
val gid = AttributeReference(VirtualColumn.groupingIdName, IntegerType, false)()
        // We will insert another Projection if the GROUP BY keys contain
        // non-attribute expressions, and the top operators can then reference those
        // expressions by their aliases.
// e.g. SELECT key%5 as c1 FROM src GROUP BY key%5 ==>
// SELECT a as c1 FROM (SELECT key%5 AS a FROM src) GROUP BY a
// find all of the non-attribute expressions in the GROUP BY keys
val nonAttributeGroupByExpressions = new ArrayBuffer[Alias]()
// The pair of (the original GROUP BY key, associated attribute)
val groupByExprPairs = x.groupByExprs.map(_ match {
case e: NamedExpression => (e, e.toAttribute)
case other => {
val alias = Alias(other, other.toString)()
nonAttributeGroupByExpressions += alias // add the non-attributes expression alias
(other, alias.toAttribute)
}
})
// substitute the non-attribute expressions for aggregations.
val aggregation = x.aggregations.map(expr => expr.transformDown {
case e => groupByExprPairs.find(_._1.semanticEquals(e)).map(_._2).getOrElse(e)
}.asInstanceOf[NamedExpression])
// substitute the group by expressions.
val newGroupByExprs = groupByExprPairs.map(_._2)
val child = if (nonAttributeGroupByExpressions.length > 0) {
          // insert an additional projection if the GROUP BY keys contain
          // non-attribute expressions
Project(x.child.output ++ nonAttributeGroupByExpressions, x.child)
} else {
x.child
}
Aggregate(
newGroupByExprs :+ VirtualColumn.groupingIdAttribute,
aggregation,
Expand(x.bitmasks, newGroupByExprs, gid, child))
}
}
/**
* Replaces [[UnresolvedRelation]]s with concrete relations from the catalog.
*/
object ResolveRelations extends Rule[LogicalPlan] {
def getTable(u: UnresolvedRelation): LogicalPlan = {
try {
catalog.lookupRelation(u.tableIdentifier, u.alias)
} catch {
case _: NoSuchTableException =>
u.failAnalysis(s"Table not found: ${u.tableName}")
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case i @ InsertIntoTable(u: UnresolvedRelation, _, _, _, _) =>
i.copy(table = EliminateSubQueries(getTable(u)))
case u: UnresolvedRelation =>
try {
getTable(u)
} catch {
case _: AnalysisException if u.tableIdentifier.database.isDefined =>
// delay the exception into CheckAnalysis, then it could be resolved as data source.
u
}
}
}
/**
* Replaces [[UnresolvedAttribute]]s with concrete [[AttributeReference]]s from
* a logical plan node's children.
*/
object ResolveReferences extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case p: LogicalPlan if !p.childrenResolved => p
// If the projection list contains Stars, expand it.
case p @ Project(projectList, child) if containsStar(projectList) =>
Project(
projectList.flatMap {
case s: Star => s.expand(child.output, resolver)
case UnresolvedAlias(f @ UnresolvedFunction(_, args, _)) if containsStar(args) =>
val expandedArgs = args.flatMap {
case s: Star => s.expand(child.output, resolver)
case o => o :: Nil
}
UnresolvedAlias(child = f.copy(children = expandedArgs)) :: Nil
case UnresolvedAlias(c @ CreateArray(args)) if containsStar(args) =>
val expandedArgs = args.flatMap {
case s: Star => s.expand(child.output, resolver)
case o => o :: Nil
}
UnresolvedAlias(c.copy(children = expandedArgs)) :: Nil
case UnresolvedAlias(c @ CreateStruct(args)) if containsStar(args) =>
val expandedArgs = args.flatMap {
case s: Star => s.expand(child.output, resolver)
case o => o :: Nil
}
UnresolvedAlias(c.copy(children = expandedArgs)) :: Nil
case o => o :: Nil
},
child)
case t: ScriptTransformation if containsStar(t.input) =>
t.copy(
input = t.input.flatMap {
case s: Star => s.expand(t.child.output, resolver)
case o => o :: Nil
}
)
// If the aggregate function argument contains Stars, expand it.
case a: Aggregate if containsStar(a.aggregateExpressions) =>
a.copy(
aggregateExpressions = a.aggregateExpressions.flatMap {
case s: Star => s.expand(a.child.output, resolver)
case o => o :: Nil
}
)
      // Special handling for cases where a self-join introduces duplicate expression ids.
case j @ Join(left, right, _, _) if !j.selfJoinResolved =>
val conflictingAttributes = left.outputSet.intersect(right.outputSet)
logDebug(s"Conflicting attributes ${conflictingAttributes.mkString(",")} in $j")
right.collect {
// Handle base relations that might appear more than once.
case oldVersion: MultiInstanceRelation
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
val newVersion = oldVersion.newInstance()
(oldVersion, newVersion)
// Handle projects that create conflicting aliases.
case oldVersion @ Project(projectList, _)
if findAliases(projectList).intersect(conflictingAttributes).nonEmpty =>
(oldVersion, oldVersion.copy(projectList = newAliases(projectList)))
case oldVersion @ Aggregate(_, aggregateExpressions, _)
if findAliases(aggregateExpressions).intersect(conflictingAttributes).nonEmpty =>
(oldVersion, oldVersion.copy(aggregateExpressions = newAliases(aggregateExpressions)))
case oldVersion: Generate
if oldVersion.generatedSet.intersect(conflictingAttributes).nonEmpty =>
val newOutput = oldVersion.generatorOutput.map(_.newInstance())
(oldVersion, oldVersion.copy(generatorOutput = newOutput))
case oldVersion @ Window(_, windowExpressions, _, _, child)
if AttributeSet(windowExpressions.map(_.toAttribute)).intersect(conflictingAttributes)
.nonEmpty =>
(oldVersion, oldVersion.copy(windowExpressions = newAliases(windowExpressions)))
}
// Only handle first case, others will be fixed on the next pass.
.headOption match {
case None =>
/*
* No result implies that there is a logical plan node that produces new references
* that this rule cannot handle. When that is the case, there must be another rule
* that resolves these conflicts. Otherwise, the analysis will fail.
*/
j
case Some((oldRelation, newRelation)) =>
val attributeRewrites = AttributeMap(oldRelation.output.zip(newRelation.output))
val newRight = right transformUp {
case r if r == oldRelation => newRelation
} transformUp {
case other => other transformExpressions {
case a: Attribute => attributeRewrites.get(a).getOrElse(a)
}
}
j.copy(right = newRight)
}
      // When resolving `SortOrder`s in Sort based on the child, don't report errors, as
      // we still have a chance to resolve them based on the grandchild.
case s @ Sort(ordering, global, child) if child.resolved && !s.resolved =>
val newOrdering = resolveSortOrders(ordering, child, throws = false)
Sort(newOrdering, global, child)
// A special case for Generate, because the output of Generate should not be resolved by
// ResolveReferences. Attributes in the output will be resolved by ResolveGenerate.
case g @ Generate(generator, join, outer, qualifier, output, child)
if child.resolved && !generator.resolved =>
val newG = generator transformUp {
case u @ UnresolvedAttribute(nameParts) =>
withPosition(u) { child.resolve(nameParts, resolver).getOrElse(u) }
case UnresolvedExtractValue(child, fieldExpr) =>
ExtractValue(child, fieldExpr, resolver)
}
if (newG.fastEquals(generator)) {
g
} else {
Generate(newG.asInstanceOf[Generator], join, outer, qualifier, output, child)
}
case q: LogicalPlan =>
logTrace(s"Attempting to resolve ${q.simpleString}")
q transformExpressionsUp {
case u @ UnresolvedAttribute(nameParts) =>
// Leave unchanged if resolution fails. Hopefully will be resolved next round.
val result =
withPosition(u) { q.resolveChildren(nameParts, resolver).getOrElse(u) }
logDebug(s"Resolving $u to $result")
result
case UnresolvedExtractValue(child, fieldExpr) if child.resolved =>
ExtractValue(child, fieldExpr, resolver)
}
}
def newAliases(expressions: Seq[NamedExpression]): Seq[NamedExpression] = {
expressions.map {
case a: Alias => Alias(a.child, a.name)()
case other => other
}
}
def findAliases(projectList: Seq[NamedExpression]): AttributeSet = {
AttributeSet(projectList.collect { case a: Alias => a.toAttribute })
}
/**
* Returns true if `exprs` contains a [[Star]].
*/
def containsStar(exprs: Seq[Expression]): Boolean =
exprs.exists(_.collect { case _: Star => true }.nonEmpty)
}
private def resolveSortOrders(ordering: Seq[SortOrder], plan: LogicalPlan, throws: Boolean) = {
ordering.map { order =>
      // Resolve SortOrder in one round.
      // If the desired attribute doesn't exist (e.g. trying to resolve `a.b` when `a` doesn't
      // exist) and throws == false, give up and return the original SortOrder.
      // Otherwise, let the AnalysisException propagate.
try {
val newOrder = order transformUp {
case u @ UnresolvedAttribute(nameParts) =>
plan.resolve(nameParts, resolver).getOrElse(u)
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
newOrder.asInstanceOf[SortOrder]
} catch {
case a: AnalysisException if !throws => order
}
}
}
/**
* In many dialects of SQL it is valid to sort by attributes that are not present in the SELECT
* clause. This rule detects such queries and adds the required attributes to the original
* projection, so that they will be available during sorting. Another projection is added to
* remove these attributes after sorting.
*/
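  // Illustrative example (not from the original source): `SELECT a FROM t ORDER BY b` becomes
  // Project([a], Sort([b], Project([a, b], t))) after this rule.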
object ResolveSortReferences extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case s @ Sort(ordering, global, p @ Project(projectList, child))
if !s.resolved && p.resolved =>
val (newOrdering, missing) = resolveAndFindMissing(ordering, p, child)
// If this rule was not a no-op, return the transformed plan, otherwise return the original.
if (missing.nonEmpty) {
// Add missing attributes and then project them away after the sort.
Project(p.output,
Sort(newOrdering, global,
Project(projectList ++ missing, child)))
} else {
logDebug(s"Failed to find $missing in ${p.output.mkString(", ")}")
s // Nothing we can do here. Return original plan.
}
}
/**
* Given a child and a grandchild that are present beneath a sort operator, try to resolve
* the sort ordering and returns it with a list of attributes that are missing from the
* child but are present in the grandchild.
*/
def resolveAndFindMissing(
ordering: Seq[SortOrder],
child: LogicalPlan,
grandchild: LogicalPlan): (Seq[SortOrder], Seq[Attribute]) = {
val newOrdering = resolveSortOrders(ordering, grandchild, throws = true)
// Construct a set that contains all of the attributes that we need to evaluate the
// ordering.
val requiredAttributes = AttributeSet(newOrdering).filter(_.resolved)
// Figure out which ones are missing from the projection, so that we can add them and
// remove them after the sort.
val missingInProject = requiredAttributes -- child.output
// It is important to return the new SortOrders here, instead of waiting for the standard
// resolving process as adding attributes to the project below can actually introduce
// ambiguity that was not present before.
(newOrdering, missingInProject.toSeq)
}
}
/**
* Replaces [[UnresolvedFunction]]s with concrete [[Expression]]s.
*/
object ResolveFunctions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case q: LogicalPlan =>
q transformExpressions {
case u if !u.childrenResolved => u // Skip until children are resolved.
case u @ UnresolvedFunction(name, children, isDistinct) =>
withPosition(u) {
registry.lookupFunction(name, children) match {
// We get an aggregate function built based on AggregateFunction2 interface.
// So, we wrap it in AggregateExpression2.
case agg2: AggregateFunction2 => AggregateExpression2(agg2, Complete, isDistinct)
// Currently, our old aggregate function interface supports SUM(DISTINCT ...)
                // and COUNT(DISTINCT ...).
case sumDistinct: SumDistinct => sumDistinct
case countDistinct: CountDistinct => countDistinct
// DISTINCT is not meaningful with Max and Min.
case max: Max if isDistinct => max
case min: Min if isDistinct => min
// For other aggregate functions, DISTINCT keyword is not supported for now.
// Once we converted to the new code path, we will allow using DISTINCT keyword.
case other: AggregateExpression1 if isDistinct =>
failAnalysis(s"$name does not support DISTINCT keyword.")
// If it does not have DISTINCT keyword, we will return it as is.
case other => other
}
}
}
}
}
/**
* Turns projections that contain aggregate expressions into aggregations.
*/
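  // Illustrative example: `SELECT sum(a) FROM t` is turned into Aggregate(Nil, [sum(a)], t)
  // instead of a plain Project over t.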
object GlobalAggregates extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case Project(projectList, child) if containsAggregates(projectList) =>
Aggregate(Nil, projectList, child)
}
def containsAggregates(exprs: Seq[Expression]): Boolean = {
exprs.foreach(_.foreach {
case agg: AggregateExpression => return true
case _ =>
})
false
}
}
/**
* This rule finds aggregate expressions that are not in an aggregate operator. For example,
* those in a HAVING clause or ORDER BY clause. These expressions are pushed down to the
* underlying aggregate operator and then projected away after the original operator.
*/
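  // Illustrative example: for `SELECT key FROM t GROUP BY key HAVING max(value) > 0`, the
  // max(value) expression is pushed into the underlying Aggregate, the HAVING condition becomes a
  // Filter over its result, and the extra column is projected away afterwards.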
object ResolveAggregateFunctions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case filter @ Filter(havingCondition,
aggregate @ Aggregate(grouping, originalAggExprs, child))
if aggregate.resolved =>
// Try resolving the condition of the filter as though it is in the aggregate clause
val aggregatedCondition =
Aggregate(grouping, Alias(havingCondition, "havingCondition")() :: Nil, child)
val resolvedOperator = execute(aggregatedCondition)
def resolvedAggregateFilter =
resolvedOperator
.asInstanceOf[Aggregate]
.aggregateExpressions.head
// If resolution was successful and we see the filter has an aggregate in it, add it to
// the original aggregate operator.
if (resolvedOperator.resolved && containsAggregate(resolvedAggregateFilter)) {
val aggExprsWithHaving = resolvedAggregateFilter +: originalAggExprs
Project(aggregate.output,
Filter(resolvedAggregateFilter.toAttribute,
aggregate.copy(aggregateExpressions = aggExprsWithHaving)))
} else {
filter
}
case sort @ Sort(sortOrder, global, aggregate: Aggregate)
if aggregate.resolved =>
// Try resolving the ordering as though it is in the aggregate clause.
try {
val aliasedOrdering = sortOrder.map(o => Alias(o.child, "aggOrder")())
val aggregatedOrdering = aggregate.copy(aggregateExpressions = aliasedOrdering)
val resolvedAggregate: Aggregate = execute(aggregatedOrdering).asInstanceOf[Aggregate]
val resolvedAliasedOrdering: Seq[Alias] =
resolvedAggregate.aggregateExpressions.asInstanceOf[Seq[Alias]]
          // If we pass the analysis check, then the ordering expressions should only reference
// aggregate expressions or grouping expressions, and it's safe to push them down to
// Aggregate.
checkAnalysis(resolvedAggregate)
val originalAggExprs = aggregate.aggregateExpressions.map(
CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
          // If the ordering expression is the same as an original aggregate expression, we don't need
// to push down this ordering expression and can reference the original aggregate
// expression instead.
val needsPushDown = ArrayBuffer.empty[NamedExpression]
val evaluatedOrderings = resolvedAliasedOrdering.zip(sortOrder).map {
case (evaluated, order) =>
val index = originalAggExprs.indexWhere {
case Alias(child, _) => child semanticEquals evaluated.child
case other => other semanticEquals evaluated.child
}
if (index == -1) {
needsPushDown += evaluated
order.copy(child = evaluated.toAttribute)
} else {
order.copy(child = originalAggExprs(index).toAttribute)
}
}
// Since we don't rely on sort.resolved as the stop condition for this rule,
// we need to check this and prevent applying this rule multiple times
if (sortOrder == evaluatedOrderings) {
sort
} else {
Project(aggregate.output,
Sort(evaluatedOrderings, global,
aggregate.copy(aggregateExpressions = originalAggExprs ++ needsPushDown)))
}
} catch {
// Attempting to resolve in the aggregate can result in ambiguity. When this happens,
// just return the original plan.
case ae: AnalysisException => sort
}
}
protected def containsAggregate(condition: Expression): Boolean = {
condition.find(_.isInstanceOf[AggregateExpression]).isDefined
}
}
/**
   * Rewrites table generating expressions that need one or more of the following in order
   * to be resolved:
   *  - concrete attribute references for their output.
   *  - to be relocated from a SELECT clause (i.e. from a [[Project]]) into a [[Generate]].
   *
   * Names for the output [[Attribute]]s are extracted from [[Alias]] or [[MultiAlias]] expressions
   * that wrap the [[Generator]]. If more than one [[Generator]] is found in a Project, an
   * [[AnalysisException]] is thrown.
*/
object ResolveGenerate extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case g: Generate if ResolveReferences.containsStar(g.generator.children) =>
failAnalysis("Cannot explode *, explode can only be applied on a specific column.")
case p: Generate if !p.child.resolved || !p.generator.resolved => p
case g: Generate if !g.resolved =>
g.copy(generatorOutput = makeGeneratorOutput(g.generator, g.generatorOutput.map(_.name)))
case p @ Project(projectList, child) =>
// Holds the resolved generator, if one exists in the project list.
var resolvedGenerator: Generate = null
val newProjectList = projectList.flatMap {
case AliasedGenerator(generator, names) if generator.childrenResolved =>
if (resolvedGenerator != null) {
failAnalysis(
s"Only one generator allowed per select but ${resolvedGenerator.nodeName} and " +
s"and ${generator.nodeName} found.")
}
resolvedGenerator =
Generate(
generator,
join = projectList.size > 1, // Only join if there are other expressions in SELECT.
outer = false,
qualifier = None,
generatorOutput = makeGeneratorOutput(generator, names),
child)
resolvedGenerator.generatorOutput
case other => other :: Nil
}
if (resolvedGenerator != null) {
Project(newProjectList, resolvedGenerator)
} else {
p
}
}
/** Extracts a [[Generator]] expression and any names assigned by aliases to their output. */
private object AliasedGenerator {
def unapply(e: Expression): Option[(Generator, Seq[String])] = e match {
case Alias(g: Generator, name) if g.resolved && g.elementTypes.size > 1 =>
        // A single alias was supplied for a table generating function that produces multiple output columns.
        failAnalysis(
          s"""Expect multiple names given for ${g.getClass.getName},
             |but only a single name '${name}' was specified""".stripMargin)
case Alias(g: Generator, name) if g.resolved => Some((g, name :: Nil))
case MultiAlias(g: Generator, names) if g.resolved => Some(g, names)
case _ => None
}
}
/**
* Construct the output attributes for a [[Generator]], given a list of names. If the list of
   * names is empty, names are assigned by ordinal (i.e., _c0, _c1, ...) to match Hive's defaults.
*/
private def makeGeneratorOutput(
generator: Generator,
names: Seq[String]): Seq[Attribute] = {
val elementTypes = generator.elementTypes
if (names.length == elementTypes.length) {
names.zip(elementTypes).map {
case (name, (t, nullable)) =>
AttributeReference(name, t, nullable)()
}
} else if (names.isEmpty) {
elementTypes.zipWithIndex.map {
// keep the default column names as Hive does _c0, _c1, _cN
case ((t, nullable), i) => AttributeReference(s"_c$i", t, nullable)()
}
} else {
failAnalysis(
"The number of aliases supplied in the AS clause does not match the number of columns " +
s"output by the UDTF expected ${elementTypes.size} aliases but got " +
s"${names.mkString(",")} ")
}
}
}
/**
* Extracts [[WindowExpression]]s from the projectList of a [[Project]] operator and
* aggregateExpressions of an [[Aggregate]] operator and creates individual [[Window]]
* operators for every distinct [[WindowSpecDefinition]].
*
* This rule handles three cases:
* - A [[Project]] having [[WindowExpression]]s in its projectList;
* - An [[Aggregate]] having [[WindowExpression]]s in its aggregateExpressions.
 * - A [[Filter]]->[[Aggregate]] pattern representing GROUP BY with a HAVING
* clause and the [[Aggregate]] has [[WindowExpression]]s in its aggregateExpressions.
* Note: If there is a GROUP BY clause in the query, aggregations and corresponding
* filters (expressions in the HAVING clause) should be evaluated before any
* [[WindowExpression]]. If a query has SELECT DISTINCT, the DISTINCT part should be
* evaluated after all [[WindowExpression]]s.
*
* For every case, the transformation works as follows:
* 1. For a list of [[Expression]]s (a projectList or an aggregateExpressions), partitions
   *    it into two lists of [[Expression]]s, one for all [[WindowExpression]]s and another for
* all regular expressions.
* 2. For all [[WindowExpression]]s, groups them based on their [[WindowSpecDefinition]]s.
* 3. For every distinct [[WindowSpecDefinition]], creates a [[Window]] operator and inserts
* it into the plan tree.
*/
object ExtractWindowExpressions extends Rule[LogicalPlan] {
private def hasWindowFunction(projectList: Seq[NamedExpression]): Boolean =
projectList.exists(hasWindowFunction)
private def hasWindowFunction(expr: NamedExpression): Boolean = {
expr.find {
case window: WindowExpression => true
case _ => false
}.isDefined
}
/**
* From a Seq of [[NamedExpression]]s, extract expressions containing window expressions and
* other regular expressions that do not contain any window expression. For example, for
* `col1, Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5)`, we will extract
* `col1`, `col2 + col3`, `col4`, and `col5` out and replace their appearances in
* the window expression as attribute references. So, the first returned value will be
* `[Sum(_w0) OVER (PARTITION BY _w1 ORDER BY _w2)]` and the second returned value will be
* [col1, col2 + col3 as _w0, col4 as _w1, col5 as _w2].
*
     * @return (seq of expressions containing at least one window expression,
* seq of non-window expressions)
*/
private def extract(
expressions: Seq[NamedExpression]): (Seq[NamedExpression], Seq[NamedExpression]) = {
      // First, we partition the input expressions into two parts. For the first part,
      // every expression in it contains at least one WindowExpression.
// Expressions in the second part do not have any WindowExpression.
val (expressionsWithWindowFunctions, regularExpressions) =
expressions.partition(hasWindowFunction)
// Then, we need to extract those regular expressions used in the WindowExpression.
// For example, when we have col1 - Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5),
// we need to make sure that col1 to col5 are all projected from the child of the Window
// operator.
val extractedExprBuffer = new ArrayBuffer[NamedExpression]()
def extractExpr(expr: Expression): Expression = expr match {
case ne: NamedExpression =>
// If a named expression is not in regularExpressions, add it to
// extractedExprBuffer and replace it with an AttributeReference.
val missingExpr =
AttributeSet(Seq(expr)) -- (regularExpressions ++ extractedExprBuffer)
if (missingExpr.nonEmpty) {
extractedExprBuffer += ne
}
ne.toAttribute
case e: Expression if e.foldable =>
e // No need to create an attribute reference if it will be evaluated as a Literal.
case e: Expression =>
// For other expressions, we extract it and replace it with an AttributeReference (with
          // an internal column name, e.g. "_w0").
val withName = Alias(e, s"_w${extractedExprBuffer.length}")()
extractedExprBuffer += withName
withName.toAttribute
}
// Now, we extract regular expressions from expressionsWithWindowFunctions
// by using extractExpr.
val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
_.transform {
// Extracts children expressions of a WindowFunction (input parameters of
// a WindowFunction).
case wf : WindowFunction =>
val newChildren = wf.children.map(extractExpr(_))
wf.withNewChildren(newChildren)
// Extracts expressions from the partition spec and order spec.
case wsc @ WindowSpecDefinition(partitionSpec, orderSpec, _) =>
val newPartitionSpec = partitionSpec.map(extractExpr(_))
val newOrderSpec = orderSpec.map { so =>
val newChild = extractExpr(so.child)
so.copy(child = newChild)
}
wsc.copy(partitionSpec = newPartitionSpec, orderSpec = newOrderSpec)
// Extracts AggregateExpression. For example, for SUM(x) - Sum(y) OVER (...),
// we need to extract SUM(x).
case agg: AggregateExpression =>
val withName = Alias(agg, s"_w${extractedExprBuffer.length}")()
extractedExprBuffer += withName
withName.toAttribute
// Extracts other attributes
case attr: Attribute => extractExpr(attr)
}.asInstanceOf[NamedExpression]
}
(newExpressionsWithWindowFunctions, regularExpressions ++ extractedExprBuffer)
} // end of extract
/**
* Adds operators for Window Expressions. Every Window operator handles a single Window Spec.
*/
private def addWindow(
expressionsWithWindowFunctions: Seq[NamedExpression],
child: LogicalPlan): LogicalPlan = {
// First, we need to extract all WindowExpressions from expressionsWithWindowFunctions
// and put those extracted WindowExpressions to extractedWindowExprBuffer.
// This step is needed because it is possible that an expression contains multiple
// WindowExpressions with different Window Specs.
// After extracting WindowExpressions, we need to construct a project list to generate
// expressionsWithWindowFunctions based on extractedWindowExprBuffer.
// For example, for "sum(a) over (...) / sum(b) over (...)", we will first extract
// "sum(a) over (...)" and "sum(b) over (...)" out, and assign "_we0" as the alias to
// "sum(a) over (...)" and "_we1" as the alias to "sum(b) over (...)".
// Then, the projectList will be [_we0/_we1].
val extractedWindowExprBuffer = new ArrayBuffer[NamedExpression]()
val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
// We need to use transformDown because we want to trigger
// "case alias @ Alias(window: WindowExpression, _)" first.
_.transformDown {
case alias @ Alias(window: WindowExpression, _) =>
// If a WindowExpression has an assigned alias, just use it.
extractedWindowExprBuffer += alias
alias.toAttribute
case window: WindowExpression =>
          // If there is no alias assigned to the WindowExpression, we create an
// internal column.
val withName = Alias(window, s"_we${extractedWindowExprBuffer.length}")()
extractedWindowExprBuffer += withName
withName.toAttribute
}.asInstanceOf[NamedExpression]
}
// Second, we group extractedWindowExprBuffer based on their Partition and Order Specs.
val groupedWindowExpressions = extractedWindowExprBuffer.groupBy { expr =>
val distinctWindowSpec = expr.collect {
case window: WindowExpression => window.windowSpec
}.distinct
// We do a final check and see if we only have a single Window Spec defined in an
        // expression.
if (distinctWindowSpec.length == 0 ) {
failAnalysis(s"$expr does not have any WindowExpression.")
} else if (distinctWindowSpec.length > 1) {
// newExpressionsWithWindowFunctions only have expressions with a single
// WindowExpression. If we reach here, we have a bug.
failAnalysis(s"$expr has multiple Window Specifications ($distinctWindowSpec)." +
s"Please file a bug report with this error message, stack trace, and the query.")
} else {
val spec = distinctWindowSpec.head
(spec.partitionSpec, spec.orderSpec)
}
}.toSeq
// Third, for every Window Spec, we add a Window operator and set currentChild as the
// child of it.
var currentChild = child
var i = 0
while (i < groupedWindowExpressions.size) {
val ((partitionSpec, orderSpec), windowExpressions) = groupedWindowExpressions(i)
// Set currentChild to the newly created Window operator.
currentChild =
Window(
currentChild.output,
windowExpressions,
partitionSpec,
orderSpec,
currentChild)
// Move to next Window Spec.
i += 1
}
      // Finally, we create a Project to output currentChild's output together with the
      // newExpressionsWithWindowFunctions.
Project(currentChild.output ++ newExpressionsWithWindowFunctions, currentChild)
} // end of addWindow
    // We have to use transformDown here to make sure the
    // "Aggregate with Having clause" rule is triggered.
def apply(plan: LogicalPlan): LogicalPlan = plan transformDown {
// Aggregate with Having clause. This rule works with an unresolved Aggregate because
// a resolved Aggregate will not have Window Functions.
case f @ Filter(condition, a @ Aggregate(groupingExprs, aggregateExprs, child))
if child.resolved &&
hasWindowFunction(aggregateExprs) &&
a.expressions.forall(_.resolved) =>
val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
// Create an Aggregate operator to evaluate aggregation functions.
val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
// Add a Filter operator for conditions in the Having clause.
val withFilter = Filter(condition, withAggregate)
val withWindow = addWindow(windowExpressions, withFilter)
// Finally, generate output columns according to the original projectList.
val finalProjectList = aggregateExprs.map (_.toAttribute)
Project(finalProjectList, withWindow)
case p: LogicalPlan if !p.childrenResolved => p
// Aggregate without Having clause.
case a @ Aggregate(groupingExprs, aggregateExprs, child)
if hasWindowFunction(aggregateExprs) &&
a.expressions.forall(_.resolved) =>
val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
// Create an Aggregate operator to evaluate aggregation functions.
val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
// Add Window operators.
val withWindow = addWindow(windowExpressions, withAggregate)
// Finally, generate output columns according to the original projectList.
val finalProjectList = aggregateExprs.map (_.toAttribute)
Project(finalProjectList, withWindow)
// We only extract Window Expressions after all expressions of the Project
// have been resolved.
case p @ Project(projectList, child)
if hasWindowFunction(projectList) && !p.expressions.exists(!_.resolved) =>
val (windowExpressions, regularExpressions) = extract(projectList)
// We add a project to get all needed expressions for window expressions from the child
// of the original Project operator.
val withProject = Project(regularExpressions, child)
// Add Window operators.
val withWindow = addWindow(windowExpressions, withProject)
// Finally, generate output columns according to the original projectList.
val finalProjectList = projectList.map (_.toAttribute)
Project(finalProjectList, withWindow)
}
}
/**
   * Pulls out nondeterministic expressions from a LogicalPlan that is not a Project or Filter,
   * puts them into an inner Project and finally projects them away in an outer Project.
*/
object PullOutNondeterministic extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case p if !p.resolved => p // Skip unresolved nodes.
case p: Project => p
case f: Filter => f
// todo: It's hard to write a general rule to pull out nondeterministic expressions
      // from a LogicalPlan; currently we only do it for a UnaryNode that has the same output
      // schema as its child.
case p: UnaryNode if p.output == p.child.output && p.expressions.exists(!_.deterministic) =>
val nondeterministicExprs = p.expressions.filterNot(_.deterministic).flatMap { expr =>
val leafNondeterministic = expr.collect {
case n: Nondeterministic => n
}
leafNondeterministic.map { e =>
val ne = e match {
case n: NamedExpression => n
case _ => Alias(e, "_nondeterministic")()
}
new TreeNodeRef(e) -> ne
}
}.toMap
val newPlan = p.transformExpressions { case e =>
nondeterministicExprs.get(new TreeNodeRef(e)).map(_.toAttribute).getOrElse(e)
}
val newChild = Project(p.child.output ++ nondeterministicExprs.values, p.child)
Project(p.output, newPlan.withNewChildren(newChild :: Nil))
}
}
}
/**
* Removes [[Subquery]] operators from the plan. Subqueries are only required to provide
* scoping information for attributes and can be removed once analysis is complete.
*/
object EliminateSubQueries extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Subquery(_, child) => child
}
}
/**
* Cleans up unnecessary Aliases inside the plan. Basically we only need Alias as a top level
* expression in Project(project list) or Aggregate(aggregate expressions) or
* Window(window expressions).
*/
object CleanupAliases extends Rule[LogicalPlan] {
private def trimAliases(e: Expression): Expression = {
var stop = false
e.transformDown {
// CreateStruct is a special case, we need to retain its top level Aliases as they decide the
      // name of StructField. We also need to stop transforming down into this expression, or the Aliases
// under CreateStruct will be mistakenly trimmed.
case c: CreateStruct if !stop =>
stop = true
c.copy(children = c.children.map(trimNonTopLevelAliases))
case c: CreateStructUnsafe if !stop =>
stop = true
c.copy(children = c.children.map(trimNonTopLevelAliases))
case Alias(child, _) if !stop => child
}
}
def trimNonTopLevelAliases(e: Expression): Expression = e match {
case a: Alias =>
Alias(trimAliases(a.child), a.name)(a.exprId, a.qualifiers, a.explicitMetadata)
case other => trimAliases(other)
}
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case Project(projectList, child) =>
val cleanedProjectList =
projectList.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
Project(cleanedProjectList, child)
case Aggregate(grouping, aggs, child) =>
val cleanedAggs = aggs.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
Aggregate(grouping.map(trimAliases), cleanedAggs, child)
case w @ Window(projectList, windowExprs, partitionSpec, orderSpec, child) =>
val cleanedWindowExprs =
windowExprs.map(e => trimNonTopLevelAliases(e).asInstanceOf[NamedExpression])
Window(projectList, cleanedWindowExprs, partitionSpec.map(trimAliases),
orderSpec.map(trimAliases(_).asInstanceOf[SortOrder]), child)
case other =>
var stop = false
other transformExpressionsDown {
case c: CreateStruct if !stop =>
stop = true
c.copy(children = c.children.map(trimNonTopLevelAliases))
case c: CreateStructUnsafe if !stop =>
stop = true
c.copy(children = c.children.map(trimNonTopLevelAliases))
case Alias(child, _) if !stop => child
}
}
}
|
pronix/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
|
Scala
|
apache-2.0
| 48,044 |
package controllers
import play.api._
import play.api.mvc._
import java.io.File
import scala.io.Source
import org.clapper.markwrap.MarkWrap
import org.clapper.markwrap.MarkupType
import java.text.SimpleDateFormat
import java.text.DateFormat
import java.util.Locale
import scala.xml.Elem
object Notes extends Controller {
  val rssDateFormat = new SimpleDateFormat("E, dd MMM yyyy HH:mm:ss Z", Locale.US) // RFC 822 dates use English names
def index = content(Application.data("notes/"))
def content(data: String) = Action { implicit request =>
render {
case Accepts.Html() => Ok(html(data)).as("text/html")
case _ => Ok(rss(data)).as("application/rss+xml")
}
}
def loadAllFiles(root: String) = {
for (file <- new File(root).listFiles if !file.isHidden()) yield {
Logger.trace("Loading file: " + file)
val source = Source.fromFile(file).mkString
      (file.getName().split("\\.")(0), parseToHtml(source))
}
}
def parseToHtml(source: String) =
MarkWrap.parserFor(MarkupType.Textile).parseToHTML(source)
def html(data: String) =
scala.xml.XML.loadString(
views.html.tags.feed.render(
data, List((rss(data), "application/rss+xml"))).body)
def rss(location: String) = {
val data = (loadAllFiles(location) sortBy (_._1))(Ordering[String].reverse)
Logger.trace("data: " + data.toList)
val latestRssTime = parseToRssTime(data.head._1)
<rss version="2.0">
<channel>
<title>Notes RSS feed</title>
<description>Feed description</description>
<link>http://www.example.com/rss</link>
<lastBuildDate>{ latestRssTime }</lastBuildDate>
<pubDate>{ latestRssTime }</pubDate>
{
for ((time, text) <- data; rssTime = parseToRssTime(time)) yield {
<item>
<title>{ rssTime }</title>
<description>{ text }</description>
<link>http://www.example.com/item</link>
<guid isPermaLink="false">{ rssTime }</guid>
<pubDate>{ rssTime }</pubDate>
</item>
}
}
</channel>
</rss>
}
def parseToRssTime(time: String) = {
    val inputFormat = new SimpleDateFormat("yyyy-MM-dd") // MM = month of year (lowercase mm would be minutes)
val outputFormat = rssDateFormat
outputFormat.format(inputFormat.parse(time))
}
}
|
fsteeg/appd
|
app/controllers/Notes.scala
|
Scala
|
epl-1.0
| 2,280 |
package com.github.uchibori3.mfcloud.invoice
import akka.actor.ActorSystem
import akka.stream.scaladsl.Flow
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import scala.concurrent.duration._
trait HttpClient {
def connection(scheme: String, host: String, port: Int, timeout: Int): Flow[HttpRequest, HttpResponse, _]
def connectionHttp(host: String, port: Int = 80, timeout: Int = 15): Flow[HttpRequest, HttpResponse, _]
def connectionHttps(host: String, port: Int = 443, timeout: Int = 15): Flow[HttpRequest, HttpResponse, _]
}
class HttpClientImpl(implicit actorSystem: ActorSystem) extends HttpClient {
override def connection(scheme: String, host: String, port: Int, timeout: Int): Flow[HttpRequest, HttpResponse, _] =
scheme match {
case "http" => connectionHttp(host, port, timeout)
case "https" => connectionHttps(host, port, timeout)
case _ => connectionHttp(host, port, timeout)
}
override def connectionHttp(host: String, port: Int, timeout: Int): Flow[HttpRequest, HttpResponse, _] =
Http().outgoingConnection(host, port).idleTimeout(timeout.seconds)
override def connectionHttps(host: String, port: Int, timeout: Int): Flow[HttpRequest, HttpResponse, _] =
Http().outgoingConnectionHttps(host, port).idleTimeout(timeout.seconds)
}
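// Usage sketch (illustrative, not part of the original file; assumes an implicit Materializer in scope):
//   Source.single(HttpRequest(uri = "/invoices"))
//     .via(new HttpClientImpl().connectionHttps("example.com"))
//     .runWith(Sink.head)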
|
Uchibori3/mfcloud-invoice-scala
|
src/main/scala/com/github/uchibori3/mfcloud/invoice/HttpClient.scala
|
Scala
|
apache-2.0
| 1,345 |
package com.sksamuel.elastic4s.searches
import org.elasticsearch.script.mustache.SearchTemplateResponse
case class RichSearchTemplateResponse(original: SearchTemplateResponse) {
def status = original.status()
def hasResponse = original.hasResponse
def response = RichSearchResponse(original.getResponse)
}
|
FabienPennequin/elastic4s
|
elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/RichSearchTemplateResponse.scala
|
Scala
|
apache-2.0
| 314 |
package io.iohk.ethereum.nodebuilder
import java.lang.ProcessBuilder.Redirect
import akka.actor.ActorSystem
import io.iohk.ethereum.extvm.{ExtVMInterface, VmServerApp}
import io.iohk.ethereum.ledger.Ledger.VMImpl
import io.iohk.ethereum.utils.{BlockchainConfig, Logger, VmConfig}
import io.iohk.ethereum.utils.VmConfig.ExternalConfig
object VmSetup extends Logger {
import VmConfig.VmMode._
def vm(vmConfig: VmConfig, blockchainConfig: BlockchainConfig, testMode: Boolean)(implicit
actorSystem: ActorSystem
): VMImpl =
(vmConfig.mode, vmConfig.externalConfig) match {
case (Internal, _) =>
log.info("Using Mantis internal VM")
new VMImpl
case (External, Some(extConf)) =>
startExternalVm(extConf)
new ExtVMInterface(extConf, blockchainConfig, testMode)
case _ =>
throw new RuntimeException("Missing vm.external config for external VM")
}
private def startExternalVm(externalConfig: ExternalConfig): Unit = {
externalConfig.vmType match {
case "iele" | "kevm" =>
log.info(s"Starting external ${externalConfig.vmType} VM process using executable path")
startStandardVmProcess(externalConfig)
case "mantis" =>
log.info("Starting external Mantis VM process using executable path")
startMantisVmProcess(externalConfig)
case "none" =>
log.info("Using external VM process not managed by Mantis")
// expect the vm to be started by external means
}
}
/**
* Runs a standard VM binary that takes $port and $host as input arguments
*/
private def startStandardVmProcess(externalConfig: ExternalConfig): Unit = {
import externalConfig._
require(executablePath.isDefined, s"VM type '$vmType' requires the path to binary to be provided")
// TODO: we also need host parameter in iele node
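    // Redirect.INHERIT forwards the external VM's stdout/stderr to this process's own streams.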
new ProcessBuilder(executablePath.get, port.toString, host)
.redirectOutput(Redirect.INHERIT)
.redirectError(Redirect.INHERIT)
.start()
}
private def startMantisVmProcess(externalConfig: ExternalConfig): Unit = {
if (externalConfig.executablePath.isDefined)
startStandardVmProcess(externalConfig)
else
startMantisVmInThisProcess()
}
private def startMantisVmInThisProcess(): Unit = {
VmServerApp.main(Array())
}
}
|
input-output-hk/etc-client
|
src/main/scala/io/iohk/ethereum/nodebuilder/VmSetup.scala
|
Scala
|
mit
| 2,335 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Strategy
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.First
import org.apache.spark.sql.catalyst.planning._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution
import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.exchange.ShuffleExchange
import org.apache.spark.sql.execution.joins.{BuildLeft, BuildRight}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.StreamingQuery
/**
* Converts a logical plan into zero or more SparkPlans. This API is exposed for experimenting
 * with the query planner and is not designed to be stable across Spark releases. Developers
 * writing libraries should instead consider using the stable APIs provided in
 * [[org.apache.spark.sql.sources]].
*/
abstract class SparkStrategy extends GenericStrategy[SparkPlan] {
override protected def planLater(plan: LogicalPlan): SparkPlan = PlanLater(plan)
}
case class PlanLater(plan: LogicalPlan) extends LeafExecNode {
override def output: Seq[Attribute] = plan.output
protected override def doExecute(): RDD[InternalRow] = {
throw new UnsupportedOperationException()
}
}
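// Illustrative sketch (not part of the original file): a minimal custom strategy built on the
// experimental planner API described above. It matches nothing and returns Nil, so the planner
// falls back to the built-in strategies; a real strategy would match specific logical operators
// and produce physical plans, calling planLater for child plans it does not handle itself.
object ExampleNoOpStrategy extends SparkStrategy {
  override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
    case _ => Nil // defer every plan to the other registered strategies
  }
}
// A session could register it (hypothetically) via:
//   spark.experimental.extraStrategies = Seq(ExampleNoOpStrategy)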
abstract class SparkStrategies extends QueryPlanner[SparkPlan] {
self: SparkPlanner =>
/**
* Plans special cases of limit operators.
*/
object SpecialLimits extends Strategy {
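    // For example, a collected df.orderBy("a").limit(10) matches the Limit-over-Sort cases below
    // and is planned as a single TakeOrderedAndProjectExec instead of a full sort plus a limit.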
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case logical.ReturnAnswer(rootPlan) => rootPlan match {
case logical.Limit(IntegerLiteral(limit), logical.Sort(order, true, child)) =>
execution.TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil
case logical.Limit(
IntegerLiteral(limit),
logical.Project(projectList, logical.Sort(order, true, child))) =>
execution.TakeOrderedAndProjectExec(
limit, order, projectList, planLater(child)) :: Nil
case logical.Limit(IntegerLiteral(limit), child) =>
execution.CollectLimitExec(limit, planLater(child)) :: Nil
case other => planLater(other) :: Nil
}
case logical.Limit(IntegerLiteral(limit), logical.Sort(order, true, child)) =>
execution.TakeOrderedAndProjectExec(limit, order, child.output, planLater(child)) :: Nil
case logical.Limit(
IntegerLiteral(limit), logical.Project(projectList, logical.Sort(order, true, child))) =>
execution.TakeOrderedAndProjectExec(
limit, order, projectList, planLater(child)) :: Nil
case _ => Nil
}
}
/**
* Select the proper physical plan for join based on joining keys and size of logical plan.
*
   * First, it uses the [[ExtractEquiJoinKeys]] pattern to find joins where at least some of the
* predicates can be evaluated by matching join keys. If found, Join implementations are chosen
* with the following precedence:
*
* - Broadcast: if one side of the join has an estimated physical size that is smaller than the
* user-configurable [[SQLConf.AUTO_BROADCASTJOIN_THRESHOLD]] threshold
* or if that side has an explicit broadcast hint (e.g. the user applied the
* [[org.apache.spark.sql.functions.broadcast()]] function to a DataFrame), then that side
* of the join will be broadcasted and the other side will be streamed, with no shuffling
   *     performed. If both sides of the join are eligible to be broadcasted, the right side is
   *     broadcast, since its case is matched first below.
* - Shuffle hash join: if the average size of a single partition is small enough to build a hash
* table.
* - Sort merge: if the matching join keys are sortable.
*
   * If there are no join keys, Join implementations are chosen with the following precedence:
* - BroadcastNestedLoopJoin: if one side of the join could be broadcasted
* - CartesianProduct: for Inner join
* - BroadcastNestedLoopJoin
*/
object JoinSelection extends Strategy with PredicateHelper {
/**
* Matches a plan whose output should be small enough to be used in broadcast join.
*/
private def canBroadcast(plan: LogicalPlan): Boolean = {
plan.stats(conf).isBroadcastable ||
(plan.stats(conf).sizeInBytes >= 0 &&
plan.stats(conf).sizeInBytes <= conf.autoBroadcastJoinThreshold)
}
/**
* Matches a plan whose single partition should be small enough to build a hash table.
*
     * Note: this assumes that the number of partitions is fixed; additional work is required if
     * it's dynamic.
*/
private def canBuildLocalHashMap(plan: LogicalPlan): Boolean = {
plan.stats(conf).sizeInBytes < conf.autoBroadcastJoinThreshold * conf.numShufflePartitions
}
/**
* Returns whether plan a is much smaller (3X) than plan b.
*
     * The cost of building a hash map is higher than that of sorting, so we should only build a
     * hash map on a table that is much smaller than the other one. Since we do not have statistics
     * on the number of rows, we use the size in bytes as an estimate.
*/
private def muchSmaller(a: LogicalPlan, b: LogicalPlan): Boolean = {
a.stats(conf).sizeInBytes * 3 <= b.stats(conf).sizeInBytes
}
private def canBuildRight(joinType: JoinType): Boolean = joinType match {
case _: InnerLike | LeftOuter | LeftSemi | LeftAnti => true
case j: ExistenceJoin => true
case _ => false
}
private def canBuildLeft(joinType: JoinType): Boolean = joinType match {
case _: InnerLike | RightOuter => true
case _ => false
}
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
// --- BroadcastHashJoin --------------------------------------------------------------------
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if canBuildRight(joinType) && canBroadcast(right) =>
Seq(joins.BroadcastHashJoinExec(
leftKeys, rightKeys, joinType, BuildRight, condition, planLater(left), planLater(right)))
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if canBuildLeft(joinType) && canBroadcast(left) =>
Seq(joins.BroadcastHashJoinExec(
leftKeys, rightKeys, joinType, BuildLeft, condition, planLater(left), planLater(right)))
// --- ShuffledHashJoin ---------------------------------------------------------------------
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if !conf.preferSortMergeJoin && canBuildRight(joinType) && canBuildLocalHashMap(right)
&& muchSmaller(right, left) ||
!RowOrdering.isOrderable(leftKeys) =>
Seq(joins.ShuffledHashJoinExec(
leftKeys, rightKeys, joinType, BuildRight, condition, planLater(left), planLater(right)))
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if !conf.preferSortMergeJoin && canBuildLeft(joinType) && canBuildLocalHashMap(left)
&& muchSmaller(left, right) ||
!RowOrdering.isOrderable(leftKeys) =>
Seq(joins.ShuffledHashJoinExec(
leftKeys, rightKeys, joinType, BuildLeft, condition, planLater(left), planLater(right)))
// --- SortMergeJoin ------------------------------------------------------------
case ExtractEquiJoinKeys(joinType, leftKeys, rightKeys, condition, left, right)
if RowOrdering.isOrderable(leftKeys) =>
joins.SortMergeJoinExec(
leftKeys, rightKeys, joinType, condition, planLater(left), planLater(right)) :: Nil
// --- Without joining keys ------------------------------------------------------------
// Pick BroadcastNestedLoopJoin if one side could be broadcasted
case j @ logical.Join(left, right, joinType, condition)
if canBuildRight(joinType) && canBroadcast(right) =>
joins.BroadcastNestedLoopJoinExec(
planLater(left), planLater(right), BuildRight, joinType, condition) :: Nil
case j @ logical.Join(left, right, joinType, condition)
if canBuildLeft(joinType) && canBroadcast(left) =>
joins.BroadcastNestedLoopJoinExec(
planLater(left), planLater(right), BuildLeft, joinType, condition) :: Nil
// Pick CartesianProduct for InnerJoin
case logical.Join(left, right, _: InnerLike, condition) =>
joins.CartesianProductExec(planLater(left), planLater(right), condition) :: Nil
case logical.Join(left, right, joinType, condition) =>
val buildSide =
if (right.stats(conf).sizeInBytes <= left.stats(conf).sizeInBytes) {
BuildRight
} else {
BuildLeft
}
// This join could be very slow or OOM
joins.BroadcastNestedLoopJoinExec(
planLater(left), planLater(right), buildSide, joinType, condition) :: Nil
// --- Cases where this strategy does not apply ---------------------------------------------
case _ => Nil
}
}
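  // Illustrative sketch (not part of the original file): user-side code that steers JoinSelection
  // above towards a broadcast hash join. The DataFrames `large` and `small` and the join column
  // "id" are hypothetical; broadcast() sets the hint that canBroadcast checks, so the small side
  // is built and broadcast while the large side is streamed without a shuffle.
  private def broadcastJoinSketch(
      large: org.apache.spark.sql.DataFrame,
      small: org.apache.spark.sql.DataFrame): org.apache.spark.sql.DataFrame = {
    import org.apache.spark.sql.functions.broadcast
    large.join(broadcast(small), Seq("id"))
  }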
/**
* Used to plan aggregation queries that are computed incrementally as part of a
* [[StreamingQuery]]. Currently this rule is injected into the planner
* on-demand, only when planning in a [[org.apache.spark.sql.execution.streaming.StreamExecution]]
*/
object StatefulAggregationStrategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case EventTimeWatermark(columnName, delay, child) =>
EventTimeWatermarkExec(columnName, delay, planLater(child)) :: Nil
case PhysicalAggregation(
namedGroupingExpressions, aggregateExpressions, rewrittenResultExpressions, child) =>
aggregate.AggUtils.planStreamingAggregation(
namedGroupingExpressions,
aggregateExpressions,
rewrittenResultExpressions,
planLater(child))
case _ => Nil
}
}
/**
* Used to plan the streaming deduplicate operator.
*/
object StreamingDeduplicationStrategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case Deduplicate(keys, child, true) =>
StreamingDeduplicateExec(keys, planLater(child)) :: Nil
case _ => Nil
}
}
/**
* Used to plan the aggregate operator for expressions based on the AggregateFunction2 interface.
*/
object Aggregation extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case PhysicalAggregation(
groupingExpressions, aggregateExpressions, resultExpressions, child) =>
val (functionsWithDistinct, functionsWithoutDistinct) =
aggregateExpressions.partition(_.isDistinct)
if (functionsWithDistinct.map(_.aggregateFunction.children).distinct.length > 1) {
// This is a sanity check. We should not reach here when we have multiple distinct
          // column sets. Our MultipleDistinctRewriter should take care of this case.
sys.error("You hit a query analyzer bug. Please report your query to " +
"Spark user mailing list.")
}
val aggregateOperator =
if (functionsWithDistinct.isEmpty) {
aggregate.AggUtils.planAggregateWithoutDistinct(
groupingExpressions,
aggregateExpressions,
resultExpressions,
planLater(child))
} else {
aggregate.AggUtils.planAggregateWithOneDistinct(
groupingExpressions,
functionsWithDistinct,
functionsWithoutDistinct,
resultExpressions,
planLater(child))
}
aggregateOperator
case _ => Nil
}
}
protected lazy val singleRowRdd = sparkContext.parallelize(Seq(InternalRow()), 1)
object InMemoryScans extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case PhysicalOperation(projectList, filters, mem: InMemoryRelation) =>
pruneFilterProject(
projectList,
filters,
identity[Seq[Expression]], // All filters still need to be evaluated.
InMemoryTableScanExec(_, filters, mem)) :: Nil
case _ => Nil
}
}
/**
* This strategy is just for explaining `Dataset/DataFrame` created by `spark.readStream`.
* It won't affect the execution, because `StreamingRelation` will be replaced with
* `StreamingExecutionRelation` in `StreamingQueryManager` and `StreamingExecutionRelation` will
* be replaced with the real relation using the `Source` in `StreamExecution`.
*/
object StreamingRelationStrategy extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case s: StreamingRelation =>
StreamingRelationExec(s.sourceName, s.output) :: Nil
case s: StreamingExecutionRelation =>
StreamingRelationExec(s.toString, s.output) :: Nil
case _ => Nil
}
}
/**
* Strategy to convert [[FlatMapGroupsWithState]] logical operator to physical operator
* in streaming plans. Conversion for batch plans is handled by [[BasicOperators]].
*/
object FlatMapGroupsWithStateStrategy extends Strategy {
override def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case FlatMapGroupsWithState(
func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, stateEnc, outputMode, _,
timeout, child) =>
val execPlan = FlatMapGroupsWithStateExec(
func, keyDeser, valueDeser, groupAttr, dataAttr, outputAttr, None, stateEnc, outputMode,
timeout, batchTimestampMs = None, eventTimeWatermark = None, planLater(child))
execPlan :: Nil
case _ =>
Nil
}
}
// Can we automate these 'pass through' operations?
object BasicOperators extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case r: RunnableCommand => ExecutedCommandExec(r) :: Nil
case MemoryPlan(sink, output) =>
val encoder = RowEncoder(sink.schema)
LocalTableScanExec(output, sink.allData.map(r => encoder.toRow(r).copy())) :: Nil
case logical.Distinct(child) =>
throw new IllegalStateException(
"logical distinct operator should have been replaced by aggregate in the optimizer")
case logical.Intersect(left, right) =>
throw new IllegalStateException(
"logical intersect operator should have been replaced by semi-join in the optimizer")
case logical.Except(left, right) =>
throw new IllegalStateException(
"logical except operator should have been replaced by anti-join in the optimizer")
case logical.DeserializeToObject(deserializer, objAttr, child) =>
execution.DeserializeToObjectExec(deserializer, objAttr, planLater(child)) :: Nil
case logical.SerializeFromObject(serializer, child) =>
execution.SerializeFromObjectExec(serializer, planLater(child)) :: Nil
case logical.MapPartitions(f, objAttr, child) =>
execution.MapPartitionsExec(f, objAttr, planLater(child)) :: Nil
case logical.MapPartitionsInR(f, p, b, is, os, objAttr, child) =>
execution.MapPartitionsExec(
execution.r.MapPartitionsRWrapper(f, p, b, is, os), objAttr, planLater(child)) :: Nil
case logical.FlatMapGroupsInR(f, p, b, is, os, key, value, grouping, data, objAttr, child) =>
execution.FlatMapGroupsInRExec(f, p, b, is, os, key, value, grouping,
data, objAttr, planLater(child)) :: Nil
case logical.MapElements(f, _, _, objAttr, child) =>
execution.MapElementsExec(f, objAttr, planLater(child)) :: Nil
case logical.AppendColumns(f, _, _, in, out, child) =>
execution.AppendColumnsExec(f, in, out, planLater(child)) :: Nil
case logical.AppendColumnsWithObject(f, childSer, newSer, child) =>
execution.AppendColumnsWithObjectExec(f, childSer, newSer, planLater(child)) :: Nil
case logical.MapGroups(f, key, value, grouping, data, objAttr, child) =>
execution.MapGroupsExec(f, key, value, grouping, data, objAttr, planLater(child)) :: Nil
case logical.FlatMapGroupsWithState(
f, key, value, grouping, data, output, _, _, _, _, child) =>
execution.MapGroupsExec(f, key, value, grouping, data, output, planLater(child)) :: Nil
case logical.CoGroup(f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr, left, right) =>
execution.CoGroupExec(
f, key, lObj, rObj, lGroup, rGroup, lAttr, rAttr, oAttr,
planLater(left), planLater(right)) :: Nil
case logical.Repartition(numPartitions, shuffle, child) =>
if (shuffle) {
ShuffleExchange(RoundRobinPartitioning(numPartitions), planLater(child)) :: Nil
} else {
execution.CoalesceExec(numPartitions, planLater(child)) :: Nil
}
case logical.Sort(sortExprs, global, child) =>
execution.SortExec(sortExprs, global, planLater(child)) :: Nil
case logical.Project(projectList, child) =>
execution.ProjectExec(projectList, planLater(child)) :: Nil
case logical.Filter(condition, child) =>
execution.FilterExec(condition, planLater(child)) :: Nil
case f: logical.TypedFilter =>
execution.FilterExec(f.typedCondition(f.deserializer), planLater(f.child)) :: Nil
case e @ logical.Expand(_, _, child) =>
execution.ExpandExec(e.projections, e.output, planLater(child)) :: Nil
case logical.Window(windowExprs, partitionSpec, orderSpec, child) =>
execution.window.WindowExec(windowExprs, partitionSpec, orderSpec, planLater(child)) :: Nil
case logical.Sample(lb, ub, withReplacement, seed, child) =>
execution.SampleExec(lb, ub, withReplacement, seed, planLater(child)) :: Nil
case logical.LocalRelation(output, data) =>
LocalTableScanExec(output, data) :: Nil
case logical.LocalLimit(IntegerLiteral(limit), child) =>
execution.LocalLimitExec(limit, planLater(child)) :: Nil
case logical.GlobalLimit(IntegerLiteral(limit), child) =>
execution.GlobalLimitExec(limit, planLater(child)) :: Nil
case logical.Union(unionChildren) =>
execution.UnionExec(unionChildren.map(planLater)) :: Nil
case g @ logical.Generate(generator, join, outer, _, _, child) =>
execution.GenerateExec(
generator, join = join, outer = outer, g.qualifiedGeneratorOutput,
planLater(child)) :: Nil
case logical.OneRowRelation =>
execution.RDDScanExec(Nil, singleRowRdd, "OneRowRelation") :: Nil
case r: logical.Range =>
execution.RangeExec(r) :: Nil
case logical.RepartitionByExpression(expressions, child, numPartitions) =>
exchange.ShuffleExchange(HashPartitioning(
expressions, numPartitions), planLater(child)) :: Nil
case ExternalRDD(outputObjAttr, rdd) => ExternalRDDScanExec(outputObjAttr, rdd) :: Nil
case r: LogicalRDD =>
RDDScanExec(r.output, r.rdd, "ExistingRDD", r.outputPartitioning, r.outputOrdering) :: Nil
case BroadcastHint(child) => planLater(child) :: Nil
case _ => Nil
}
}
}
|
MLnick/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/SparkStrategies.scala
|
Scala
|
apache-2.0
| 20,382 |
/*
* Copyright (c) 2012-2022 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.maxmind.iplookups
import java.net.InetAddress
import cats.{Eval, Id}
import cats.effect.Sync
import cats.syntax.either._
/** Data type letting you resolve an IP address */
sealed trait IpAddressResolver[F[_]] {
def resolve(ip: String): F[Either[Throwable, InetAddress]]
protected def getIpAddress(ip: String): Either[Throwable, InetAddress] =
Either.catchNonFatal(InetAddress.getByName(ip))
}
object IpAddressResolver {
implicit def syncIpAddressResolver[F[_]: Sync]: IpAddressResolver[F] = new IpAddressResolver[F] {
def resolve(ip: String): F[Either[Throwable, InetAddress]] =
Sync[F].delay(getIpAddress(ip))
}
implicit def evalIpAddressResolver: IpAddressResolver[Eval] = new IpAddressResolver[Eval] {
def resolve(ip: String): Eval[Either[Throwable, InetAddress]] =
Eval.later(getIpAddress(ip))
}
implicit def idIpAddressResolver: IpAddressResolver[Id] = new IpAddressResolver[Id] {
def resolve(ip: String): Id[Either[Throwable, InetAddress]] =
getIpAddress(ip)
}
}
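// Illustrative sketch (not part of the original file): resolving an address with the Id instance
// defined above, which performs the lookup eagerly and returns the Either directly.
object IpAddressResolverExample {
  def main(args: Array[String]): Unit = {
    val resolver = IpAddressResolver.idIpAddressResolver
    resolver.resolve("127.0.0.1") match {
      case Right(addr) => println(s"Resolved to ${addr.getHostAddress}")
      case Left(err)   => println(s"Resolution failed: ${err.getMessage}")
    }
  }
}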
|
snowplow/scala-maxmind-iplookups
|
src/main/scala/com.snowplowanalytics.maxmind.iplookups/IpAddressResolver.scala
|
Scala
|
apache-2.0
| 1,758 |
package controllers
import play.api._
import play.api.mvc._
import play.api.libs._
import play.api.libs.iteratee._
import play.api.libs.concurrent.Akka
import akka.actor.Props
import utils.{ CleanUpActor, CleanUp }
import java.text.DecimalFormat
import java.io.File
import models.Dump
import javax.inject.Inject
import akka.actor.ActorSystem
import javax.inject.Named
import akka.actor.ActorRef
import models.DumpDB
class Admin @Inject() (
configuration: Configuration,
dumpDb: DumpDB,
@Named("clean-up-actor") cleanUpActor: ActorRef) extends Controller {
def index = Action {
val dmpPath = configuration.getString("dmpster.dmp.path").getOrElse("dmps")
val filePath = new File(dmpPath)
val totalSpace = filePath.getTotalSpace
val freeSpace = filePath.getFreeSpace
val referencedFiles = dumpDb.all.map(_.pathInStorageDirectory)
def getActualFiles(filePath: File): List[File] = {
val all = filePath.listFiles().toList
val files = all.filterNot(_.isDirectory)
val dirs = all.filter(_.isDirectory)
val subfiles = dirs.flatMap(getActualFiles(_))
files ++ subfiles
}
val referencedFilesAbsolute = referencedFiles.map(f => new File(filePath, f)).map(_.getPath)
val danglingFiles = getActualFiles(filePath).map(_.getPath).filterNot { f => referencedFilesAbsolute.contains(f) }
Ok(views.html.admin(totalSpace, freeSpace, formatFileSize(totalSpace), formatFileSize(freeSpace), danglingFiles))
}
def cleanUpNow = Action {
Logger.info("clean up requested")
cleanUpActor ! CleanUp
Redirect(routes.Admin.index)
}
private def formatFileSize(size: Long) = {
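    // Human-readable size, e.g. 1536 -> "1.5 KB" and 1048576 -> "1 MB".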
if (size <= 0) "0"
else {
val units = List("B", "KB", "MB", "GB", "TB")
val digitGroups = (Math.log10(size) / Math.log10(1024)).toInt
s"${new DecimalFormat("#,##0.#").format(size / Math.pow(1024, digitGroups))} ${units(digitGroups)}"
}
}
}
|
alexanderfloh/dmpster
|
app/controllers/Admin.scala
|
Scala
|
mit
| 1,989 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtInteger, NotInPdf}
import uk.gov.hmrc.ct.computations.calculations.MachineryAndPlantCalculator
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CATO20(value: Int) extends CtBoxIdentifier(name = "UnclaimedAIA_FYA") with CtInteger with NotInPdf
object CATO20 extends Calculated[CATO20, ComputationsBoxRetriever] with MachineryAndPlantCalculator {
override def calculate(fieldValueRetriever: ComputationsBoxRetriever): CATO20 = {
unclaimedAIAFirstYearAllowance(cp81 = fieldValueRetriever.cp81(),
cp83 = fieldValueRetriever.cp83(),
cp87 = fieldValueRetriever.cp87(),
cp88 = fieldValueRetriever.cp88(),
cpAux1 = fieldValueRetriever.cpAux1())
}
}
|
liquidarmour/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/CATO20.scala
|
Scala
|
apache-2.0
| 1,515 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.server.tcp
import akka.actor._
import akka.actor.{ ActorRef, Props, ActorLogging, Actor }
import akka.io.Tcp
import akka.util.ByteString
import org.ensime.api.{ RpcRequestEnvelope, EnsimeServerError, RpcResponseEnvelope, EnsimeEvent }
import org.ensime.core.{ Broadcaster, Canonised, Protocol }
import org.ensime.server.RequestHandler
import scala.annotation.tailrec
import scala.util.control.NonFatal
class TCPConnectionActor(
connection: ActorRef,
protocol: Protocol,
project: ActorRef,
broadcaster: ActorRef
) extends Actor with Stash with ActorLogging {
case object Ack extends Tcp.Event
// sign death pact: this actor terminates when connection breaks
context watch connection
import Tcp._
// bytes we have seen but have been unable to process yet
var seen = ByteString()
def handlePeerClosed(): Unit = {
context.parent ! ClientConnectionClosed
context stop self
}
override def receive: Receive = idle
// not Receive, thanks to https://issues.scala-lang.org/browse/SI-8861
// (fixed in 2.11.7)
def idle: PartialFunction[Any, Unit] = incoming orElse readyToSend
def busy: PartialFunction[Any, Unit] = incoming orElse awaitingAck
def incoming: Receive = {
case Received(data: ByteString) =>
seen = seen ++ data
attemptProcess()
case PeerClosed =>
handlePeerClosed()
}
def readyToSend: Receive = {
case outgoing: EnsimeEvent =>
sendMessage(RpcResponseEnvelope(None, outgoing))
case outgoing: RpcResponseEnvelope =>
sendMessage(outgoing)
}
def awaitingAck: Receive = {
case Ack =>
// we only stash outgoing messages, so this will cause them to be queued for sending
unstashAll()
context.become(idle, discardOld = true)
case outgoing: EnsimeEvent =>
stash()
case outgoing: RpcResponseEnvelope =>
stash()
case CommandFailed(Write(_, _)) =>
connection ! ResumeWriting
}
def sendMessage(envelope: RpcResponseEnvelope): Unit = {
val msg = try {
protocol.encode(Canonised(envelope))
} catch {
case NonFatal(t) =>
log.error(t, s"Problem serialising $envelope")
protocol.encode(
RpcResponseEnvelope(
envelope.callId,
EnsimeServerError(s"Server error: ${t.getMessage}")
)
)
}
connection ! Tcp.Write(msg, Ack)
context.become(busy, discardOld = true)
}
override def preStart(): Unit = {
broadcaster ! Broadcaster.Register
}
final def attemptProcess(): Unit = {
try {
repeatedDecode()
} catch {
case e: Throwable =>
log.error(e, "Error seen during message processing, closing client connection")
context.stop(self)
}
}
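  // Decodes every complete envelope currently buffered in `seen`; any trailing partial frame is
  // left in `seen` until the next Received chunk arrives.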
@tailrec
final def repeatedDecode(): Unit = {
val (envelopeOpt, remainder) = protocol.decode(seen)
seen = remainder
envelopeOpt match {
case Some(rawEnvelope: RpcRequestEnvelope) =>
val envelope = Canonised(rawEnvelope)
context.actorOf(RequestHandler(envelope, project, self), s"${envelope.callId}")
repeatedDecode()
case None =>
}
}
}
object TCPConnectionActor {
def apply(connection: ActorRef, protocol: Protocol, project: ActorRef, broadcaster: ActorRef): Props =
Props(new TCPConnectionActor(connection, protocol, project, broadcaster))
}
|
espinhogr/ensime-server
|
server/src/main/scala/org/ensime/server/tcp/TCPConnectionActor.scala
|
Scala
|
gpl-3.0
| 3,499 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
import Incomplete.{ Error, Value => IValue }
/**
* Describes why a task did not complete.
*
* @param node the task that did not complete that is described by this Incomplete instance
* @param tpe whether the task was incomplete because of an error or because it was skipped. Only Error is actually used and Skipped may be removed in the future.
* @param message an optional error message describing this incompletion
* @param causes a list of incompletions that prevented `node` from completing
* @param directCause the exception that caused `node` to not complete
*/
final case class Incomplete(node: Option[AnyRef], tpe: IValue = Error, message: Option[String] = None, causes: Seq[Incomplete] = Nil, directCause: Option[Throwable] = None)
extends Exception(message.orNull, directCause.orNull) with UnprintableException {
override def toString = "Incomplete(node=" + node + ", tpe=" + tpe + ", msg=" + message + ", causes=" + causes + ", directCause=" + directCause + ")"
}
object Incomplete extends Enumeration {
val Skipped, Error = Value
def transformTD(i: Incomplete)(f: Incomplete => Incomplete): Incomplete = transform(i, true)(f)
def transformBU(i: Incomplete)(f: Incomplete => Incomplete): Incomplete = transform(i, false)(f)
def transform(i: Incomplete, topDown: Boolean)(f: Incomplete => Incomplete): Incomplete =
{
import collection.JavaConversions._
val visited: collection.mutable.Map[Incomplete, Incomplete] = new java.util.IdentityHashMap[Incomplete, Incomplete]
def visit(inc: Incomplete): Incomplete =
visited.getOrElseUpdate(inc, if (topDown) visitCauses(f(inc)) else f(visitCauses(inc)))
def visitCauses(inc: Incomplete): Incomplete =
inc.copy(causes = inc.causes.map(visit))
visit(i)
}
def visitAll(i: Incomplete)(f: Incomplete => Unit) {
val visited = IDSet.create[Incomplete]
def visit(inc: Incomplete): Unit =
visited.process(inc)(()) {
f(inc)
inc.causes.foreach(visit)
}
visit(i)
}
def linearize(i: Incomplete): Seq[Incomplete] =
{
var ordered = List[Incomplete]()
visitAll(i) { ordered ::= _ }
ordered
}
def allExceptions(is: Seq[Incomplete]): Iterable[Throwable] =
allExceptions(new Incomplete(None, causes = is))
def allExceptions(i: Incomplete): Iterable[Throwable] =
{
val exceptions = IDSet.create[Throwable]
visitAll(i) { exceptions ++= _.directCause.toList }
exceptions.all
}
def show(tpe: Value) = tpe match { case Skipped => "skipped"; case Error => "error" }
}
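// Illustrative sketch (not part of the original file): building a small Incomplete tree and
// inspecting it with the helpers above. The node references are omitted (None) for brevity.
object IncompleteExample {
  def main(args: Array[String]): Unit = {
    val leaf = Incomplete(node = None, message = Some("compile failed"),
      directCause = Some(new RuntimeException("boom")))
    val root = Incomplete(node = None, causes = Seq(leaf))
    // Every exception reachable from the tree (here, just the RuntimeException).
    Incomplete.allExceptions(root).foreach(t => println(t.getMessage))
    // Flattened view of the tree: the root and the leaf.
    println(Incomplete.linearize(root).size)
  }
}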
|
jaceklaskowski/sbt
|
tasks/src/main/scala/sbt/Incomplete.scala
|
Scala
|
bsd-3-clause
| 2,660 |
/**
* Copyright 2015 Adrian Hurtado (adrianhurt)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.html
package object bs {
import play.api.data.{ Field, FormError }
import play.twirl.api.Html
import play.api.i18n.{ Lang, Messages }
import bs.ArgsMap.isTrue
import play.api.mvc.Call
/**
* Class with relevant variables for a field to pass it to the helper and field constructor
* - args: list of available arguments for the helper and field constructor
*/
class BSFieldInfo(field: Field, args: Seq[(Symbol, Any)], val messages: Messages) {
/* A map with the args to work easily with them */
val argsMap: Map[Symbol, Any] = Args.withoutNones(args).toMap
/* Id of the input */
val id: String = argsMap.get('id).map(_.toString).getOrElse(field.id)
/* Id of the form-group */
val idFormField: String = argsMap.get('_id).map(_.toString).getOrElse(id + "_field")
/* The optional label */
val labelOpt: Option[Any] = argsMap.get('_label).orElse(argsMap.get('_hiddenLabel))
/* Indicates if the label must be hidden */
val hideLabel: Boolean = isTrue(argsMap, '_hideLabel) || argsMap.contains('_hiddenLabel)
/* Name of the input */
def name: String = field.name
/* Value of the input */
val value: Option[String] = field.value.orElse(argsMap.get('value).map(_.toString))
/* List with every error and its corresponding ARIA id. Ex: ("foo_error_0" -> "foo error") */
val errors: Seq[(String, String)] = BSFieldInfo.errors(Some(field), argsMap, messages).zipWithIndex.map {
case (error, i) => (id + "_error_" + i, error)
}
/* Indicates if there is any error */
val hasErrors: Boolean = !errors.isEmpty || ArgsMap.isNotFalse(argsMap, '_error)
/* The optional validation state ("success", "warning" or "error") */
lazy val status: Option[String] = BSFieldInfo.status(hasErrors, argsMap)
/* List of every ARIA id */
val ariaIds: Seq[String] = errors.map(_._1)
/*
* Map with the inner args, i.e. those args for the helper itself removing those ones reserved for the field constructor.
   * It adds the ARIA attributes and removes the underscored args reserved for the field constructor and the `id and `value ones that are
* managed independently.
*/
lazy val innerArgsMap: Map[Symbol, Any] = (
(if (ariaIds.size > 0) Seq(Symbol("aria-describedby") -> ariaIds.mkString(" ")) else Nil) ++
(if (hasErrors) Seq(Symbol("aria-invalid") -> "true") else Nil) ++
BSFieldInfo.constraintsArgs(field, messages) ++
args.filter(_._1 == 'placeholder).map(Args.msg(_)(messages)) ++
args.filterNot { case (key, _) => key == 'id || key == 'value || key == 'placeholder || key.name.startsWith("_") }
).toMap.filterNot(_._2 == false)
}
/**
* Companion object for class BSFieldInfo
*/
object BSFieldInfo {
def apply(field: Field, args: Seq[(Symbol, Any)], messages: Messages): BSFieldInfo = {
new BSFieldInfo(field, args, messages)
}
/* List with every error */
def errors(maybeField: Option[Field], argsMap: Map[Symbol, Any], messages: Messages): Seq[String] = {
argsMap.get('_error).filter(!_.isInstanceOf[Boolean]).map {
_ match {
case Some(FormError(_, message, args)) => Seq(messages(message, args.map(a => translateMsgArg(a, messages)): _*))
case message => Seq(messages(message.toString))
}
}.getOrElse {
maybeField.filter(_ => argsMap.get('_showErrors) != Some(false)).map { field =>
field.errors.map { e => messages(e.message, e.args.map(a => translateMsgArg(a, messages)): _*) }
}.getOrElse(Nil)
}
}
/* List with every "feedback info" except "errors" */
def feedbackInfosButErrors(argsMap: Map[Symbol, Any], messages: Messages): Seq[String] = {
argsMap.get('_warning).filter(!_.isInstanceOf[Boolean]).map(m => Seq(messages(m.toString))).getOrElse(
argsMap.get('_success).filter(!_.isInstanceOf[Boolean]).map(m => Seq(messages(m.toString))).getOrElse(Nil)
)
}
/* List with every "help info", i.e. a help text or constraints */
def helpInfos(maybeField: Option[Field], argsMap: Map[Symbol, Any], messages: Messages): Seq[String] = {
argsMap.get('_help).map(m => Seq(messages(m.toString))).getOrElse {
maybeField.filter(_ => argsMap.get('_showConstraints) == Some(true)).map { field =>
field.constraints.map(c => messages(c._1, c._2.map(a => translateMsgArg(a, messages)): _*)) ++ field.format.map(f => messages(f._1, f._2.map(a => translateMsgArg(a, messages)): _*))
}.getOrElse(Nil)
}
}
/* The optional validation state ("success", "warning" or "error") */
def status(hasErrors: Boolean, argsMap: Map[Symbol, Any]): Option[String] = {
if (hasErrors)
Some("error")
else if (ArgsMap.isNotFalse(argsMap, '_warning))
Some("warning")
else if (ArgsMap.isNotFalse(argsMap, '_success))
Some("success")
else
None
}
/* Generates automatically the input attributes for the constraints of a field */
def constraintsArgs(field: Field, messages: Messages): Seq[(Symbol, Any)] = field.constraints.map {
case ("constraint.required", params) => Some(('required -> true))
case ("constraint.min", params: Seq[Any]) => Some(('min -> messages(params.head.toString)))
case ("constraint.max", params: Seq[Any]) => Some(('max -> messages(params.head.toString)))
case ("constraint.minLength", params: Seq[Any]) => Some(('minlength -> messages(params.head.toString)))
case ("constraint.maxLength", params: Seq[Any]) => Some(('maxlength -> messages(params.head.toString)))
case ("constraint.pattern", params: Seq[Any]) => params.head match {
case str: String => Some(('pattern -> messages(str)))
case func: Function0[_] => Some(('pattern -> messages(func.asInstanceOf[() => scala.util.matching.Regex]().toString)))
case _ => None
}
case _ => None
}.flatten
private def translateMsgArg(msgArg: Any, messages: Messages) = msgArg match {
case key: String => messages(key)
case keys: Seq[_] => keys.map(key => messages(key.toString))
case _ => msgArg
}
}
/**
* Class with relevant variables for the global information of a multifield
* - fields: list of Fields
* - globalArguments: list of available arguments for the global helper
* - fieldsArguments: list of available arguments for every specific field
*/
class BSMultifieldInfo(fields: Seq[Field], globalArguments: Seq[(Symbol, Any)], fieldsArguments: Seq[(Symbol, Any)], val messages: Messages) {
  /* A map with the args to work easily with them. The '_help is removed because the freeFormField helper will add it */
val argsMap: Map[Symbol, Any] = Args.withoutNones(fieldsArguments ++ globalArguments).toMap
/* List with every error */
val errors: Seq[String] = {
val globalErrors = BSFieldInfo.errors(None, argsMap, messages)
if (globalErrors.size > 0)
globalErrors
else
fields.flatMap { field =>
BSFieldInfo.errors(Some(field), argsMap, messages)
}
}
/* Indicates if there is any error */
val hasErrors: Boolean = !errors.isEmpty || ArgsMap.isNotFalse(argsMap, '_error)
/* The optional validation state ("success", "warning" or "error") */
lazy val status: Option[String] = BSFieldInfo.status(hasErrors, argsMap)
lazy val globalArgs = globalArguments
lazy val fieldsArgs = fieldsArguments
}
/**
* Companion object for class BSMultifieldInfo
*/
object BSMultifieldInfo {
def apply(fields: Seq[Field], globalArguments: Seq[(Symbol, Any)], fieldsArguments: Seq[(Symbol, Any)], messages: Messages): BSMultifieldInfo = {
new BSMultifieldInfo(fields, globalArguments, fieldsArguments, messages)
}
}
/**
* Custom BSFieldConstructor for the library. Every BSFieldConstructor must extend this functionality.
*/
trait BSFieldConstructor[F <: BSFieldInfo] {
/* Renders the corresponding template of the field constructor */
def apply(fieldInfo: F, inputHtml: Html)(implicit messages: Messages): Html
/* Renders the corresponding template of a fake field constructor (i.e. with the same structure but without the field) */
def apply(contentHtml: Html, argsMap: Map[Symbol, Any])(implicit messages: Messages): Html
}
/**
* Renders an input field with its corresponding wrapper using the BSFieldConstructor.
* - fieldInfo: a BSFieldInfo with all the information about the field.
* - inputDef: function that returns a Html from the BSFieldInfo.
*/
def inputFormField[F <: BSFieldInfo](fieldInfo: F)(inputDef: F => Html)(implicit fc: BSFieldConstructor[F]) =
fc(fieldInfo, inputDef(fieldInfo))(fieldInfo.messages)
/**
* Renders a fake field constructor using the BSFieldConstructor.
* - args: list of available arguments for the helper and the form-group
* - contentDef: function that returns a Html from a map of arguments
*/
def freeFormField[F <: BSFieldInfo](args: Seq[(Symbol, Any)])(contentDef: Map[Symbol, Any] => Html)(implicit fc: BSFieldConstructor[F], messages: Messages) = {
val argsWithoutNones = Args.withoutNones(args)
fc(contentDef(Args.inner(argsWithoutNones).toMap), argsWithoutNones.toMap)(messages)
}
/**
* Renders a multi-field constructor using the BSFieldConstructor.
* - fieldInfo: a BSMultifieldInfo with all the information about the fields.
* - contentDef: function that returns a Html from the BSMultifieldInfo
*/
def multifieldFormField[F <: BSFieldInfo, M <: BSMultifieldInfo](multifieldInfo: M)(contentDef: M => Html)(implicit fc: BSFieldConstructor[F]) =
freeFormField(multifieldInfo.globalArgs)(_ => contentDef(multifieldInfo))(fc, multifieldInfo.messages)
}
|
mkurz/play-bootstrap3
|
core-play24/app/views/bs/package.scala
|
Scala
|
apache-2.0
| 10,432 |
/*
* Copyright 2015 LG CNS.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scouter.server.tagcnt.first;
import java.io.IOException
import scouter.util.IClose
class WorkDB(file: String) extends IClose {
var lastActive = 0L
var entry: TagValueEntry = null
var table: IndexFile = null
var logDate = ""
var objType = ""
def open() {
this.table = IndexFile.open(file + "/first");
this.entry = TagValueEntry.open(this.table);
}
def close() {
this.entry.close();
this.table.close();
}
}
|
idkwim/scouter
|
scouter.server/src/scouter/server/tagcnt/first/WorkDB.scala
|
Scala
|
apache-2.0
| 1,131 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
// $example on$
import org.apache.spark.ml.attribute.Attribute
import org.apache.spark.ml.feature.{IndexToString, StringIndexer}
// $example off$
import org.apache.spark.sql.SparkSession
object IndexToStringExample {
def main(args: Array[String]) {
val spark = SparkSession
.builder
.appName("IndexToStringExample")
.getOrCreate()
// $example on$
val df = spark.createDataFrame(Seq(
(0, "a"),
(1, "b"),
(2, "c"),
(3, "a"),
(4, "a"),
(5, "c")
)).toDF("id", "category")
val indexer = new StringIndexer()
.setInputCol("category")
.setOutputCol("categoryIndex")
.fit(df)
val indexed = indexer.transform(df)
println(s"Transformed string column '${indexer.getInputCol}' " +
s"to indexed column '${indexer.getOutputCol}'")
indexed.show()
val inputColSchema = indexed.schema(indexer.getOutputCol)
println(s"StringIndexer will store labels in output column metadata: " +
s"${Attribute.fromStructField(inputColSchema).toString}\\n")
val converter = new IndexToString()
.setInputCol("categoryIndex")
.setOutputCol("originalCategory")
val converted = converter.transform(indexed)
println(s"Transformed indexed column '${converter.getInputCol}' back to original string " +
s"column '${converter.getOutputCol}' using labels in metadata")
converted.select("id", "categoryIndex", "originalCategory").show()
// $example off$
spark.stop()
}
}
// scalastyle:on println
|
lhfei/spark-in-action
|
spark-2.x/src/main/scala/org/apache/spark/examples/ml/IndexToStringExample.scala
|
Scala
|
apache-2.0
| 2,472 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.recorder.http.handler.user
import io.gatling.recorder.http.channel.BootstrapFactory._
import io.gatling.recorder.http.ssl.SslServerContext
import com.typesafe.scalalogging.StrictLogging
import io.netty.channel.ChannelOutboundHandlerAdapter
import io.netty.channel._
import io.netty.handler.ssl.SslHandler
/**
* Placed on the server side pipeline, it replaces itself with a SslHandler when it sees the 200 response to the CONNECT request
* (as CONNECT happens over HTTP, not HTTPS)
*/
private[handler] class SslHandlerSetter(domainAlias: String, sslServerContext: SslServerContext) extends ChannelOutboundHandlerAdapter with StrictLogging {
override def write(ctx: ChannelHandlerContext, msg: AnyRef, promise: ChannelPromise): Unit = {
ctx.pipeline
.addAfter(SslHandlerSetterName, SslHandlerName, new SslHandler(sslServerContext.createSSLEngine(domainAlias)))
.remove(SslHandlerSetterName)
super.write(ctx, msg, promise)
}
}
|
ryez/gatling
|
gatling-recorder/src/main/scala/io/gatling/recorder/http/handler/user/SslHandlerSetter.scala
|
Scala
|
apache-2.0
| 1,592 |
package org.bruchez.olivier.dsstoreremover
import java.io.File
import scala.annotation.tailrec
object Files {
def filesInDirectory(directory: File,
recursive: Boolean,
includeDirectories: Boolean): Seq[File] = {
val (directories, files) =
Option(directory.listFiles()).fold(Seq[File]())(_.toSeq).partition(_.isDirectory)
val subDirectoriesAndFiles =
if (recursive) {
directories.flatMap(filesInDirectory(_, recursive = true, includeDirectories))
} else {
Seq()
}
(if (includeDirectories) directories else Seq()) ++ files ++ subDirectoriesAndFiles
}
def isMacOsMetadataFile(file: File): Boolean = {
val filename = file.getName
lazy val isDsStoreFile = filename == MacOsDsStoreFilename
lazy val isMetadataFile = filename.startsWith(MacOsMetadataFilePrefix) && {
val baseFile =
new File(file.getParentFile, filename.substring(MacOsMetadataFilePrefix.length))
baseFile.exists()
}
isDsStoreFile || isMetadataFile
}
private val MacOsDsStoreFilename = ".DS_Store"
private val MacOsMetadataFilePrefix = "._"
def nonExistingFile(directory: File, baseFilename: String): File = {
@tailrec
def nonExistingFile(index: Int): File = {
val suffix = if (index <= 0) "" else s".$index"
val fileToTest = new File(directory, baseFilename + suffix)
if (!fileToTest.exists()) fileToTest else nonExistingFile(index + 1)
}
nonExistingFile(index = 0)
}
}
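// Illustrative sketch (not part of the original file): listing macOS metadata files under a
// hypothetical directory and picking a file name that does not clash with existing files.
object FilesExample {
  def main(args: Array[String]): Unit = {
    val directory = new File("/tmp") // assumed directory, purely for the sketch
    val metadataFiles = Files
      .filesInDirectory(directory, recursive = true, includeDirectories = false)
      .filter(Files.isMacOsMetadataFile)
    println(s"Found ${metadataFiles.size} macOS metadata file(s)")
    // Yields /tmp/report.txt, or /tmp/report.txt.1 if that already exists, and so on.
    println(Files.nonExistingFile(directory, "report.txt"))
  }
}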
|
obruchez/ds-store-remover
|
src/main/scala/org/bruchez/olivier/dsstoreremover/Files.scala
|
Scala
|
apache-2.0
| 1,529 |
package org.jetbrains.plugins.scala.extensions.implementation.iterator
import com.intellij.psi.PsiElement
/**
* Pavel.Fatin, 09.05.2010
*/
final class DepthFirstIterator(element: PsiElement, predicate: PsiElement => Boolean) extends Iterator[PsiElement] {
private var stack: List[PsiElement] =
if (element == null) List.empty
else List(element)
override def hasNext: Boolean = stack.nonEmpty
override def next(): PsiElement = {
val element = stack.head
stack = stack.tail
if (predicate(element)) pushChildren(element)
element
}
private def pushChildren(element: PsiElement): Unit = {
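    // Push children from last to first so they are popped in source order (left-to-right, depth-first).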
var child = element.getLastChild
while (child != null) {
stack = child +: stack
child = child.getPrevSibling
}
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/extensions/implementation/iterator/DepthFirstIterator.scala
|
Scala
|
apache-2.0
| 794 |
/*
* Copyright 2015 springml
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.springml.spark.salesforce
import org.apache.spark.sql.types._
import org.apache.spark.sql.{Row, SparkSession}
import org.scalatest.{BeforeAndAfterEach, FunSuite}
import scala.concurrent.duration.{FiniteDuration, MILLISECONDS}
class TestUtils extends FunSuite with BeforeAndAfterEach {
var ss: SparkSession = _
override def beforeEach() {
ss = SparkSession.builder().master("local").appName("Test Utils").getOrCreate()
}
override def afterEach() {
ss.stop()
}
test("Test Metadata Configuration") {
val metadataConfig = Utils.metadataConfig(null)
assert(metadataConfig.size == 8)
val integerConfig = metadataConfig.get("integer")
assert(integerConfig.isDefined == true)
assert(integerConfig.get.get("precision").isDefined == true)
val timestampConfig = metadataConfig.get("timestamp")
assert(timestampConfig.isDefined == true)
assert(timestampConfig.get.get("format").isDefined == true)
val doubleConfig = metadataConfig.get("double")
assert(doubleConfig.isDefined == true)
assert(doubleConfig.get.get("precision").isDefined == true)
}
test("Test Custom Metadata Configuration") {
val customTimestampConfig = """{"timestamp":{"wave_type":"Date","format":"yyyy/MM/dd'T'HH:mm:ss"}}"""
val metadataConfig = Utils.metadataConfig(Some(customTimestampConfig))
assert(metadataConfig.size == 8)
val timestampConfig = metadataConfig.get("timestamp")
assert(timestampConfig.isDefined == true)
assert(timestampConfig.get.get("format").isDefined == true)
assert(timestampConfig.get.get("format").get == "yyyy/MM/dd'T'HH:mm:ss")
}
test("Test Custom Metadata Configuration with new datatype") {
val customTimestampConfig = """{"mydataType":{"wave_type":"Date","format":"yy-MM-dd"}}"""
val metadataConfig = Utils.metadataConfig(Some(customTimestampConfig))
assert(metadataConfig.size == 9)
val myDataTypeConfig = metadataConfig.get("mydataType")
assert(myDataTypeConfig.isDefined == true)
assert(myDataTypeConfig.get.get("format").isDefined == true)
assert(myDataTypeConfig.get.get("format").get == "yy-MM-dd")
}
test("Test repartition for in memory RDD") {
val inMemoryData = (0 to 2000).map(value => {
val rowValues = Array(value, value + 1, value + 2, value + 3).map(value => value.toString)
Row.fromSeq(rowValues)
})
val inMemoryRDD = ss.sparkContext.makeRDD(inMemoryData)
val columnNames = List("c1", "c2", "c3", "c4")
val columnStruct = columnNames.map(colName => StructField(colName, StringType, true))
val schema = StructType(columnStruct)
val inMemoryDF = ss.sqlContext.createDataFrame(inMemoryRDD, schema)
val repartitionDF = Utils.repartition(inMemoryRDD)
assert(repartitionDF.partitions.length == 1)
}
test("Test repartition for local CSV file with size less than 10 MB") {
val csvURL= getClass.getResource("/ad-server-data-formatted.csv")
val csvFilePath = csvURL.getPath
val csvDF = ss.read.option("header", "true").csv(csvFilePath)
val repartitionDF = Utils.repartition(csvDF.rdd)
assert(repartitionDF.partitions.length == 1)
}
test("Test repartition for local CSV file with size > 10 MB and < 20 MB") {
val csvURL= getClass.getResource("/minified_GDS_90.csv")
val csvFilePath = csvURL.getPath
val csvDF = ss.read.option("header", "true").csv(csvFilePath)
val repartitionDF = Utils.repartition(csvDF.rdd)
assert(repartitionDF.partitions.length == 2)
}
test("Check whether CSV Header constructed properly") {
val intField = StructField("c1", IntegerType, true)
val longField = StructField("c2", LongType, true)
val floatField = StructField("c3", FloatType, true)
val dateField = StructField("c4", DateType, true)
val stringField = StructField("c5", StringType, true)
val columnStruct = Array[StructField] (intField, longField, floatField, dateField, stringField)
val schema = StructType(columnStruct)
val expected = "c1,c2,c3,c4,c5"
val actual = Utils.csvHeadder(schema)
assert(expected.equals(actual))
}
test("retry with expoential backoff") {
val timeoutDuration = FiniteDuration(5000L, MILLISECONDS)
val initSleepIntervalDuration = FiniteDuration(100L, MILLISECONDS)
val maxSleepIntervalDuration = FiniteDuration(500L, MILLISECONDS)
var completed = false
var attempts = 0
var expectedNumberAttempts = 2
Utils.retryWithExponentialBackoff(() => {
attempts += 1
completed = attempts == expectedNumberAttempts
completed
}, timeoutDuration, initSleepIntervalDuration, maxSleepIntervalDuration)
assert(attempts == expectedNumberAttempts)
}
}
|
springml/spark-salesforce
|
src/test/scala/com/springml/spark/salesforce/TestUtils.scala
|
Scala
|
apache-2.0
| 5,296 |