Dataset columns: code (stringlengths 5–1M), repo_name (stringlengths 5–109), path (stringlengths 6–208), language (stringclasses, 1 value), license (stringclasses, 15 values), size (int64, 5–1M)
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.test.SQLTestData._
private case class FunctionResult(f1: String, f2: String)
class UDFSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("built-in fixed arity expressions") {
val df = ctx.emptyDataFrame
df.selectExpr("rand()", "randn()", "rand(5)", "randn(50)")
}
test("built-in vararg expressions") {
val df = Seq((1, 2)).toDF("a", "b")
df.selectExpr("array(a, b)")
df.selectExpr("struct(a, b)")
}
test("built-in expressions with multiple constructors") {
val df = Seq(("abcd", 2)).toDF("a", "b")
df.selectExpr("substr(a, 2)", "substr(a, 2, 3)").collect()
}
test("count") {
val df = Seq(("abcd", 2)).toDF("a", "b")
df.selectExpr("count(a)")
}
test("count distinct") {
val df = Seq(("abcd", 2)).toDF("a", "b")
df.selectExpr("count(distinct a)")
}
test("SPARK-8003 spark_partition_id") {
val df = Seq((1, "Tearing down the walls that divide us")).toDF("id", "saying")
df.registerTempTable("tmp_table")
checkAnswer(sql("select spark_partition_id() from tmp_table").toDF(), Row(0))
ctx.dropTempTable("tmp_table")
}
test("SPARK-8005 input_file_name") {
withTempPath { dir =>
val data = ctx.sparkContext.parallelize(0 to 10, 2).toDF("id")
data.write.parquet(dir.getCanonicalPath)
ctx.read.parquet(dir.getCanonicalPath).registerTempTable("test_table")
val answer = sql("select input_file_name() from test_table").head().getString(0)
assert(answer.contains(dir.getCanonicalPath))
assert(sql("select input_file_name() from test_table").distinct().collect().length >= 2)
ctx.dropTempTable("test_table")
}
}
test("error reporting for incorrect number of arguments") {
val df = ctx.emptyDataFrame
val e = intercept[AnalysisException] {
df.selectExpr("substr('abcd', 2, 3, 4)")
}
assert(e.getMessage.contains("arguments"))
}
test("error reporting for undefined functions") {
val df = ctx.emptyDataFrame
val e = intercept[AnalysisException] {
df.selectExpr("a_function_that_does_not_exist()")
}
assert(e.getMessage.contains("undefined function"))
}
test("Simple UDF") {
ctx.udf.register("strLenScala", (_: String).length)
assert(sql("SELECT strLenScala('test')").head().getInt(0) === 4)
}
test("ZeroArgument UDF") {
ctx.udf.register("random0", () => { Math.random()})
assert(sql("SELECT random0()").head().getDouble(0) >= 0.0)
}
test("TwoArgument UDF") {
ctx.udf.register("strLenScala", (_: String).length + (_: Int))
assert(sql("SELECT strLenScala('test', 1)").head().getInt(0) === 5)
}
test("UDF in a WHERE") {
ctx.udf.register("oneArgFilter", (n: Int) => { n > 80 })
val df = ctx.sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString))).toDF()
df.registerTempTable("integerData")
val result =
sql("SELECT * FROM integerData WHERE oneArgFilter(key)")
assert(result.count() === 20)
}
test("UDF in a HAVING") {
ctx.udf.register("havingFilter", (n: Long) => { n > 5 })
val df = Seq(("red", 1), ("red", 2), ("blue", 10),
("green", 100), ("green", 200)).toDF("g", "v")
df.registerTempTable("groupData")
val result =
sql(
"""
| SELECT g, SUM(v) as s
| FROM groupData
| GROUP BY g
| HAVING havingFilter(s)
""".stripMargin)
assert(result.count() === 2)
}
test("UDF in a GROUP BY") {
ctx.udf.register("groupFunction", (n: Int) => { n > 10 })
val df = Seq(("red", 1), ("red", 2), ("blue", 10),
("green", 100), ("green", 200)).toDF("g", "v")
df.registerTempTable("groupData")
val result =
sql(
"""
| SELECT SUM(v)
| FROM groupData
| GROUP BY groupFunction(v)
""".stripMargin)
assert(result.count() === 2)
}
test("UDFs everywhere") {
ctx.udf.register("groupFunction", (n: Int) => { n > 10 })
ctx.udf.register("havingFilter", (n: Long) => { n > 2000 })
ctx.udf.register("whereFilter", (n: Int) => { n < 150 })
ctx.udf.register("timesHundred", (n: Long) => { n * 100 })
val df = Seq(("red", 1), ("red", 2), ("blue", 10),
("green", 100), ("green", 200)).toDF("g", "v")
df.registerTempTable("groupData")
val result =
sql(
"""
| SELECT timesHundred(SUM(v)) as v100
| FROM groupData
| WHERE whereFilter(v)
| GROUP BY groupFunction(v)
| HAVING havingFilter(v100)
""".stripMargin)
assert(result.count() === 1)
}
test("struct UDF") {
ctx.udf.register("returnStruct", (f1: String, f2: String) => FunctionResult(f1, f2))
val result =
sql("SELECT returnStruct('test', 'test2') as ret")
.select($"ret.f1").head().getString(0)
assert(result === "test")
}
test("udf that is transformed") {
ctx.udf.register("makeStruct", (x: Int, y: Int) => (x, y))
// 1 + 1 is constant folded causing a transformation.
assert(sql("SELECT makeStruct(1 + 1, 2)").first().getAs[Row](0) === Row(2, 2))
}
test("type coercion for udf inputs") {
ctx.udf.register("intExpected", (x: Int) => x)
// pass a decimal to intExpected.
assert(sql("SELECT intExpected(1.0)").head().getInt(0) === 1)
}
}
repo_name: ArvinDevel/onlineAggregationOnSparkV2 | path: sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala | language: Scala | license: apache-2.0 | size: 6,215
object UpdateVars3 {
def main(args: Array[String]) {
var x: Int = 3
var y: Int = x + 2
y = 2 * y
x = y - x
println(s"x = $x, y = $y")
}
}
repo_name: LoyolaChicagoBooks/introcs-scala-examples | path: update_vars3/update_vars3.scala | language: Scala | license: gpl-3.0 | size: 162
package com.yammer.dropwizard.examples
object SayingFactory {
def buildSaying(implicit config: ExampleConfiguration) = config.saying
}
repo_name: jamesward/dropwizard | path: dropwizard-scala_2.9.1/src/test/scala/com/yammer/dropwizard/examples/SayingFactory.scala | language: Scala | license: apache-2.0 | size: 138
package scala.slick.ast
import scala.slick.SlickException
import scala.slick.util.Logging
import TypeUtil.typeToTypeUtil
import Util._
/**
* A node in the query AST.
*
* Every Node has a number of child nodes and an optional type annotation.
*/
trait Node {
type Self >: this.type <: Node
private[this] var seenType: Boolean = false
/** All child nodes of this node. Must be implemented by subclasses. */
def nodeChildren: Seq[Node]
/** Names for the child nodes to show in AST dumps. Defaults to a numbered
* sequence starting at 0 but can be overridden by subclasses to produce
* more suitable names. */
def nodeChildNames: Iterable[String] = Stream.from(0).map(_.toString)
/** Rebuild this node with a new list of children. Implementations of this
* method *must not* perform any optimization to reuse the current node.
* This method always returns a fresh copy. */
protected[this] def nodeRebuild(ch: IndexedSeq[Node]): Self
/** Rebuild this node with a new list of children unless all children are
* identical to the current ones. */
protected[this] final def nodeRebuildOrThis(ch: IndexedSeq[Node]): Self = {
if((nodeChildren, ch).zipped.forall(_ eq _)) this
else nodeRebuild(ch)
}
/** Apply a mapping function to all children of this node and recreate the
* node with the new children. If all new children are identical to the old
* ones, this node is returned. If ``keepType`` is set to true, the type
* of this node is kept even if the children have changed. */
final def nodeMapChildren(f: Node => Node, keepType: Boolean = false): Self = {
val this2: Self = mapOrNone(nodeChildren)(f).map(nodeRebuild).getOrElse(this)
if(_nodeType == UnassignedType || !keepType) this2
else nodeBuildTypedNode(this2, _nodeType)
}
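// Illustrative usage only (a hypothetical `transform: Node => Node` function is assumed):
//   val rewritten = node.nodeMapChildren(transform, keepType = true)
// rewrites every child and keeps this node's already assigned type.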
override def toString = this match {
case p: Product =>
val cln = getClass.getName.replaceFirst(".*\\.", "")
val n = if(cln.endsWith("$")) cln.substring(0, cln.length-1) else cln.replaceFirst(".*\\$", "")
val args = p.productIterator.filterNot(_.isInstanceOf[Node]).mkString(", ")
if(args.isEmpty) n else (n + ' ' + args)
case _ => super.toString
}
/** The intrinsic symbol that points to this Node object. */
final def nodeIntrinsicSymbol = new IntrinsicSymbol(this)
private var _nodeType: Type = UnassignedType
/** The current type of this node */
def nodeType: Type = {
seenType = true
_nodeType
}
def nodePeekType: Type = _nodeType
def nodeHasType: Boolean = _nodeType != UnassignedType
/** Return this Node with a Type assigned. This may only be called on
* freshly constructed nodes with no other existing references, i.e.
* creating the Node plus assigning it a Type must be atomic. */
final def nodeTyped(tpe: Type): this.type = {
if(seenType && tpe != _nodeType)
throw new SlickException("Trying to reassign node type -- nodeTyped() may only be called on freshly constructed nodes")
_nodeType = tpe
Node.logType(this)
this
}
/** Return this Node with no Type assigned (if its type has not been
* observed yet) or an untyped copy. */
final def nodeUntypedOrCopy: Self = {
if(seenType || _nodeType != UnassignedType) nodeRebuild(nodeChildren.toIndexedSeq)
else this
}
/** Return this Node with a Type assigned (if no other type has been seen
* for it yet) or a typed copy. */
final def nodeTypedOrCopy(tpe: Type): Self = {
if(seenType && tpe != _nodeType)
nodeRebuild(nodeChildren.toIndexedSeq).nodeTyped(tpe)
else nodeTyped(tpe)
}
def nodeBuildTypedNode[T >: this.type <: Node](newNode: T, newType: Type): T =
if(newNode ne this) newNode.nodeTyped(newType)
else if(newType == _nodeType) this
else nodeRebuildWithType(newType).asInstanceOf[T]
def nodeRebuildWithType(tpe: Type): Self = nodeRebuild(nodeChildren.toIndexedSeq).nodeTyped(tpe)
/** Rebuild this node and all children with their computed type. If this
* node already has a type, the children are only type-checked again if
* ``typeChildren`` is set to true. If ``retype`` is also set to true, the
* existing type of this node is replaced. If this node does not yet have
* a type, the types of all children are computed. */
final def nodeWithComputedType(scope: SymbolScope = SymbolScope.empty, typeChildren: Boolean = false, retype: Boolean = false): Self =
if(nodeHasType && !typeChildren) this else nodeWithComputedType2(scope, typeChildren, retype)
protected[this] def nodeWithComputedType2(scope: SymbolScope = SymbolScope.empty, typeChildren: Boolean = false, retype: Boolean = false): Self
}
/** A Node whose children can be typed independently of each other and which
* can be typed without access to its scope. */
trait SimplyTypedNode extends Node {
type Self >: this.type <: SimplyTypedNode
protected def buildType: Type
final def nodeWithComputedType2(scope: SymbolScope, typeChildren: Boolean, retype: Boolean): Self = {
val this2 = nodeMapChildren(_.nodeWithComputedType(scope, typeChildren, retype), !retype)
if(!nodeHasType || retype) nodeBuildTypedNode(this2, this2.buildType) else this2
}
}
object Node extends Logging {
private def logType(n: Node): Unit =
logger.debug("Assigned type "+n.nodePeekType+" to node "+n)
}
trait TypedNode extends Node with Typed {
override def nodeType: Type = {
val t = super.nodeType
if(t eq UnassignedType) tpe else t
}
def nodeWithComputedType2(scope: SymbolScope, typeChildren: Boolean, retype: Boolean): Self =
nodeMapChildren(_.nodeWithComputedType(scope, typeChildren, retype), !retype)
override def nodeHasType = (tpe ne UnassignedType) || super.nodeHasType
override def nodePeekType: Type = super.nodePeekType match {
case UnassignedType => tpe
case t => t
}
}
/** An expression that represents a conjunction of expressions. */
trait ProductNode extends SimplyTypedNode { self =>
type Self >: this.type <: SimplyTypedNode with ProductNode
override def toString = "ProductNode"
protected[this] def nodeRebuild(ch: IndexedSeq[Node]): Self = new ProductNode {
val nodeChildren = ch
}.asInstanceOf[Self]
override def nodeChildNames: Iterable[String] = Stream.from(1).map(_.toString)
override def hashCode() = nodeChildren.hashCode()
override def equals(o: Any) = o match {
case p: ProductNode => nodeChildren == p.nodeChildren
case _ => false
}
def withComputedTypeNoRec: ProductNode = nodeBuildTypedNode(this, buildType)
protected def buildType: Type = ProductType(nodeChildren.map { ch =>
val t = ch.nodeType
if(t == UnassignedType) throw new SlickException(s"ProductNode child $ch has UnassignedType")
t
}(collection.breakOut))
def numberedElements: Iterator[(ElementSymbol, Node)] =
nodeChildren.iterator.zipWithIndex.map { case (n, i) => (new ElementSymbol(i+1), n) }
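// Illustrative only (leaf nodes a, b, c assumed):
//   ProductNode(Seq(ProductNode(Seq(a, b)), c)).flatten produces ProductNode(Seq(a, b, c))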
def flatten: ProductNode = {
def f(n: Node): IndexedSeq[Node] = n match {
case ProductNode(ns) => ns.flatMap(f).toIndexedSeq
case n => IndexedSeq(n)
}
ProductNode(f(this))
}
}
object ProductNode {
def apply(s: Seq[Node]): ProductNode = new ProductNode {
val nodeChildren = s
}
def unapply(p: ProductNode) = Some(p.nodeChildren)
}
/** An expression that represents a structure, i.e. a conjunction where the
* individual components have Symbols associated with them. */
final case class StructNode(elements: IndexedSeq[(Symbol, Node)]) extends ProductNode with DefNode {
type Self = StructNode
override def toString = "StructNode"
override def nodeChildNames = elements.map(_._1.toString)
val nodeChildren = elements.map(_._2)
override protected[this] def nodeRebuild(ch: IndexedSeq[Node]) =
new StructNode(elements.zip(ch).map{ case ((s,_),n) => (s,n) })
override def hashCode() = elements.hashCode()
override def equals(o: Any) = o match {
case s: StructNode => elements == s.elements
case _ => false
}
def nodeGenerators = elements
protected[this] def nodeRebuildWithGenerators(gen: IndexedSeq[Symbol]): Node =
copy(elements = (elements, gen).zipped.map((e, s) => (s, e._2)))
override def withComputedTypeNoRec: StructNode = nodeBuildTypedNode(this, buildType)
override protected def buildType: Type = StructType(elements.map { case (s, n) =>
val t = n.nodeType
if(t == UnassignedType) throw new SlickException(s"StructNode child $s has UnassignedType")
(s, t)
})
}
/** A literal value expression. */
trait LiteralNode extends NullaryNode with TypedNode {
type Self = LiteralNode
def value: Any
/** Indicates whether this value should be considered volatile, i.e. it
* contains user-generated data or may change in future executions of what
* is otherwise the same query. A database back-end should usually turn
* volatile constants into bind variables. */
def volatileHint: Boolean
}
object LiteralNode {
def apply(tp: Type, v: Any, vol: Boolean = false): LiteralNode = new LiteralNode {
val value = v
val tpe = tp
def nodeRebuild = apply(tp, v, vol)
def volatileHint = vol
override def toString = s"LiteralNode $value (volatileHint=$volatileHint)"
}
def apply[T](v: T)(implicit tp: ScalaBaseType[T]): LiteralNode = apply(tp, v)
def unapply(n: LiteralNode): Option[Any] = Some(n.value)
}
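// Illustrative only: LiteralNode(42) resolves the implicit ScalaBaseType[Int] and
// builds a typed literal with volatileHint = false.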
trait BinaryNode extends Node {
def left: Node
def right: Node
lazy val nodeChildren = Seq(left, right)
protected[this] final def nodeRebuild(ch: IndexedSeq[Node]): Self = nodeRebuild(ch(0), ch(1))
protected[this] def nodeRebuild(left: Node, right: Node): Self
}
trait UnaryNode extends Node {
def child: Node
lazy val nodeChildren = Seq(child)
protected[this] final def nodeRebuild(ch: IndexedSeq[Node]): Self = nodeRebuild(ch(0))
protected[this] def nodeRebuild(child: Node): Self
}
trait NullaryNode extends Node {
val nodeChildren = Nil
protected[this] final def nodeRebuild(ch: IndexedSeq[Node]): Self = nodeRebuild
protected[this] def nodeRebuild: Self
}
/** An expression that represents a plain value lifted into a Query. */
final case class Pure(value: Node, identity: TypeSymbol = new AnonTypeSymbol) extends UnaryNode with SimplyTypedNode {
type Self = Pure
def child = value
override def nodeChildNames = Seq("value")
protected[this] def nodeRebuild(child: Node) = copy(child)
def withComputedTypeNoRec: Self = nodeBuildTypedNode(this, buildType)
protected def buildType =
CollectionType(CollectionTypeConstructor.default,
NominalType(identity)(value.nodeType))
}
/** Common superclass for expressions of type
* (CollectionType(c, t), _) => CollectionType(c, t). */
abstract class FilteredQuery extends Node {
protected[this] def generator: Symbol
def from: Node
def nodeGenerators = Seq((generator, from))
override def toString = this match {
case p: Product =>
val n = getClass.getName.replaceFirst(".*\\.", "").replaceFirst(".*\\$", "")
val args = p.productIterator.filterNot(n => n.isInstanceOf[Node] || n.isInstanceOf[Symbol]).mkString(", ")
if(args.isEmpty) n else (n + ' ' + args)
case _ => super.toString
}
def nodeWithComputedType2(scope: SymbolScope, typeChildren: Boolean, retype: Boolean): Self = {
val from2 = from.nodeWithComputedType(scope, typeChildren, retype)
val genScope = scope + (generator -> from2.nodeType.asCollectionType.elementType)
val ch2: IndexedSeq[Node] = nodeChildren.map { ch =>
if(ch eq from) from2 else ch.nodeWithComputedType(genScope, typeChildren, retype)
}(collection.breakOut)
nodeRebuildOrThis(ch2).nodeTypedOrCopy(if(!nodeHasType || retype) ch2.head.nodeType else nodeType).asInstanceOf[Self]
}
}
/** A .filter call of type
* (CollectionType(c, t), Boolean) => CollectionType(c, t). */
final case class Filter(generator: Symbol, from: Node, where: Node) extends FilteredQuery with BinaryNode with DefNode {
type Self = Filter
def left = from
def right = where
override def nodeChildNames = Seq("from "+generator, "where")
protected[this] def nodeRebuild(left: Node, right: Node) = copy(from = left, where = right)
protected[this] def nodeRebuildWithGenerators(gen: IndexedSeq[Symbol]) = copy(generator = gen(0))
}
object Filter {
def ifRefutable(generator: Symbol, from: Node, where: Node): Node =
if(where match { case LiteralNode(true) => true; case _ => false }) from
else Filter(generator, from, where)
}
/** A .sortBy call of type
* (CollectionType(c, t), _) => CollectionType(c, t). */
final case class SortBy(generator: Symbol, from: Node, by: Seq[(Node, Ordering)]) extends FilteredQuery with DefNode {
type Self = SortBy
lazy val nodeChildren = from +: by.map(_._1)
protected[this] def nodeRebuild(ch: IndexedSeq[Node]) =
copy(from = ch(0), by = by.zip(ch.tail).map{ case ((_, o), n) => (n, o) })
override def nodeChildNames = ("from "+generator) +: by.zipWithIndex.map("by" + _._2)
protected[this] def nodeRebuildWithGenerators(gen: IndexedSeq[Symbol]) = copy(generator = gen(0))
override def toString = "SortBy " + by.map(_._2).mkString(", ")
}
final case class Ordering(direction: Ordering.Direction = Ordering.Asc, nulls: Ordering.NullOrdering = Ordering.NullsDefault) {
def asc = copy(direction = Ordering.Asc)
def desc = copy(direction = Ordering.Desc)
def reverse = copy(direction = direction.reverse)
def nullsDefault = copy(nulls = Ordering.NullsDefault)
def nullsFirst = copy(nulls = Ordering.NullsFirst)
def nullsLast = copy(nulls = Ordering.NullsLast)
}
object Ordering {
sealed abstract class NullOrdering(val first: Boolean, val last: Boolean)
final case object NullsDefault extends NullOrdering(false, false)
final case object NullsFirst extends NullOrdering(true, false)
final case object NullsLast extends NullOrdering(false, true)
sealed abstract class Direction(val desc: Boolean) { def reverse: Direction }
final case object Asc extends Direction(false) { def reverse = Desc }
final case object Desc extends Direction(true) { def reverse = Asc }
}
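// Illustrative only: Ordering().desc.nullsFirst == Ordering(Ordering.Desc, Ordering.NullsFirst)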
/** A .groupBy call. */
final case class GroupBy(fromGen: Symbol, from: Node, by: Node) extends BinaryNode with DefNode {
type Self = GroupBy
def left = from
def right = by
override def nodeChildNames = Seq("from "+fromGen, "by")
protected[this] def nodeRebuild(left: Node, right: Node) = copy(from = left, by = right)
protected[this] def nodeRebuildWithGenerators(gen: IndexedSeq[Symbol]) = copy(fromGen = gen(0))
def nodeGenerators = Seq((fromGen, from))
override def toString = "GroupBy"
def nodeWithComputedType2(scope: SymbolScope, typeChildren: Boolean, retype: Boolean): Self = {
val from2 = from.nodeWithComputedType(scope, typeChildren, retype)
val from2Type = from2.nodeType.asCollectionType
val by2 = by.nodeWithComputedType(scope + (fromGen -> from2Type.elementType), typeChildren, retype)
nodeRebuildOrThis(Vector(from2, by2)).nodeTypedOrCopy(
if(!nodeHasType || retype)
CollectionType(from2Type.cons, ProductType(IndexedSeq(by2.nodeType, CollectionType(CollectionTypeConstructor.default, from2Type.elementType))))
else nodeType)
}
}
/** A .take call. */
final case class Take(from: Node, num: Int) extends FilteredQuery with UnaryNode {
type Self = Take
def child = from
protected[this] val generator = new AnonSymbol
override def nodeChildNames = Seq("from")
protected[this] def nodeRebuild(child: Node) = copy(from = child)
}
/** A .drop call. */
final case class Drop(from: Node, num: Int) extends FilteredQuery with UnaryNode {
type Self = Drop
def child = from
protected[this] val generator = new AnonSymbol
override def nodeChildNames = Seq("from")
protected[this] def nodeRebuild(child: Node) = copy(from = child)
}
/** A join expression of type
* (CollectionType(c, t), CollectionType(_, u)) => CollectionType(c, (t, u)). */
final case class Join(leftGen: Symbol, rightGen: Symbol, left: Node, right: Node, jt: JoinType, on: Node) extends DefNode {
type Self = Join
lazy val nodeChildren = IndexedSeq(left, right, on)
protected[this] def nodeRebuild(ch: IndexedSeq[Node]) = copy(left = ch(0), right = ch(1), on = ch(2))
override def nodeChildNames = Seq("left "+leftGen, "right "+rightGen, "on")
override def toString = "Join " + jt.sqlName
def nodeGenerators = Seq((leftGen, left), (rightGen, right))
protected[this] def nodeRebuildWithGenerators(gen: IndexedSeq[Symbol]) =
copy(leftGen = gen(0), rightGen = gen(1))
def nodeWithComputedType2(scope: SymbolScope, typeChildren: Boolean, retype: Boolean): Self = {
val left2 = left.nodeWithComputedType(scope, typeChildren, retype)
val right2 = right.nodeWithComputedType(scope, typeChildren, retype)
val left2Type = left2.nodeType.asCollectionType
val right2Type = right2.nodeType.asCollectionType
val on2 = on.nodeWithComputedType(scope + (leftGen -> left2Type.elementType) + (rightGen -> right2Type.elementType), typeChildren, retype)
nodeRebuildOrThis(Vector(left2, right2, on2)).nodeTypedOrCopy(
if(!nodeHasType || retype)
CollectionType(left2Type.cons, ProductType(IndexedSeq(left2Type.elementType, right2Type.elementType)))
else nodeType)
}
}
/** A union of type
* (CollectionType(c, t), CollectionType(_, t)) => CollectionType(c, t). */
final case class Union(left: Node, right: Node, all: Boolean, leftGen: Symbol = new AnonSymbol, rightGen: Symbol = new AnonSymbol) extends BinaryNode with DefNode with SimplyTypedNode {
type Self = Union
protected[this] def nodeRebuild(left: Node, right: Node) = copy(left = left, right = right)
override def toString = if(all) "Union all" else "Union"
override def nodeChildNames = Seq("left "+leftGen, "right "+rightGen)
def nodeGenerators = Seq((leftGen, left), (rightGen, right))
protected[this] def nodeRebuildWithGenerators(gen: IndexedSeq[Symbol]) = copy(leftGen = gen(0), rightGen = gen(1))
protected def buildType = left.nodeType
}
/** A .flatMap call of type
* (CollectionType(c, _), CollectionType(_, u)) => CollectionType(c, u). */
final case class Bind(generator: Symbol, from: Node, select: Node) extends BinaryNode with DefNode {
type Self = Bind
def left = from
def right = select
override def nodeChildNames = Seq("from "+generator, "select")
protected[this] def nodeRebuild(left: Node, right: Node) = copy(from = left, select = right)
def nodeGenerators = Seq((generator, from))
override def toString = "Bind"
protected[this] def nodeRebuildWithGenerators(gen: IndexedSeq[Symbol]) = copy(generator = gen(0))
def nodeWithComputedType2(scope: SymbolScope, typeChildren: Boolean, retype: Boolean): Self = {
val from2 = from.nodeWithComputedType(scope, typeChildren, retype)
val from2Type = from2.nodeType.asCollectionType
val select2 = select.nodeWithComputedType(scope + (generator -> from2Type.elementType), typeChildren, retype)
nodeRebuildOrThis(Vector(from2, select2)).nodeTypedOrCopy(
if(!nodeHasType || retype)
CollectionType(from2Type.cons, select2.nodeType.asCollectionType.elementType)
else nodeType)
}
}
/** A table expansion. In phase expandTables, all tables are replaced by
* TableExpansions to capture the dual nature of tables as a single entity
* and a structure of columns. TableExpansions are removed again in phase
* rewritePaths. */
final case class TableExpansion(generator: Symbol, table: Node, columns: Node) extends BinaryNode with DefNode {
type Self = TableExpansion
def left = table
def right = columns
override def nodeChildNames = Seq("table "+generator, "columns")
protected[this] def nodeRebuild(left: Node, right: Node) = copy(table = left, columns = right)
def nodeGenerators = Seq((generator, table))
override def toString = "TableExpansion"
protected[this] def nodeRebuildWithGenerators(gen: IndexedSeq[Symbol]) = copy(generator = gen(0))
def nodeWithComputedType2(scope: SymbolScope, typeChildren: Boolean, retype: Boolean): Self = {
val table2 = table.nodeWithComputedType(scope, typeChildren, retype)
val columns2 = columns.nodeWithComputedType(scope + (generator -> table2.nodeType.asCollectionType.elementType), typeChildren, retype)
nodeRebuildOrThis(Vector(table2, columns2)).nodeTypedOrCopy(if(!nodeHasType || retype) table2.nodeType else nodeType)
}
}
/** An expression that selects a field in another expression. */
final case class Select(in: Node, field: Symbol) extends UnaryNode with SimplyTypedNode {
type Self = Select
def child = in
override def nodeChildNames = Seq("in")
protected[this] def nodeRebuild(child: Node) = copy(in = child).nodeTyped(nodeType)
override def toString = Path.unapply(this) match {
case Some(l) => Path.toString(l)
case None => super.toString
}
protected def buildType = in.nodeType.select(field)
}
/** A function call expression. */
final case class Apply(sym: Symbol, children: Seq[Node])(val tpe: Type) extends TypedNode {
type Self = Apply
def nodeChildren = children
protected[this] def nodeRebuild(ch: IndexedSeq[scala.slick.ast.Node]) = copy(children = ch)(tpe)
override def toString = "Apply "+sym
}
/** A reference to a Symbol */
final case class Ref(sym: Symbol) extends NullaryNode {
type Self = Ref
def nodeWithComputedType2(scope: SymbolScope, typeChildren: Boolean, retype: Boolean): Self =
if(nodeHasType && !retype) this else {
scope.get(sym) match {
case Some(t) => if(t == nodeType) this else copy().nodeTyped(t)
case _ => throw new SlickException("No type for symbol "+sym+" found for "+this)
}
}
def nodeRebuild = copy()
}
/** A constructor/extractor for nested Selects starting at a Ref. */
object Path {
def apply(l: List[Symbol]): Node = l match {
case s :: Nil => Ref(s)
case s :: l => Select(apply(l), s)
}
def unapply(n: Node): Option[List[Symbol]] = n match {
case Ref(sym) => Some(List(sym))
case Select(in, s) => unapply(in).map(l => s :: l)
case _ => None
}
def toString(path: Seq[Symbol]): String = path.reverseIterator.mkString("Path ", ".", "")
def toString(s: Select): String = s match {
case Path(syms) => toString(syms)
case n => n.toString
}
}
object FwdPath {
def apply(ch: List[Symbol]) = Path(ch.reverse)
def unapply(n: Node): Option[List[Symbol]] = Path.unapply(n).map(_.reverse)
def toString(path: Seq[Symbol]): String = path.mkString("Path ", ".", "")
}
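// Illustrative only (symbols a and b assumed): Path(List(b, a)) and FwdPath(List(a, b))
// both build Select(Ref(a), b); Path takes the innermost symbol last, FwdPath takes
// the symbols in forward order and reverses them internally.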
/** A Node representing a database table. */
final case class TableNode(schemaName: Option[String], tableName: String, identity: TableIdentitySymbol, driverTable: Any) extends NullaryNode with TypedNode {
type Self = TableNode
def tpe = CollectionType(CollectionTypeConstructor.default, NominalType(identity)(NoType))
def nodeRebuild = copy()
override def toString = "Table " + tableName
}
/** A node that represents an SQL sequence. */
final case class SequenceNode(name: String)(val increment: Long) extends NullaryNode with TypedNode {
type Self = SequenceNode
def tpe = ScalaBaseType.longType
def nodeRebuild = copy()(increment)
}
/** A Query of this special Node represents an infinite stream of consecutive
* numbers starting at the given number. This is used as an operand for
* zipWithIndex. It is not exposed directly in the query language because it
* cannot be represented in SQL outside of a 'zip' operation. */
final case class RangeFrom(start: Long = 1L) extends NullaryNode with TypedNode {
type Self = RangeFrom
def tpe = CollectionType(CollectionTypeConstructor.default, ScalaBaseType.longType)
def nodeRebuild = copy()
}
/** An if-then part of a Conditional node */
final case class IfThen(val left: Node, val right: Node) extends BinaryNode with SimplyTypedNode {
type Self = IfThen
protected[this] def nodeRebuild(left: Node, right: Node): Self = copy(left = left, right = right)
protected def buildType = right.nodeType
}
/** A conditional expression; all clauses should be IfThen nodes */
final case class ConditionalExpr(val clauses: IndexedSeq[Node], val elseClause: Node) extends SimplyTypedNode {
type Self = ConditionalExpr
val nodeChildren = elseClause +: clauses
override def nodeChildNames = "else" +: (1 to clauses.length).map(_.toString)
protected[this] def nodeRebuild(ch: IndexedSeq[Node]): Self =
copy(clauses = ch.tail, elseClause = ch.head)
protected def buildType = {
val isNullable = nodeChildren.exists(ch =>
ch.nodeType.isInstanceOf[OptionType] || ch.nodeType == ScalaBaseType.nullType)
val base = clauses.head.nodeType
if(isNullable && !base.isInstanceOf[OptionType]) OptionType(base) else base
}
override def toString = "ConditionalExpr"
}
final case class OptionApply(val child: Node) extends UnaryNode with SimplyTypedNode {
type Self = OptionApply
protected[this] def nodeRebuild(ch: Node) = copy(child = ch)
protected def buildType = OptionType(nodeChildren.head.nodeType)
}
final case class GetOrElse(val child: Node, val default: () => Any) extends UnaryNode with SimplyTypedNode {
type Self = GetOrElse
protected[this] def nodeRebuild(ch: Node) = copy(child = ch)
protected def buildType = nodeChildren.head.nodeType.asOptionType.elementType
}
/** A compiled statement with a fixed type, a statement string and
* driver-specific extra data. */
final case class CompiledStatement(statement: String, extra: Any, tpe: Type) extends NullaryNode with TypedNode {
type Self = CompiledStatement
def nodeRebuild = copy()
override def toString = "CompiledStatement \\"" + statement + "\\""
}
/** A client-side type mapping */
final case class TypeMapping(val child: Node, val toBase: Any => Any, val toMapped: Any => Any) extends UnaryNode with SimplyTypedNode { self =>
type Self = TypeMapping
def nodeRebuild(ch: Node) = copy(child = ch)
override def toString = "TypeMapping"
protected def buildType = new MappedScalaType(child.nodeType, toBase, toMapped)
}
/** A parameter from a QueryTemplate which gets turned into a bind variable. */
final case class QueryParameter(extractor: (Any => Any), val tpe: Type) extends NullaryNode with TypedNode {
type Self = QueryParameter
def nodeRebuild = copy()
override def toString = "QueryParameter"
}
repo_name: retronym/slick | path: src/main/scala/scala/slick/ast/Node.scala | language: Scala | license: bsd-2-clause | size: 26,270
package im.actor.server.api.frontend
import akka.actor._
import akka.event.Logging
import akka.stream.Materializer
import akka.stream.scaladsl._
import slick.driver.PostgresDriver.api.Database
import im.actor.server.session.SessionRegion
import im.actor.server.tls.{ Tls, TlsContext }
object TcpFrontend extends Frontend {
override protected val connIdPrefix = "tcp"
def start(host: String, port: Int, tlsContext: Option[TlsContext])(
implicit
sessionRegion: SessionRegion,
db: Database,
system: ActorSystem,
mat: Materializer
): Unit = {
val log = Logging.getLogger(system, this)
Tcp().bind(host, port)
.to(Sink.foreach {
case (Tcp.IncomingConnection(localAddress, remoteAddress, flow)) ⇒
log.debug("New TCP connection from {}", localAddress)
val mtProto = MTProtoBlueprint(nextConnId())
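// Wrap the raw connection flow in TLS when a TlsContext is configured,
// otherwise use it as-is, then join it with the MTProto blueprint and run it.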
val connFlow = tlsContext map (Tls.connection(_, flow)) getOrElse (flow) join mtProto
connFlow.run()
})
.run()
}
}
repo_name: boneyao/actor-platform | path: actor-server/actor-frontend/src/main/scala/im/actor/server/api/frontend/TcpFrontend.scala | language: Scala | license: mit | size: 1,043
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.edu.grade.course.domain
import org.openurp.base.edu.model.Course
import org.openurp.base.edu.model.Student
import org.openurp.edu.grade.course.model.CourseGrade
import org.openurp.edu.grade.course.model.StdGpa
import org.openurp.edu.grade.course.model.StdSemesterGpa
import org.openurp.edu.grade.course.model.StdYearGpa
import org.openurp.base.edu.model.Semester
import scala.collection.mutable.Buffer
import org.beangle.commons.collection.Collections
import java.time.Instant
class DefaultGpaPolicy extends GpaPolicy {
var precision = 2
var rounder: NumRounder = NumRounder.Normal
def calcGa(grades: Iterable[CourseGrade]): Float = {
rounder.round(WeightedMean.calcGa(grades), precision)
}
def calcGpa(grades: Iterable[CourseGrade]): Float = {
rounder.round(WeightedMean.calcGpa(grades), precision)
}
def calc(std: Student, grades: Iterable[CourseGrade], statDetail: Boolean): StdGpa = {
val stdGpa = new StdGpa(std)
if (statDetail) {
val gradesMap = Collections.newMap[Semester, Buffer[CourseGrade]]
for (grade <- grades) {
val semesterGrades = gradesMap.getOrElseUpdate(grade.semester, Collections.newBuffer)
semesterGrades += grade
}
val yearGradeMap = Collections.newMap[String, Buffer[CourseGrade]]
gradesMap foreach {
case (semester, semesterGrades) =>
val stdTermGpa = new StdSemesterGpa()
stdTermGpa.semester = semester
stdGpa.add(stdTermGpa)
val yearGrades = yearGradeMap.getOrElseUpdate(semester.schoolYear, Collections.newBuffer)
yearGrades ++= semesterGrades
stdTermGpa.gpa = this.calcGpa(semesterGrades)
stdTermGpa.ga = this.calcGa(semesterGrades)
stdTermGpa.gradeCount = semesterGrades.size
val stats = statCredits(semesterGrades)
stdTermGpa.totalCredits = stats(0)
stdTermGpa.credits = stats(1)
}
yearGradeMap foreach {
case (year, yearGrades) =>
val stdYearGpa = new StdYearGpa()
stdYearGpa.schoolYear = year
stdGpa.add(stdYearGpa)
stdYearGpa.gpa = this.calcGpa(yearGrades)
stdYearGpa.ga = this.calcGa(yearGrades)
stdYearGpa.gradeCount = yearGrades.size
val stats = statCredits(yearGrades)
stdYearGpa.totalCredits = stats(0)
stdYearGpa.credits = stats(1)
}
}
stdGpa.gpa = this.calcGpa(grades)
stdGpa.ga = this.calcGa(grades)
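// Deduplicate grades by course: keep one grade per course and replace a stored
// grade only while it is not passed, so a passing attempt wins in the counts below.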
val courseMap = Collections.newMap[Course, CourseGrade]
for (grade <- grades) {
val add = courseMap.get(grade.course) match {
case None => true
case Some(exist) => !exist.passed
}
if (add) {
courseMap.put(grade.course, grade)
}
}
stdGpa.gradeCount = courseMap.size
val totalStats = statCredits(courseMap.values)
stdGpa.totalCredits = totalStats(0)
stdGpa.credits = totalStats(1)
stdGpa.updatedAt = Instant.now
stdGpa
}
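/** Sums course credits over the given grades and returns Array(totalCredits, passedCredits):
 * index 0 counts every graded course, index 1 only the courses whose grade is passed. */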
private def statCredits(grades: Iterable[CourseGrade]): Array[Float] = {
var credits = 0f
var all = 0f
for (grade <- grades) {
if (grade.passed) credits += grade.course.credits
all += grade.course.credits
}
Array(all, credits)
}
}
repo_name: openurp/api | path: edu/src/main/scala/org/openurp/edu/grade/course/domain/DefaultGpaPolicy.scala | language: Scala | license: lgpl-3.0 | size: 3,995
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.db.transport
import org.beangle.commons.io.IOs
import org.beangle.commons.lang.Strings
import org.beangle.commons.lang.time.Stopwatch
import org.beangle.commons.logging.Logging
import org.beangle.data.jdbc.query.JdbcExecutor
import java.io.{File, FileInputStream}
import javax.sql.DataSource
trait Action {
def process(): Boolean
}
case class ActionConfig(category: String, properties: Map[String, String])
class SqlAction(val dataSource: DataSource, fileName: String) extends Action with Logging {
require(new File(fileName).exists(), "sql file: " + fileName + " does not exist")
def process(): Boolean = {
val executor = new JdbcExecutor(dataSource)
executeFile(executor)
}
def executeFile(executor: JdbcExecutor): Boolean = {
logger.info("execute sql scripts " + fileName)
readSql(fileName) foreach { s =>
if (s.startsWith("--")) {
var comment = Strings.substringBefore(s, "\n")
comment = Strings.replace(comment, "--", "")
var statement = Strings.substringAfter(s, "\n").trim()
statement = Strings.replace(statement, "\n", " ")
val sw = new Stopwatch(true)
val rs = executor.update(statement)
logger.info(comment + s" ${rs} rows, took ${sw}")
} else if (Strings.isNotBlank(s)) {
executor.update(s)
}
}
true
}
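/** Splits the script content on ';' into individual statements and normalizes
 * line endings; statements starting with "--" comments are handled in executeFile. */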
def readSql(name: String): Seq[String] = {
val content = IOs.readString(new FileInputStream(new File(name)))
val statements = Strings.split(content, ";")
statements.map(x => x.replace('\r', '\n').trim).toList
}
}
repo_name: beangle/db | path: transport/src/main/scala/org/beangle/db/transport/Action.scala | language: Scala | license: gpl-3.0 | size: 2,307
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.mesos
import java.nio.ByteBuffer
import java.util.Arrays
import java.util.Collection
import java.util.Collections
import java.util.Properties
import scala.collection.JavaConverters._
import scala.collection.immutable
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.mesos.{Protos, Scheduler, SchedulerDriver}
import org.apache.mesos.Protos._
import org.apache.mesos.Protos.Value.Scalar
import org.mockito.ArgumentCaptor
import org.mockito.ArgumentMatchers.{any, anyLong, eq => meq}
import org.mockito.Mockito._
import org.scalatestplus.mockito.MockitoSugar
import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext,
SparkFunSuite}
import org.apache.spark.deploy.mesos.config._
import org.apache.spark.executor.MesosExecutorBackend
import org.apache.spark.resource.ResourceInformation
import org.apache.spark.scheduler.{LiveListenerBus, SparkListenerExecutorAdded,
TaskDescription, TaskSchedulerImpl, WorkerOffer}
import org.apache.spark.scheduler.cluster.ExecutorInfo
class MesosFineGrainedSchedulerBackendSuite
extends SparkFunSuite with LocalSparkContext with MockitoSugar {
test("weburi is set in created scheduler driver") {
val conf = new SparkConf
conf.set(DRIVER_WEBUI_URL, "http://webui")
conf.set("spark.app.name", "name1")
val sc = mock[SparkContext]
when(sc.conf).thenReturn(conf)
when(sc.sparkUser).thenReturn("sparkUser1")
when(sc.appName).thenReturn("appName1")
val taskScheduler = mock[TaskSchedulerImpl]
val driver = mock[SchedulerDriver]
when(driver.start()).thenReturn(Protos.Status.DRIVER_RUNNING)
val backend = new MesosFineGrainedSchedulerBackend(taskScheduler, sc, "master") {
override protected def createSchedulerDriver(
masterUrl: String,
scheduler: Scheduler,
sparkUser: String,
appName: String,
conf: SparkConf,
webuiUrl: Option[String] = None,
checkpoint: Option[Boolean] = None,
failoverTimeout: Option[Double] = None,
frameworkId: Option[String] = None): SchedulerDriver = {
markRegistered()
assert(webuiUrl.isDefined)
assert(webuiUrl.get.equals("http://webui"))
driver
}
}
backend.start()
}
test("Use configured mesosExecutor.cores for ExecutorInfo") {
val mesosExecutorCores = 3.0
val conf = new SparkConf
conf.set(EXECUTOR_CORES, mesosExecutorCores)
val listenerBus = mock[LiveListenerBus]
listenerBus.post(
SparkListenerExecutorAdded(anyLong, "s1",
new ExecutorInfo("host1", 2, Map.empty, Map.empty)))
val sc = mock[SparkContext]
when(sc.getSparkHome()).thenReturn(Option("/spark-home"))
when(sc.conf).thenReturn(conf)
when(sc.executorEnvs).thenReturn(new mutable.HashMap[String, String])
when(sc.executorMemory).thenReturn(100)
when(sc.listenerBus).thenReturn(listenerBus)
val taskScheduler = mock[TaskSchedulerImpl]
when(taskScheduler.CPUS_PER_TASK).thenReturn(2)
val mesosSchedulerBackend = new MesosFineGrainedSchedulerBackend(taskScheduler, sc, "master")
val resources = Arrays.asList(
mesosSchedulerBackend.createResource("cpus", 4),
mesosSchedulerBackend.createResource("mem", 1024))
// uri is null.
val (executorInfo, _) = mesosSchedulerBackend.createExecutorInfo(resources, "test-id")
val executorResources = executorInfo.getResourcesList
val cpus = executorResources.asScala.find(_.getName == "cpus").get.getScalar.getValue
assert(cpus === mesosExecutorCores)
}
test("check spark-class location correctly") {
val conf = new SparkConf
conf.set(EXECUTOR_HOME, "/mesos-home")
val listenerBus = mock[LiveListenerBus]
listenerBus.post(
SparkListenerExecutorAdded(anyLong, "s1",
new ExecutorInfo("host1", 2, Map.empty, Map.empty)))
val sc = mock[SparkContext]
when(sc.getSparkHome()).thenReturn(Option("/spark-home"))
when(sc.conf).thenReturn(conf)
when(sc.executorEnvs).thenReturn(new mutable.HashMap[String, String])
when(sc.executorMemory).thenReturn(100)
when(sc.listenerBus).thenReturn(listenerBus)
val taskScheduler = mock[TaskSchedulerImpl]
when(taskScheduler.CPUS_PER_TASK).thenReturn(2)
val mesosSchedulerBackend = new MesosFineGrainedSchedulerBackend(taskScheduler, sc, "master")
val resources = Arrays.asList(
mesosSchedulerBackend.createResource("cpus", 4),
mesosSchedulerBackend.createResource("mem", 1024))
// uri is null.
val (executorInfo, _) = mesosSchedulerBackend.createExecutorInfo(resources, "test-id")
assert(executorInfo.getCommand.getValue ===
s" /mesos-home/bin/spark-class ${classOf[MesosExecutorBackend].getName}")
// uri exists.
conf.set(EXECUTOR_URI, "hdfs:///test-app-1.0.0.tgz")
val (executorInfo1, _) = mesosSchedulerBackend.createExecutorInfo(resources, "test-id")
assert(executorInfo1.getCommand.getValue ===
s"cd test-app-1*; ./bin/spark-class ${classOf[MesosExecutorBackend].getName}")
}
test("spark docker properties correctly populate the DockerInfo message") {
val taskScheduler = mock[TaskSchedulerImpl]
val conf = new SparkConf()
.set(EXECUTOR_DOCKER_IMAGE, "spark/mock")
.set(EXECUTOR_DOCKER_FORCE_PULL_IMAGE, true)
.set(EXECUTOR_DOCKER_VOLUMES, Seq("/a", "/b:/b", "/c:/c:rw", "/d:ro", "/e:/e:ro"))
.set(EXECUTOR_DOCKER_PORT_MAPS, Seq("80:8080", "53:53:tcp"))
val listenerBus = mock[LiveListenerBus]
listenerBus.post(
SparkListenerExecutorAdded(anyLong, "s1",
new ExecutorInfo("host1", 2, Map.empty, Map.empty)))
val sc = mock[SparkContext]
when(sc.executorMemory).thenReturn(100)
when(sc.getSparkHome()).thenReturn(Option("/spark-home"))
when(sc.executorEnvs).thenReturn(new mutable.HashMap[String, String])
when(sc.conf).thenReturn(conf)
when(sc.listenerBus).thenReturn(listenerBus)
val backend = new MesosFineGrainedSchedulerBackend(taskScheduler, sc, "master")
val (execInfo, _) = backend.createExecutorInfo(
Arrays.asList(backend.createResource("cpus", 4)), "mockExecutor")
assert(execInfo.getContainer.getDocker.getImage.equals("spark/mock"))
assert(execInfo.getContainer.getDocker.getForcePullImage)
val portmaps = execInfo.getContainer.getDocker.getPortMappingsList
assert(portmaps.get(0).getHostPort.equals(80))
assert(portmaps.get(0).getContainerPort.equals(8080))
assert(portmaps.get(0).getProtocol.equals("tcp"))
assert(portmaps.get(1).getHostPort.equals(53))
assert(portmaps.get(1).getContainerPort.equals(53))
assert(portmaps.get(1).getProtocol.equals("tcp"))
val volumes = execInfo.getContainer.getVolumesList
assert(volumes.get(0).getContainerPath.equals("/a"))
assert(volumes.get(0).getMode.equals(Volume.Mode.RW))
assert(volumes.get(1).getContainerPath.equals("/b"))
assert(volumes.get(1).getHostPath.equals("/b"))
assert(volumes.get(1).getMode.equals(Volume.Mode.RW))
assert(volumes.get(2).getContainerPath.equals("/c"))
assert(volumes.get(2).getHostPath.equals("/c"))
assert(volumes.get(2).getMode.equals(Volume.Mode.RW))
assert(volumes.get(3).getContainerPath.equals("/d"))
assert(volumes.get(3).getMode.equals(Volume.Mode.RO))
assert(volumes.get(4).getContainerPath.equals("/e"))
assert(volumes.get(4).getHostPath.equals("/e"))
assert(volumes.get(4).getMode.equals(Volume.Mode.RO))
}
test("mesos resource offers result in launching tasks") {
def createOffer(id: Int, mem: Int, cpu: Int): Offer = {
val builder = Offer.newBuilder()
builder.addResourcesBuilder()
.setName("mem")
.setType(Value.Type.SCALAR)
.setScalar(Scalar.newBuilder().setValue(mem))
builder.addResourcesBuilder()
.setName("cpus")
.setType(Value.Type.SCALAR)
.setScalar(Scalar.newBuilder().setValue(cpu))
builder.setId(OfferID.newBuilder().setValue(s"o${id.toString}").build())
.setFrameworkId(FrameworkID.newBuilder().setValue("f1"))
.setSlaveId(SlaveID.newBuilder().setValue(s"s${id.toString}"))
.setHostname(s"host${id.toString}").build()
}
val driver = mock[SchedulerDriver]
val taskScheduler = mock[TaskSchedulerImpl]
val listenerBus = mock[LiveListenerBus]
listenerBus.post(
SparkListenerExecutorAdded(anyLong, "s1",
new ExecutorInfo("host1", 2, Map.empty, Map.empty)))
val sc = mock[SparkContext]
when(sc.executorMemory).thenReturn(100)
when(sc.getSparkHome()).thenReturn(Option("/path"))
when(sc.executorEnvs).thenReturn(new mutable.HashMap[String, String])
when(sc.conf).thenReturn(new SparkConf)
when(sc.listenerBus).thenReturn(listenerBus)
val backend = new MesosFineGrainedSchedulerBackend(taskScheduler, sc, "master")
val minMem = backend.executorMemory(sc)
val minCpu = 4
val mesosOffers = new java.util.ArrayList[Offer]
mesosOffers.add(createOffer(1, minMem, minCpu))
mesosOffers.add(createOffer(2, minMem - 1, minCpu))
mesosOffers.add(createOffer(3, minMem, minCpu))
val expectedWorkerOffers = new ArrayBuffer[WorkerOffer](2)
expectedWorkerOffers += new WorkerOffer(
mesosOffers.get(0).getSlaveId.getValue,
mesosOffers.get(0).getHostname,
(minCpu - backend.mesosExecutorCores).toInt
)
expectedWorkerOffers += new WorkerOffer(
mesosOffers.get(2).getSlaveId.getValue,
mesosOffers.get(2).getHostname,
(minCpu - backend.mesosExecutorCores).toInt
)
val taskDesc = new TaskDescription(
taskId = 1L,
attemptNumber = 0,
executorId = "s1",
name = "n1",
index = 0,
partitionId = 0,
addedFiles = mutable.Map.empty[String, Long],
addedJars = mutable.Map.empty[String, Long],
addedArchives = mutable.Map.empty[String, Long],
properties = new Properties(),
resources = immutable.Map.empty[String, ResourceInformation],
ByteBuffer.wrap(new Array[Byte](0)))
when(taskScheduler.resourceOffers(
expectedWorkerOffers.toIndexedSeq)).thenReturn(Seq(Seq(taskDesc)))
when(taskScheduler.CPUS_PER_TASK).thenReturn(2)
val capture = ArgumentCaptor.forClass(classOf[Collection[TaskInfo]])
when(
driver.launchTasks(
meq(Collections.singleton(mesosOffers.get(0).getId)),
capture.capture(),
any(classOf[Filters])
)
).thenReturn(Status.valueOf(1))
when(driver.declineOffer(mesosOffers.get(1).getId)).thenReturn(Status.valueOf(1))
when(driver.declineOffer(mesosOffers.get(2).getId)).thenReturn(Status.valueOf(1))
backend.resourceOffers(driver, mesosOffers)
verify(driver, times(1)).launchTasks(
meq(Collections.singleton(mesosOffers.get(0).getId)),
capture.capture(),
any(classOf[Filters])
)
verify(driver, times(1)).declineOffer(mesosOffers.get(1).getId)
verify(driver, times(1)).declineOffer(mesosOffers.get(2).getId)
assert(capture.getValue.size() === 1)
val taskInfo = capture.getValue.iterator().next()
assert(taskInfo.getName.equals("n1"))
val cpus = taskInfo.getResourcesList.get(0)
assert(cpus.getName.equals("cpus"))
assert(cpus.getScalar.getValue.equals(2.0))
assert(taskInfo.getSlaveId.getValue.equals("s1"))
// Unwanted resources offered on an existing node. Make sure they are declined
val mesosOffers2 = new java.util.ArrayList[Offer]
mesosOffers2.add(createOffer(1, minMem, minCpu))
reset(taskScheduler)
reset(driver)
when(taskScheduler.resourceOffers(any(classOf[IndexedSeq[WorkerOffer]]), any[Boolean]))
.thenReturn(Seq(Seq()))
when(taskScheduler.CPUS_PER_TASK).thenReturn(2)
when(driver.declineOffer(mesosOffers2.get(0).getId)).thenReturn(Status.valueOf(1))
backend.resourceOffers(driver, mesosOffers2)
verify(driver, times(1)).declineOffer(mesosOffers2.get(0).getId)
}
test("can handle multiple roles") {
val driver = mock[SchedulerDriver]
val taskScheduler = mock[TaskSchedulerImpl]
val listenerBus = mock[LiveListenerBus]
listenerBus.post(
SparkListenerExecutorAdded(anyLong, "s1",
new ExecutorInfo("host1", 2, Map.empty, Map.empty)))
val sc = mock[SparkContext]
when(sc.executorMemory).thenReturn(100)
when(sc.getSparkHome()).thenReturn(Option("/path"))
when(sc.executorEnvs).thenReturn(new mutable.HashMap[String, String])
when(sc.conf).thenReturn(new SparkConf)
when(sc.listenerBus).thenReturn(listenerBus)
val id = 1
val builder = Offer.newBuilder()
builder.addResourcesBuilder()
.setName("mem")
.setType(Value.Type.SCALAR)
.setRole("prod")
.setScalar(Scalar.newBuilder().setValue(500))
builder.addResourcesBuilder()
.setName("cpus")
.setRole("prod")
.setType(Value.Type.SCALAR)
.setScalar(Scalar.newBuilder().setValue(1))
builder.addResourcesBuilder()
.setName("mem")
.setRole("dev")
.setType(Value.Type.SCALAR)
.setScalar(Scalar.newBuilder().setValue(600))
builder.addResourcesBuilder()
.setName("cpus")
.setRole("dev")
.setType(Value.Type.SCALAR)
.setScalar(Scalar.newBuilder().setValue(2))
val offer = builder.setId(OfferID.newBuilder().setValue(s"o${id.toString}").build())
.setFrameworkId(FrameworkID.newBuilder().setValue("f1"))
.setSlaveId(SlaveID.newBuilder().setValue(s"s${id.toString}"))
.setHostname(s"host${id.toString}").build()
val mesosOffers = new java.util.ArrayList[Offer]
mesosOffers.add(offer)
val backend = new MesosFineGrainedSchedulerBackend(taskScheduler, sc, "master")
val expectedWorkerOffers = new ArrayBuffer[WorkerOffer](1)
expectedWorkerOffers += new WorkerOffer(
mesosOffers.get(0).getSlaveId.getValue,
mesosOffers.get(0).getHostname,
2 // Deducting 1 for executor
)
val taskDesc = new TaskDescription(
taskId = 1L,
attemptNumber = 0,
executorId = "s1",
name = "n1",
index = 0,
partitionId = 0,
addedFiles = mutable.Map.empty[String, Long],
addedJars = mutable.Map.empty[String, Long],
addedArchives = mutable.Map.empty[String, Long],
properties = new Properties(),
resources = immutable.Map.empty[String, ResourceInformation],
ByteBuffer.wrap(new Array[Byte](0)))
when(taskScheduler.resourceOffers(
expectedWorkerOffers.toIndexedSeq)).thenReturn(Seq(Seq(taskDesc)))
when(taskScheduler.CPUS_PER_TASK).thenReturn(1)
val capture = ArgumentCaptor.forClass(classOf[Collection[TaskInfo]])
when(
driver.launchTasks(
meq(Collections.singleton(mesosOffers.get(0).getId)),
capture.capture(),
any(classOf[Filters])
)
).thenReturn(Status.valueOf(1))
backend.resourceOffers(driver, mesosOffers)
verify(driver, times(1)).launchTasks(
meq(Collections.singleton(mesosOffers.get(0).getId)),
capture.capture(),
any(classOf[Filters])
)
assert(capture.getValue.size() === 1)
val taskInfo = capture.getValue.iterator().next()
assert(taskInfo.getName.equals("n1"))
assert(taskInfo.getResourcesCount === 1)
val cpusDev = taskInfo.getResourcesList.get(0)
assert(cpusDev.getName.equals("cpus"))
assert(cpusDev.getScalar.getValue.equals(1.0))
assert(cpusDev.getRole.equals("dev"))
val executorResources = taskInfo.getExecutor.getResourcesList.asScala
assert(executorResources.exists { r =>
r.getName.equals("mem") && r.getScalar.getValue.equals(484.0) && r.getRole.equals("prod")
})
assert(executorResources.exists { r =>
r.getName.equals("cpus") && r.getScalar.getValue.equals(1.0) && r.getRole.equals("prod")
})
}
}
repo_name: maropu/spark | path: resource-managers/mesos/src/test/scala/org/apache/spark/scheduler/cluster/mesos/MesosFineGrainedSchedulerBackendSuite.scala | language: Scala | license: apache-2.0 | size: 16,638
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom.builder.query.prepared
import com.outworkers.phantom.PhantomSuite
import com.outworkers.phantom.tables.Recipe
import com.outworkers.phantom.dsl._
import com.outworkers.phantom.tables.bugs.VerizonRecord
import com.outworkers.util.samplers._
import scala.concurrent.duration._
class PreparedUpdateQueryTest extends PhantomSuite {
override def beforeAll(): Unit = {
super.beforeAll()
database.recipes.createSchema()
database.verizonSchema.createSchema()
}
it should "execute a prepared update query with a single argument bind" in {
val updated = genOpt[ShortString].map(_.value)
val query = database.recipes.update
.where(_.url eqs ?)
.modify(_.description setTo ?)
.prepare()
val recipe = gen[Recipe]
val chain = for {
_ <- database.recipes.store(recipe).future()
get <- database.recipes.select.where(_.url eqs recipe.url).one()
_ <- query.bind(updated, recipe.url).future()
get2 <- database.recipes.select.where(_.url eqs recipe.url).one()
} yield (get, get2)
whenReady(chain) { case (initial, afterUpdate) =>
initial shouldBe defined
initial.value shouldEqual recipe
afterUpdate shouldBe defined
afterUpdate.value.url shouldEqual recipe.url
afterUpdate.value.props shouldEqual recipe.props
afterUpdate.value.ingredients shouldEqual recipe.ingredients
afterUpdate.value.servings shouldEqual recipe.servings
afterUpdate.value.lastCheckedAt shouldEqual recipe.lastCheckedAt
afterUpdate.value.uid shouldEqual recipe.uid
afterUpdate.value.description shouldEqual updated
}
}
it should "execute an asynchronous prepared update query with a single argument bind" in {
val updated = genOpt[ShortString].map(_.value)
val recipe = gen[Recipe]
val chain = for {
query <- database.recipes.update.where(_.url eqs ?).modify(_.description setTo ?).prepareAsync()
_ <- database.recipes.store(recipe).future()
get <- database.recipes.select.where(_.url eqs recipe.url).one()
_ <- query.bind(updated, recipe.url).future()
get2 <- database.recipes.select.where(_.url eqs recipe.url).one()
} yield (get, get2)
whenReady(chain) { case (initial, afterUpdate) =>
initial shouldBe defined
initial.value shouldEqual recipe
afterUpdate shouldBe defined
afterUpdate.value.url shouldEqual recipe.url
afterUpdate.value.props shouldEqual recipe.props
afterUpdate.value.ingredients shouldEqual recipe.ingredients
afterUpdate.value.servings shouldEqual recipe.servings
afterUpdate.value.lastCheckedAt shouldEqual recipe.lastCheckedAt
afterUpdate.value.uid shouldEqual recipe.uid
afterUpdate.value.description shouldEqual updated
}
}
it should "execute a prepared update query with a three argument bind" in {
val updated = genOpt[ShortString].map(_.value)
val updatedUid = gen[UUID]
val query = database.recipes.update
.where(_.url eqs ?)
.modify(_.description setTo ?)
.and(_.uid setTo ?)
.prepare()
val recipe = gen[Recipe]
val chain = for {
_ <- database.recipes.store(recipe).future()
get <- database.recipes.select.where(_.url eqs recipe.url).one()
_ <- query.bind(updated, updatedUid, recipe.url).future()
get2 <- database.recipes.select.where(_.url eqs recipe.url).one()
} yield (get, get2)
whenReady(chain) { case (initial, afterUpdate) =>
initial shouldBe defined
initial.value shouldEqual recipe
afterUpdate shouldBe defined
afterUpdate.value.url shouldEqual recipe.url
afterUpdate.value.props shouldEqual recipe.props
afterUpdate.value.ingredients shouldEqual recipe.ingredients
afterUpdate.value.servings shouldEqual recipe.servings
afterUpdate.value.lastCheckedAt shouldEqual recipe.lastCheckedAt
afterUpdate.value.uid shouldEqual updatedUid
afterUpdate.value.description shouldEqual updated
}
}
it should "execute an async prepared update query with a three argument bind" in {
val updated = genOpt[ShortString].map(_.value)
val updatedUid = gen[UUID]
val recipe = gen[Recipe]
val chain = for {
query <- database.recipes
.update
.where(_.url eqs ?)
.modify(_.description setTo ?)
.and(_.uid setTo ?)
.prepareAsync()
_ <- database.recipes.store(recipe).future()
get <- database.recipes.select.where(_.url eqs recipe.url).one()
_ <- query.bind(updated, updatedUid, recipe.url).future()
get2 <- database.recipes.select.where(_.url eqs recipe.url).one()
} yield (get, get2)
whenReady(chain) { case (initial, afterUpdate) =>
initial shouldBe defined
initial.value shouldEqual recipe
afterUpdate shouldBe defined
afterUpdate.value.url shouldEqual recipe.url
afterUpdate.value.props shouldEqual recipe.props
afterUpdate.value.ingredients shouldEqual recipe.ingredients
afterUpdate.value.servings shouldEqual recipe.servings
afterUpdate.value.lastCheckedAt shouldEqual recipe.lastCheckedAt
afterUpdate.value.uid shouldEqual updatedUid
afterUpdate.value.description shouldEqual updated
}
}
it should "correctly chain type parameters in conditional non-async prepared update clauses" in {
val sample = gen[VerizonRecord].copy(isDeleted = true)
val sample2 = gen[VerizonRecord].copy(isDeleted = true)
val bindable = db.verizonSchema.update
.where(_.uid eqs ?)
.modify(_.isdeleted setTo ?)
.ifExists
.consistencyLevel_=(ConsistencyLevel.LOCAL_QUORUM)
.prepare()
val chain = for {
_ <- db.verizonSchema.storeRecord(sample)
_ <- db.verizonSchema.storeRecord(sample2)
updated <- bindable.bind(false, sample.uid).future()
res <- db.verizonSchema.select.where(_.uid eqs sample.uid).one()
} yield (updated, res)
whenReady(chain) { case (_, res) =>
res shouldBe defined
res.value.isDeleted shouldBe false
}
}
it should "correctly chain type parameters in conditional async prepared update clauses" in {
val sample = gen[VerizonRecord].copy(isDeleted = true)
val sample2 = gen[VerizonRecord].copy(isDeleted = true)
val chain = for {
_ <- db.verizonSchema.storeRecord(sample)
_ <- db.verizonSchema.storeRecord(sample2)
_ <- db.verizonSchema.updateDeleteStatus.flatMap(_.bind(false, sample.uid).future())
res <- db.verizonSchema.select.where(_.uid eqs sample.uid).one()
} yield res
whenReady(chain) { res =>
res shouldBe defined
res.value.isDeleted shouldBe false
}
}
it should "allow using TTL in update statements using a long value" in {
val sample = gen[VerizonRecord].copy(isDeleted = true)
val sample2 = gen[VerizonRecord].copy(isDeleted = true)
val chain = for {
_ <- db.verizonSchema.storeRecord(sample)
_ <- db.verizonSchema.storeRecord(sample2)
_ <- db.verizonSchema.update
.where(_.uid eqs sample.uid)
.modify(_.isdeleted setTo false)
.ttl(seconds = 5L)
.future()
res <- db.verizonSchema.select.where(_.uid eqs sample.uid).one()
} yield res
whenReady(chain) { res =>
res shouldBe defined
res.value.isDeleted shouldBe false
}
}
it should "allow using TTL in update statements using a Duration value" in {
val sample = gen[VerizonRecord].copy(isDeleted = true)
val sample2 = gen[VerizonRecord].copy(isDeleted = true)
val chain = for {
_ <- db.verizonSchema.storeRecord(sample)
_ <- db.verizonSchema.storeRecord(sample2)
_ <- db.verizonSchema.update
.where(_.uid eqs sample.uid)
.modify(_.isdeleted setTo false)
.ttl(5.seconds)
.future()
res <- db.verizonSchema.select.where(_.uid eqs sample.uid).one()
} yield res
whenReady(chain) { res =>
res shouldBe defined
res.value.isDeleted shouldBe false
}
}
it should "allow using TTL in prepared update statements" in {
val sample = gen[VerizonRecord].copy(isDeleted = true)
val sample2 = gen[VerizonRecord].copy(isDeleted = true)
lazy val updateWithTTL = db.verizonSchema.update
.where(_.uid eqs ?)
.modify(_.isdeleted setTo ?)
.ttl(?)
.prepareAsync()
val chain = for {
_ <- db.verizonSchema.storeRecord(sample)
_ <- db.verizonSchema.storeRecord(sample2)
_ <- updateWithTTL.flatMap(_.bind(5, false, sample.uid).future())
res <- db.verizonSchema.select.where(_.uid eqs sample.uid).one()
} yield res
whenReady(chain) { res =>
res shouldBe defined
res.value.isDeleted shouldBe false
}
}
}
|
outworkers/phantom
|
phantom-dsl/src/test/scala/com/outworkers/phantom/builder/query/prepared/PreparedUpdateQueryTest.scala
|
Scala
|
apache-2.0
| 9,405 |
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import ch.megard.akka.http.cors.scaladsl.CorsDirectives.cors
import com.mongodb.event.{ClusterClosedEvent, ClusterDescriptionChangedEvent, ClusterListener, ClusterOpeningEvent}
import com.typesafe.config.{Config, ConfigFactory}
import net.cucumbersome.rpgRoller.warhammer.combat.CombatController
import net.cucumbersome.rpgRoller.warhammer.combat.domain.{ActorBasedCombatService, CombatInitializer}
import net.cucumbersome.rpgRoller.warhammer.combat.domain.CombatHandler.CombatInitialized
import net.cucumbersome.rpgRoller.warhammer.infrastructure.CommandGateway
import net.cucumbersome.rpgRoller.warhammer.infrastructure.mongo.CollectionBuilder
import net.cucumbersome.rpgRoller.warhammer.infrastructure.repositories.{ActorRepository, MongoDbActorRepository}
import net.cucumbersome.rpgRoller.warhammer.player.ActorsController
import net.cucumbersome.rpgRoller.warhammer.swagger.SwaggerDocService
import org.mongodb.scala.connection.ClusterSettings
import org.mongodb.scala.{MongoClient, MongoClientSettings, ServerAddress}
import org.slf4j.LoggerFactory
import collection.JavaConverters._
object Main {
def main(args: Array[String]): Unit = {
val logger = LoggerFactory.getLogger(getClass)
implicit val system = ActorSystem("rpgRoller")
implicit val materializer = ActorMaterializer()
implicit val ec = system.dispatcher
val config = ConfigFactory.load
val repo = initializeActorsRepository(config)
val actorsController = new ActorsController(repo)
logger.info("Initialized actors controller")
val port = config.getInt("endpoint.port")
val domain = config.getString("endpoint.domain")
val combatHandler = CombatInitializer.initializeCombatHandler
val commandGateway = system.actorOf(CommandGateway.props(combatHandler), "commandGateway")
val combatController = new CombatController(CombatInitializer.initializeCombatService(commandGateway, repo))
logger.info("initialized combat controller")
val swaggerService = new SwaggerDocService(domain, port)
logger.info("initialized swagger service")
val routes = cors()(combatController.route ~ actorsController.route ~ swaggerService.routes)
logger.info("application ready!")
Http().bindAndHandle(routes, domain, port)
}
def initializeActorsRepository(config: Config): ActorRepository = {
val mongoUri = config.getString("mongo.uri")
val mongoDatabaseName = config.getString("mongo.database")
val mongoClient = MongoClient(mongoUri)
val database = mongoClient.getDatabase(mongoDatabaseName)
val actorCollection = CollectionBuilder.buildActorsCollection(database)
new MongoDbActorRepository(actorCollection)
}
  case object MongoClusterListener extends ClusterListener {
val logger = LoggerFactory.getLogger(getClass)
override def clusterOpening(event: ClusterOpeningEvent): Unit = {
logger.info(s"Cluster opened $event")
}
override def clusterClosed(event: ClusterClosedEvent): Unit = {
logger.info(s"Cluster close $event")
}
override def clusterDescriptionChanged(event: ClusterDescriptionChangedEvent): Unit = {
logger.info(s"Cluster description changed $event")
}
}
}
|
CucumisSativus/rpgRollerBackend
|
src/main/scala/Main.scala
|
Scala
|
mit
| 3,332 |
/*
* Copyright 2001-2009 Stephen Colebourne
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.joda.time
import java.io.Serializable
import java.util.ArrayList
import java.util.Arrays
import java.util.HashMap
import java.util.List
import java.util.Map
import org.joda.time.field.FieldUtils
/**
* Controls a period implementation by specifying which duration fields are to be used.
* <p>
* The following implementations are provided:
* <ul>
* <li>Standard - years, months, weeks, days, hours, minutes, seconds, millis
* <li>YearMonthDayTime - years, months, days, hours, minutes, seconds, millis
* <li>YearMonthDay - years, months, days
* <li>YearWeekDayTime - years, weeks, days, hours, minutes, seconds, millis
* <li>YearWeekDay - years, weeks, days
* <li>YearDayTime - years, days, hours, minutes, seconds, millis
 * <li>YearDay - years, days
* <li>DayTime - days, hours, minutes, seconds, millis
* <li>Time - hours, minutes, seconds, millis
* <li>plus one for each single type
* </ul>
*
* <p>
* PeriodType is thread-safe and immutable, and all subclasses must be as well.
*
* @author Brian S O'Neill
* @author Stephen Colebourne
* @since 1.0
*/
@SerialVersionUID(2274324892792009998L)
object PeriodType {
/** Cache of all the known types. */
private val cTypes: Map[PeriodType, AnyRef] = new HashMap[PeriodType, AnyRef](32)
private[time] var YEAR_INDEX: Int = 0
private[time] var MONTH_INDEX: Int = 1
private[time] var WEEK_INDEX: Int = 2
private[time] var DAY_INDEX: Int = 3
private[time] var HOUR_INDEX: Int = 4
private[time] var MINUTE_INDEX: Int = 5
private[time] var SECOND_INDEX: Int = 6
private[time] var MILLI_INDEX: Int = 7
private var cStandard: PeriodType = null
private var cYMDTime: PeriodType = null
private var cYMD: PeriodType = null
private var cYWDTime: PeriodType = null
private var cYWD: PeriodType = null
private var cYDTime: PeriodType = null
private var cYD: PeriodType = null
private var cDTime: PeriodType = null
private var cTime: PeriodType = null
private var cYears: PeriodType = null
private var cMonths: PeriodType = null
private var cWeeks: PeriodType = null
private var cDays: PeriodType = null
private var cHours: PeriodType = null
private var cMinutes: PeriodType = null
private var cSeconds: PeriodType = null
private var cMillis: PeriodType = null
/**
* Gets a type that defines all standard fields.
* <ul>
* <li>years
* <li>months
* <li>weeks
* <li>days
* <li>hours
* <li>minutes
* <li>seconds
* <li>milliseconds
* </ul>
*
* @return the period type
*/
def standard: PeriodType = {
var `type`: PeriodType = cStandard
if (`type` == null) {
`type` = new PeriodType("Standard", Array[DurationFieldType](DurationFieldType.years, DurationFieldType.months, DurationFieldType.weeks, DurationFieldType.days, DurationFieldType.hours, DurationFieldType.minutes, DurationFieldType.seconds, DurationFieldType.millis), Array[Int](0, 1, 2, 3, 4, 5, 6, 7))
cStandard = `type`
}
return `type`
}
/**
* Gets a type that defines all standard fields except weeks.
* <ul>
* <li>years
* <li>months
* <li>days
* <li>hours
* <li>minutes
* <li>seconds
* <li>milliseconds
* </ul>
*
* @return the period type
*/
def yearMonthDayTime: PeriodType = {
var `type`: PeriodType = cYMDTime
if (`type` == null) {
`type` = new PeriodType("YearMonthDayTime", Array[DurationFieldType](DurationFieldType.years, DurationFieldType.months, DurationFieldType.days, DurationFieldType.hours, DurationFieldType.minutes, DurationFieldType.seconds, DurationFieldType.millis), Array[Int](0, 1, -1, 2, 3, 4, 5, 6))
cYMDTime = `type`
}
return `type`
}
/**
* Gets a type that defines the year, month and day fields.
* <ul>
* <li>years
* <li>months
* <li>days
* </ul>
*
* @return the period type
* @since 1.1
*/
def yearMonthDay: PeriodType = {
var `type`: PeriodType = cYMD
if (`type` == null) {
`type` = new PeriodType("YearMonthDay", Array[DurationFieldType](DurationFieldType.years, DurationFieldType.months, DurationFieldType.days), Array[Int](0, 1, -1, 2, -1, -1, -1, -1))
cYMD = `type`
}
return `type`
}
/**
* Gets a type that defines all standard fields except months.
* <ul>
* <li>years
* <li>weeks
* <li>days
* <li>hours
* <li>minutes
* <li>seconds
* <li>milliseconds
* </ul>
*
* @return the period type
*/
def yearWeekDayTime: PeriodType = {
var `type`: PeriodType = cYWDTime
if (`type` == null) {
`type` = new PeriodType("YearWeekDayTime", Array[DurationFieldType](DurationFieldType.years, DurationFieldType.weeks, DurationFieldType.days, DurationFieldType.hours, DurationFieldType.minutes, DurationFieldType.seconds, DurationFieldType.millis), Array[Int](0, -1, 1, 2, 3, 4, 5, 6))
cYWDTime = `type`
}
return `type`
}
/**
* Gets a type that defines year, week and day fields.
* <ul>
* <li>years
* <li>weeks
* <li>days
* </ul>
*
* @return the period type
* @since 1.1
*/
def yearWeekDay: PeriodType = {
var `type`: PeriodType = cYWD
if (`type` == null) {
`type` = new PeriodType("YearWeekDay", Array[DurationFieldType](DurationFieldType.years, DurationFieldType.weeks, DurationFieldType.days), Array[Int](0, -1, 1, 2, -1, -1, -1, -1))
cYWD = `type`
}
return `type`
}
/**
* Gets a type that defines all standard fields except months and weeks.
* <ul>
* <li>years
* <li>days
* <li>hours
* <li>minutes
* <li>seconds
* <li>milliseconds
* </ul>
*
* @return the period type
*/
def yearDayTime: PeriodType = {
var `type`: PeriodType = cYDTime
if (`type` == null) {
`type` = new PeriodType("YearDayTime", Array[DurationFieldType](DurationFieldType.years, DurationFieldType.days, DurationFieldType.hours, DurationFieldType.minutes, DurationFieldType.seconds, DurationFieldType.millis), Array[Int](0, -1, -1, 1, 2, 3, 4, 5))
cYDTime = `type`
}
return `type`
}
/**
* Gets a type that defines the year and day fields.
* <ul>
* <li>years
* <li>days
* </ul>
*
* @return the period type
* @since 1.1
*/
def yearDay: PeriodType = {
var `type`: PeriodType = cYD
if (`type` == null) {
`type` = new PeriodType("YearDay", Array[DurationFieldType](DurationFieldType.years, DurationFieldType.days), Array[Int](0, -1, -1, 1, -1, -1, -1, -1))
cYD = `type`
}
return `type`
}
/**
* Gets a type that defines all standard fields from days downwards.
* <ul>
* <li>days
* <li>hours
* <li>minutes
* <li>seconds
* <li>milliseconds
* </ul>
*
* @return the period type
*/
def dayTime: PeriodType = {
var `type`: PeriodType = cDTime
if (`type` == null) {
`type` = new PeriodType("DayTime", Array[DurationFieldType](DurationFieldType.days, DurationFieldType.hours, DurationFieldType.minutes, DurationFieldType.seconds, DurationFieldType.millis), Array[Int](-1, -1, -1, 0, 1, 2, 3, 4))
cDTime = `type`
}
return `type`
}
/**
* Gets a type that defines all standard time fields.
* <ul>
* <li>hours
* <li>minutes
* <li>seconds
* <li>milliseconds
* </ul>
*
* @return the period type
*/
def time: PeriodType = {
var `type`: PeriodType = cTime
if (`type` == null) {
`type` = new PeriodType("Time", Array[DurationFieldType](DurationFieldType.hours, DurationFieldType.minutes, DurationFieldType.seconds, DurationFieldType.millis), Array[Int](-1, -1, -1, -1, 0, 1, 2, 3))
cTime = `type`
}
return `type`
}
/**
* Gets a type that defines just the years field.
*
* @return the period type
*/
def years: PeriodType = {
var `type`: PeriodType = cYears
if (`type` == null) {
`type` = new PeriodType("Years", Array[DurationFieldType](DurationFieldType.years), Array[Int](0, -1, -1, -1, -1, -1, -1, -1))
cYears = `type`
}
return `type`
}
/**
* Gets a type that defines just the months field.
*
* @return the period type
*/
def months: PeriodType = {
var `type`: PeriodType = cMonths
if (`type` == null) {
`type` = new PeriodType("Months", Array[DurationFieldType](DurationFieldType.months), Array[Int](-1, 0, -1, -1, -1, -1, -1, -1))
cMonths = `type`
}
return `type`
}
/**
* Gets a type that defines just the weeks field.
*
* @return the period type
*/
def weeks: PeriodType = {
var `type`: PeriodType = cWeeks
if (`type` == null) {
`type` = new PeriodType("Weeks", Array[DurationFieldType](DurationFieldType.weeks), Array[Int](-1, -1, 0, -1, -1, -1, -1, -1))
cWeeks = `type`
}
return `type`
}
/**
* Gets a type that defines just the days field.
*
* @return the period type
*/
def days: PeriodType = {
var `type`: PeriodType = cDays
if (`type` == null) {
`type` = new PeriodType("Days", Array[DurationFieldType](DurationFieldType.days), Array[Int](-1, -1, -1, 0, -1, -1, -1, -1))
cDays = `type`
}
return `type`
}
/**
* Gets a type that defines just the hours field.
*
* @return the period type
*/
def hours: PeriodType = {
var `type`: PeriodType = cHours
if (`type` == null) {
`type` = new PeriodType("Hours", Array[DurationFieldType](DurationFieldType.hours), Array[Int](-1, -1, -1, -1, 0, -1, -1, -1))
cHours = `type`
}
return `type`
}
/**
* Gets a type that defines just the minutes field.
*
* @return the period type
*/
def minutes: PeriodType = {
var `type`: PeriodType = cMinutes
if (`type` == null) {
`type` = new PeriodType("Minutes", Array[DurationFieldType](DurationFieldType.minutes), Array[Int](-1, -1, -1, -1, -1, 0, -1, -1))
cMinutes = `type`
}
return `type`
}
/**
* Gets a type that defines just the seconds field.
*
* @return the period type
*/
def seconds: PeriodType = {
var `type`: PeriodType = cSeconds
if (`type` == null) {
`type` = new PeriodType("Seconds", Array[DurationFieldType](DurationFieldType.seconds), Array[Int](-1, -1, -1, -1, -1, -1, 0, -1))
cSeconds = `type`
}
return `type`
}
/**
* Gets a type that defines just the millis field.
*
* @return the period type
*/
def millis: PeriodType = {
var `type`: PeriodType = cMillis
if (`type` == null) {
`type` = new PeriodType("Millis", Array[DurationFieldType](DurationFieldType.millis), Array[Int](-1, -1, -1, -1, -1, -1, -1, 0))
cMillis = `type`
}
return `type`
}
/**
* Gets a period type that contains the duration types of the array.
* <p>
* Only the 8 standard duration field types are supported.
*
* @param types the types to include in the array.
* @return the period type
* @since 1.1
*/
def forFields(types: Array[DurationFieldType]): PeriodType = {
if (types == null || types.length == 0) {
throw new IllegalArgumentException("Types array must not be null or empty")
}
    var i: Int = 0
    while (i < types.length) {
      if (types(i) == null) {
        throw new IllegalArgumentException("Types array must not contain null")
      }
      i += 1
    }
val cache: Map[PeriodType, AnyRef] = cTypes
if (cache.isEmpty) {
cache.put(standard, standard)
cache.put(yearMonthDayTime, yearMonthDayTime)
cache.put(yearMonthDay, yearMonthDay)
cache.put(yearWeekDayTime, yearWeekDayTime)
cache.put(yearWeekDay, yearWeekDay)
cache.put(yearDayTime, yearDayTime)
cache.put(yearDay, yearDay)
cache.put(dayTime, dayTime)
cache.put(time, time)
cache.put(years, years)
cache.put(months, months)
cache.put(weeks, weeks)
cache.put(days, days)
cache.put(hours, hours)
cache.put(minutes, minutes)
cache.put(seconds, seconds)
cache.put(millis, millis)
}
val inPartType: PeriodType = new PeriodType(null, types, null)
val cached: AnyRef = cache.get(inPartType)
if (cached.isInstanceOf[PeriodType]) {
return cached.asInstanceOf[PeriodType]
}
if (cached != null) {
throw new IllegalArgumentException("PeriodType does not support fields: " + cached)
}
var `type`: PeriodType = standard
    val list: List[DurationFieldType] = new ArrayList[DurationFieldType](Arrays.asList(types: _*))
if (list.remove(DurationFieldType.years) == false) {
`type` = `type`.withYearsRemoved
}
if (list.remove(DurationFieldType.months) == false) {
`type` = `type`.withMonthsRemoved
}
if (list.remove(DurationFieldType.weeks) == false) {
`type` = `type`.withWeeksRemoved
}
if (list.remove(DurationFieldType.days) == false) {
`type` = `type`.withDaysRemoved
}
if (list.remove(DurationFieldType.hours) == false) {
`type` = `type`.withHoursRemoved
}
if (list.remove(DurationFieldType.minutes) == false) {
`type` = `type`.withMinutesRemoved
}
if (list.remove(DurationFieldType.seconds) == false) {
`type` = `type`.withSecondsRemoved
}
if (list.remove(DurationFieldType.millis) == false) {
`type` = `type`.withMillisRemoved
}
if (list.size > 0) {
cache.put(inPartType, list)
throw new IllegalArgumentException("PeriodType does not support fields: " + list)
}
val checkPartType: PeriodType = new PeriodType(null, `type`.iTypes, null)
val checkedType: PeriodType = cache.get(checkPartType).asInstanceOf[PeriodType]
if (checkedType != null) {
cache.put(checkPartType, checkedType)
return checkedType
}
cache.put(checkPartType, `type`)
return `type`
}
}
@SerialVersionUID(2274324892792009998L)
class PeriodType protected (
    /** The name of the type */
    private val iName: String,
    /** The array of types */
    private val iTypes: Array[DurationFieldType],
    /** The array of indices */
    private val iIndices: Array[Int]) extends Serializable {
/**
* Gets the name of the period type.
*
* @return the name
*/
def getName: String = {
return iName
}
/**
* Gets the number of fields in the period type.
*
* @return the number of fields
*/
def size: Int = {
return iTypes.length
}
/**
* Gets the field type by index.
*
* @param index the index to retrieve
* @return the field type
* @throws IndexOutOfBoundsException if the index is invalid
*/
def getFieldType(index: Int): DurationFieldType = {
return iTypes(index)
}
/**
* Checks whether the field specified is supported by this period.
*
* @param type the type to check, may be null which returns false
* @return true if the field is supported
*/
def isSupported(`type`: DurationFieldType): Boolean = {
return (indexOf(`type`) >= 0)
}
/**
* Gets the index of the field in this period.
*
* @param type the type to check, may be null which returns -1
   * @return the index, or -1 if not supported
*/
def indexOf(`type`: DurationFieldType): Int = {
    var i: Int = 0
    val isize: Int = size
    while (i < isize) {
      if (iTypes(i) eq `type`) {
        return i
      }
      i += 1
    }
    return -1
}
/**
   * Gets a debugging string representation.
*
* @return a string
*/
override def toString: String = {
return "PeriodType[" + getName + "]"
}
/**
* Gets the indexed field part of the period.
*
* @param period the period to query
* @param index the index to use
* @return the value of the field, zero if unsupported
*/
private[time] def getIndexedField(period: ReadablePeriod, index: Int): Int = {
val realIndex: Int = iIndices(index)
return (if (realIndex == -1) 0 else period.getValue(realIndex))
}
/**
* Sets the indexed field part of the period.
*
* @param period the period to query
* @param index the index to use
* @param values the array to populate
* @param newValue the value to set
* @throws UnsupportedOperationException if not supported
*/
private[time] def setIndexedField(period: ReadablePeriod, index: Int, values: Array[Int], newValue: Int): Boolean = {
val realIndex: Int = iIndices(index)
if (realIndex == -1) {
throw new UnsupportedOperationException("Field is not supported")
}
values(realIndex) = newValue
return true
}
/**
* Adds to the indexed field part of the period.
*
* @param period the period to query
* @param index the index to use
* @param values the array to populate
* @param valueToAdd the value to add
* @return true if the array is updated
* @throws UnsupportedOperationException if not supported
*/
private[time] def addIndexedField(period: ReadablePeriod, index: Int, values: Array[Int], valueToAdd: Int): Boolean = {
if (valueToAdd == 0) {
return false
}
val realIndex: Int = iIndices(index)
if (realIndex == -1) {
throw new UnsupportedOperationException("Field is not supported")
}
values(realIndex) = FieldUtils.safeAdd(values(realIndex), valueToAdd)
return true
}
/**
* Returns a version of this PeriodType instance that does not support years.
*
* @return a new period type that supports the original set of fields except years
*/
def withYearsRemoved: PeriodType = {
return withFieldRemoved(0, "NoYears")
}
/**
* Returns a version of this PeriodType instance that does not support months.
*
* @return a new period type that supports the original set of fields except months
*/
def withMonthsRemoved: PeriodType = {
return withFieldRemoved(1, "NoMonths")
}
/**
* Returns a version of this PeriodType instance that does not support weeks.
*
* @return a new period type that supports the original set of fields except weeks
*/
def withWeeksRemoved: PeriodType = {
return withFieldRemoved(2, "NoWeeks")
}
/**
* Returns a version of this PeriodType instance that does not support days.
*
* @return a new period type that supports the original set of fields except days
*/
def withDaysRemoved: PeriodType = {
return withFieldRemoved(3, "NoDays")
}
/**
* Returns a version of this PeriodType instance that does not support hours.
*
* @return a new period type that supports the original set of fields except hours
*/
def withHoursRemoved: PeriodType = {
return withFieldRemoved(4, "NoHours")
}
/**
* Returns a version of this PeriodType instance that does not support minutes.
*
* @return a new period type that supports the original set of fields except minutes
*/
def withMinutesRemoved: PeriodType = {
return withFieldRemoved(5, "NoMinutes")
}
/**
* Returns a version of this PeriodType instance that does not support seconds.
*
* @return a new period type that supports the original set of fields except seconds
*/
def withSecondsRemoved: PeriodType = {
return withFieldRemoved(6, "NoSeconds")
}
/**
* Returns a version of this PeriodType instance that does not support milliseconds.
*
* @return a new period type that supports the original set of fields except milliseconds
*/
def withMillisRemoved: PeriodType = {
return withFieldRemoved(7, "NoMillis")
}
/**
* Removes the field specified by indices index.
*
* @param indicesIndex the index to remove
* @param name the name addition
* @return the new type
*/
private def withFieldRemoved(indicesIndex: Int, name: String): PeriodType = {
val fieldIndex: Int = iIndices(indicesIndex)
if (fieldIndex == -1) {
return this
}
    val types: Array[DurationFieldType] = new Array[DurationFieldType](size - 1)
    var i: Int = 0
    while (i < iTypes.length) {
      if (i < fieldIndex) {
        types(i) = iTypes(i)
      } else if (i > fieldIndex) {
        types(i - 1) = iTypes(i)
      }
      i += 1
    }
    val indices: Array[Int] = new Array[Int](8)
    var j: Int = 0
    while (j < indices.length) {
      if (j < indicesIndex) {
        indices(j) = iIndices(j)
      } else if (j > indicesIndex) {
        indices(j) = (if (iIndices(j) == -1) -1 else iIndices(j) - 1)
      } else {
        indices(j) = -1
      }
      j += 1
    }
return new PeriodType(getName + name, types, indices)
}
/**
* Compares this type to another object.
* To be equal, the object must be a PeriodType with the same set of fields.
*
* @param obj the object to compare to
* @return true if equal
*/
  override def equals(obj: Any): Boolean = obj match {
    case other: PeriodType => (this eq other) || iTypes.sameElements(other.iTypes)
    case _ => false
  }
/**
* Returns a hashcode based on the field types.
*
* @return a suitable hashcode
*/
override def hashCode: Int = {
var hash: Int = 0
    var i: Int = 0
    while (i < iTypes.length) {
      hash += iTypes(i).hashCode
      i += 1
    }
return hash
}
}
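// A minimal usage sketch of the factory methods above (hypothetical values, not part of
// the original file); it relies only on the DurationFieldType singletons already used in
// this file:
object PeriodTypeUsageSketch {
  // a predefined type with years, months and days only
  val ymd: PeriodType = PeriodType.yearMonthDay

  // a custom type built from a subset of the 8 standard fields via forFields
  val daysAndHours: PeriodType = PeriodType.forFields(
    Array[DurationFieldType](DurationFieldType.days, DurationFieldType.hours))

  // field support can be queried on any type
  val supportsWeeks: Boolean = ymd.isSupported(DurationFieldType.weeks) // false
}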
|
aparo/scalajs-joda
|
src/main/scala/org/joda/time/PeriodType.scala
|
Scala
|
apache-2.0
| 22,603 |
package org.pgscala
package builder
package converters
object PGNullableURLConverterBuilder extends PGNullableConverterBuilder {
override val imports = """
import java.net.URL;
import java.net.MalformedURLException;
"""
val pgType = "varchar"
val clazz = "java.net.URL"
val to = "url.toString()"
val from = "new URL(url)"
override val fromThrowsExceptions = Seq("MalformedURLException")
}
|
melezov/pgscala
|
builder/src/main/scala/org/pgscala/builder/converters/java/PGNullableURLConverterBuilder.scala
|
Scala
|
bsd-3-clause
| 407 |
package magnolia1
import language.experimental.macros
import scala.annotation.tailrec
import scala.reflect.*
case class TypeInfo(
owner: String,
short: String,
typeParams: Iterable[TypeInfo]
):
def full: String = s"$owner.$short"
object CaseClass:
trait Param[Typeclass[_], Type](
val label: String,
val index: Int,
val repeated: Boolean,
val annotations: List[Any],
val typeAnnotations: List[Any]
):
type PType
def typeclass: Typeclass[PType]
def deref(param: Type): PType
def default: Option[PType]
override def toString: String = s"Param($label)"
object Param:
def apply[F[_], T, P](
name: String,
idx: Int,
repeated: Boolean,
cbn: CallByNeed[F[P]],
defaultVal: CallByNeed[Option[P]],
annotations: List[Any],
typeAnnotations: List[Any]
): Param[F, T] =
new CaseClass.Param[F, T](
name,
idx,
repeated,
annotations,
typeAnnotations
):
type PType = P
def default: Option[PType] = defaultVal.value
def typeclass = cbn.value
def deref(value: T): P =
value.asInstanceOf[Product].productElement(idx).asInstanceOf[P]
end Param
end CaseClass
abstract class CaseClass[Typeclass[_], Type](
val typeInfo: TypeInfo,
val isObject: Boolean,
val isValueClass: Boolean,
val params: List[CaseClass.Param[Typeclass, Type]],
val annotations: List[Any],
val typeAnnotations: List[Any]
) extends Serializable:
type Param = CaseClass.Param[Typeclass, Type]
override def toString: String =
s"CaseClass(${typeInfo.full}, ${params.mkString(",")})"
def construct[PType](makeParam: Param => PType)(using ClassTag[PType]): Type
def constructMonadic[Monad[_]: Monadic, PType: ClassTag](
make: Param => Monad[PType]
): Monad[Type]
def constructEither[Err, PType: ClassTag](
makeParam: Param => Either[Err, PType]
): Either[List[Err], Type]
def rawConstruct(fieldValues: Seq[Any]): Type
def param[P](
name: String,
idx: Int,
repeated: Boolean,
cbn: CallByNeed[Typeclass[P]],
defaultVal: CallByNeed[Option[P]],
annotations: List[Any],
typeAnnotations: List[Any]
): Param =
new CaseClass.Param[Typeclass, Type](
name,
idx,
repeated,
annotations,
typeAnnotations
):
type PType = P
def default: Option[PType] = defaultVal.value
def typeclass = cbn.value
def deref(value: Type): P =
value.asInstanceOf[Product].productElement(idx).asInstanceOf[P]
end CaseClass
case class SealedTrait[Typeclass[_], Type](
typeInfo: TypeInfo,
subtypes: List[SealedTrait.Subtype[Typeclass, Type, _]],
annotations: List[Any],
typeAnnotations: List[Any],
isEnum: Boolean
) extends Serializable:
type Subtype[S] = SealedTrait.SubtypeValue[Typeclass, Type, S]
override def toString: String =
s"SealedTrait($typeInfo, List[${subtypes.mkString(",")}])"
def choose[Return](value: Type)(handle: Subtype[_] => Return): Return =
@tailrec
def rec(ix: Int): Return =
if ix < subtypes.length then
val sub = subtypes(ix)
if sub.isDefinedAt(value) then
handle(SealedTrait.SubtypeValue(sub, value))
else rec(ix + 1)
else
throw new IllegalArgumentException(
s"The given value `$value` is not a sub type of `$typeInfo`"
)
rec(0)
end SealedTrait
object SealedTrait:
class Subtype[Typeclass[_], Type, SType](
val typeInfo: TypeInfo,
val annotations: List[Any],
val typeAnnotations: List[Any],
val isObject: Boolean,
val index: Int,
callByNeed: CallByNeed[Typeclass[SType]],
isType: Type => Boolean,
asType: Type => SType & Type
) extends PartialFunction[Type, SType & Type],
Serializable:
def typeclass: Typeclass[SType & Type] =
callByNeed.value.asInstanceOf[Typeclass[SType & Type]]
def cast: PartialFunction[Type, SType & Type] = this
def isDefinedAt(t: Type): Boolean = isType(t)
def apply(t: Type): SType & Type = asType(t)
override def toString: String = s"Subtype(${typeInfo.full})"
object Subtype:
def apply[Typeclass[_], Type, SType](
typeInfo: TypeInfo,
annotations: List[Any],
typeAnnotations: List[Any],
isObject: Boolean,
index: Int,
callByNeed: CallByNeed[Typeclass[SType]]
) =
new Subtype[Typeclass, Type, SType](
typeInfo,
annotations,
typeAnnotations,
isObject,
index,
callByNeed,
_.isInstanceOf[SType & Type],
_.asInstanceOf[SType & Type]
)
class SubtypeValue[Typeclass[_], Type, S](
val subtype: Subtype[Typeclass, Type, S],
v: Type
):
export subtype.{typeclass, typeAnnotations, annotations, cast, typeInfo}
def value: S & Type = cast(v)
end SealedTrait
object CallByNeed:
def apply[A](a: => A): CallByNeed[A] = new CallByNeed(() => a)
final class CallByNeed[+A](private[this] var eval: () => A)
extends Serializable:
lazy val value: A =
val result = eval()
eval = null
result
|
propensive/magnolia
|
src/core/interface.scala
|
Scala
|
apache-2.0
| 5,214 |
package com.cloudray.scalapress.folder.tag
import org.scalatest.{OneInstancePerTest, FunSuite}
import org.scalatest.mock.MockitoSugar
import com.cloudray.scalapress.folder.{FolderDao, Folder}
import javax.servlet.http.HttpServletRequest
import org.mockito.Mockito
import com.cloudray.scalapress.framework.{ScalapressRequest, ScalapressContext}
/** @author Stephen Samuel */
class PrimaryFoldersTagTest extends FunSuite with MockitoSugar with OneInstancePerTest {
val root = new Folder
root.id = 1
val folder1 = new Folder
folder1.id = 123
folder1.name = "Earl Grey"
folder1.parent = root
val folder2 = new Folder
folder2.id = 667
folder2.name = "Assam"
folder2.parent = root
root.subfolders.add(folder1)
root.subfolders.add(folder2)
val req = mock[HttpServletRequest]
val context = new ScalapressContext()
context.folderDao = mock[FolderDao]
val sreq = ScalapressRequest(req, context)
Mockito.when(context.folderDao.root).thenReturn(root)
test("rendering happy path") {
    val actual = PrimaryFoldersTag.render(sreq, Map.empty).get.replaceAll("\\s{2,}", "").replace("\n", "")
assert(
"<span class='cat_link'><a href='/folder-667-assam'>Assam</a></span><span class='cat_link'><a href='/folder-123-earl-grey'>Earl Grey</a></span>" === actual)
}
}
|
vidyacraghav/scalapress
|
src/test/scala/com/cloudray/scalapress/folder/tag/PrimaryFoldersTagTest.scala
|
Scala
|
apache-2.0
| 1,303 |
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2003-2010, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala.collection
package mutable
import generic._
/** The canonical builder for collections that are addable, i.e. that support an efficient `+` method
* which adds an element to the collection.
*
* Collections are built from their empty element using this `+` method.
* @param empty the empty element of the collection.
* @tparam Elem the type of elements that get added to the builder.
* @tparam To the type of the built collection.
*
* @note "efficient `+`" is not idle talk. Do not use this on mutable collections or any others
* for which `+` may perform an unshared copy! See GrowingBuilder comments for more.
*
* @author Martin Odersky
* @version 2.8
* @since 2.8
*/
class AddingBuilder[Elem, To <: Addable[Elem, To] with collection.Iterable[Elem] with collection.IterableLike[Elem, To]](empty: To)
extends Builder[Elem, To] {
protected var elems: To = empty
def +=(x: Elem): this.type = { elems = elems + x; this }
def clear() { elems = empty }
def result: To = elems
}
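// A minimal usage sketch, assuming the Scala 2.8-era standard library this file ships
// with (where immutable.TreeSet still mixes in Addable and therefore satisfies the
// `To` bound); later Scala versions removed Addable, so this is illustrative only:
object AddingBuilderSketch {
  import scala.collection.immutable.TreeSet

  def sortedInts: TreeSet[Int] = {
    val builder = new AddingBuilder[Int, TreeSet[Int]](TreeSet.empty[Int])
    builder += 3
    builder += 1
    builder += 2
    builder.result // TreeSet(1, 2, 3)
  }
}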
|
cran/rkafkajars
|
java/scala/collection/mutable/AddingBuilder.scala
|
Scala
|
apache-2.0
| 1,563 |
package algebra
package ring
import scala.{specialized => sp}
/**
* CommutativeRig is a Rig that is commutative under multiplication.
*/
trait CommutativeRig[@sp(Int, Long, Float, Double) A] extends Any with Rig[A] with CommutativeSemiring[A] with MultiplicativeCommutativeMonoid[A]
object CommutativeRig extends AdditiveMonoidFunctions[CommutativeRig] with MultiplicativeMonoidFunctions[CommutativeRig] {
@inline final def apply[A](implicit r: CommutativeRig[A]): CommutativeRig[A] = r
}
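// A minimal sketch (not part of the library) of a CommutativeRig instance, assuming the
// only abstract members are zero/plus from the additive monoid and one/times from the
// multiplicative monoid:
private[ring] object BigIntCommutativeRig extends CommutativeRig[BigInt] {
  def zero: BigInt = BigInt(0)
  def one: BigInt = BigInt(1)
  def plus(x: BigInt, y: BigInt): BigInt = x + y
  def times(x: BigInt, y: BigInt): BigInt = x * y
}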
|
sritchie/algebra
|
core/src/main/scala/algebra/ring/CommutativeRig.scala
|
Scala
|
mit
| 496 |
package com.github.easel.sjscharts.morrisjs
import org.scalajs.dom
import scala.scalajs.js
import scala.scalajs.js.JSConverters._
import scala.scalajs.js.annotation.{JSGlobal, JSName}
import scala.scalajs.js.{JSON, UndefOr, |}
/**
* Created by erik on 1/7/16.
*/
//scalastyle:off
@js.native
@JSGlobal("Morris")
object MorrisStatic extends js.Object {
def Donut(params: DonutParams): ChartInstance = js.native
def Bar(params: BarParams): ChartInstance = js.native
}
@js.native
trait ChartInstance extends js.Object {
val raphael: js.Dynamic = js.native
def redraw(): Unit = js.native
def setData(data: js.Dynamic, redraw: UndefOr[Boolean] = js.undefined): Unit = js.native
}
@js.native
trait LabelValue extends js.Object {
val label: String
val value: Double
}
@js.native
trait Params extends js.Object {
val element: dom.Element | String = js.native
val resize: Boolean = js.native
}
@js.native
trait DonutParams extends Params {
val data: Array[LabelValue] = js.native
val colors: js.UndefOr[Array[String]] = js.native
val backgroundColor: js.UndefOr[String] = js.native
val labelColor: js.UndefOr[String] = js.native
}
@js.native
trait BarParams extends Params {
val data: Array[js.Object] = js.native
val xkey: String = js.native
val ykeys: Array[String] = js.native
val labels: Array[String] = js.native
val barColors: js.UndefOr[Array[String]] = js.native
val stacked: Boolean = js.native
val hideHover: String | Boolean = js.native
def hoverCallback(index: Int, options: js.Dynamic, original: String, row: js.Dynamic): String = js.native
val axes: Boolean = js.native
val grid: Boolean = js.native
val gridTextColor: String = js.native
val gridTextSize: String = js.native
val gridTextFamily: String = js.native
val gridTextWeight: String = js.native
}
object Morris {
trait ChartDefinition[T <: Params] {
def build: T
def element: dom.Element
def render(): ChartInstance
def withElement(element: dom.Element): ChartDefinition[T]
def chartData: js.Dynamic = js.Dynamic.literal()
}
case class Donut(
data: Map[String, Double],
hideHover: Boolean = false,
colors: Seq[String] = Seq.empty,
resize: Boolean = false,
element: dom.Element = dom.document.body
) extends ChartDefinition[DonutParams] {
def withElement(element: dom.Element) = this.copy(element = element)
override def chartData: js.Dynamic = data.map {
case (k, v) ⇒
js.Dynamic.literal(label = k, value = v)
}.toJSArray.asInstanceOf[js.Dynamic]
def build: DonutParams = {
val chartParams = js.Dynamic.literal(
element = element,
data = chartData,
resize = resize,
colors = if (colors.isEmpty) () else UndefOr.any2undefOrA(colors.toJSArray)
).asInstanceOf[DonutParams]
chartParams
}
def render() = MorrisStatic.Donut(build)
}
case class Bar(
data: Seq[(String, Seq[Option[Double]])],
labels: Seq[String],
stacked: Boolean = false,
hideHover: Boolean = false,
barColors: Seq[String] = Seq.empty,
resize: Boolean = false,
element: dom.Element = dom.document.body,
hoverCallback: Option[(Int, js.Dynamic, String, js.Dynamic) ⇒ String] = None
) extends ChartDefinition[BarParams] {
def withElement(element: dom.Element) = this.copy(element = element)
def build: BarParams = {
val jsData = data.map {
case (x, y) ⇒
val datum = js.Dynamic.literal("x" → x)
y.indices.foreach { i ⇒
val yAttr = s"y$i"
datum.updateDynamic(yAttr)(y(i).orUndefined)
yAttr
}
datum
}.toJSArray
val yKeys = data.headOption.map(_._2.indices.map(i ⇒ s"y$i")).getOrElse(Seq.empty).toJSArray
val params = js.Dynamic.literal(
element = element,
data = jsData,
labels = labels.toJSArray,
xkey = "x",
ykeys = yKeys,
stacked = stacked,
hideHover = hideHover,
resize = resize,
barColors = if (barColors.isEmpty) () else UndefOr.any2undefOrA(barColors.toJSArray),
hoverCallback = hoverCallback.orUndefined
).asInstanceOf[BarParams]
params
}
def render() = MorrisStatic.Bar(build)
}
}
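// A minimal usage sketch (hypothetical element id and data; assumes the Morris.js and
// Raphael scripts are loaded on the page):
object MorrisUsageSketch {
  def drawDonut(): ChartInstance =
    Morris.Donut(
      data = Map("Scala" -> 60.0, "JavaScript" -> 40.0),
      colors = Seq("#336699", "#99ccff"),
      resize = true
    ).withElement(dom.document.getElementById("chart")).render()
}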
|
easel/scalajs-charts
|
src/main/scala/com/github/easel/sjscharts/morrisjs/Morris.scala
|
Scala
|
mit
| 4,711 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs105.boxes
import uk.gov.hmrc.ct.accounts.frs105.retriever.Frs105AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.box.retriever.FilingAttributesBoxValueRetriever
case class AC8087(value: Option[Boolean]) extends CtBoxIdentifier(name = "The directors acknowledge their responsibilities for complying with the requirements of the Act with respect to accounting records and the preparation of accounts.")
with CtOptionalBoolean
with Input
with ValidatableBox[Frs105AccountsBoxRetriever]
with Validators {
override def validate(boxRetriever: Frs105AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateAsMandatory(this)
)
}
}
|
liquidarmour/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/accounts/frs105/boxes/AC8087.scala
|
Scala
|
apache-2.0
| 1,329 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.logical
import org.apache.flink.table.api.TableException
import org.apache.flink.table.plan.nodes.logical._
import org.apache.flink.table.plan.util.{FlinkRexUtil, RankUtil}
import org.apache.flink.table.runtime.rank.VariableRankRange
import org.apache.calcite.plan.RelOptRule.{any, operand}
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelOptUtil}
import org.apache.calcite.rel.RelCollations
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Calc
import org.apache.calcite.rex.{RexBuilder, RexInputRef, RexProgram}
import org.apache.calcite.util.ImmutableBitSet
import scala.collection.JavaConversions._
/**
* Planner rule that transposes [[FlinkLogicalCalc]] past [[FlinkLogicalRank]]
* to reduce rank input fields.
*/
class CalcRankTransposeRule
extends RelOptRule(
operand(classOf[FlinkLogicalCalc],
operand(classOf[FlinkLogicalRank], any())),
"CalcRankTransposeRule") {
override def matches(call: RelOptRuleCall): Boolean = {
val calc: FlinkLogicalCalc = call.rel(0)
val rank: FlinkLogicalRank = call.rel(1)
val totalColumnCount = rank.getInput.getRowType.getFieldCount
// apply the rule only when calc could prune some columns
val pushableColumns = getPushableColumns(calc, rank)
pushableColumns.length < totalColumnCount
}
override def onMatch(call: RelOptRuleCall): Unit = {
val calc: FlinkLogicalCalc = call.rel(0)
val rank: FlinkLogicalRank = call.rel(1)
val pushableColumns = getPushableColumns(calc, rank)
val rexBuilder = calc.getCluster.getRexBuilder
// create a new Calc to project columns of Rank's input
val innerProgram = createNewInnerCalcProgram(
pushableColumns,
rank.getInput.getRowType,
rexBuilder)
val newInnerCalc = calc.copy(calc.getTraitSet, rank.getInput, innerProgram)
// create a new Rank on top of new Calc
var fieldMapping = pushableColumns.zipWithIndex.toMap
val newRank = createNewRankOnCalc(fieldMapping, newInnerCalc, rank)
// create a new Calc on top of newRank if needed
if (rank.outputRankNumber) {
// append RankNumber field mapping
val oldRankFunFieldIdx = RankUtil.getRankNumberColumnIndex(rank)
.getOrElse(throw new TableException("This should not happen"))
val newRankFunFieldIdx = RankUtil.getRankNumberColumnIndex(newRank)
.getOrElse(throw new TableException("This should not happen"))
fieldMapping += (oldRankFunFieldIdx -> newRankFunFieldIdx)
}
val topProgram = createNewTopCalcProgram(
calc.getProgram,
fieldMapping,
newRank.getRowType,
rexBuilder)
val equiv = if (topProgram.isTrivial) {
      // Ignore newTopCalc if its program is trivial
newRank
} else {
calc.copy(calc.getTraitSet, newRank, topProgram)
}
call.transformTo(equiv)
}
private def getPushableColumns(calc: Calc, rank: FlinkLogicalRank): Array[Int] = {
val usedFields = getUsedFields(calc.getProgram)
val rankFunFieldIndex = RankUtil.getRankNumberColumnIndex(rank).getOrElse(-1)
val usedFieldsExcludeRankNumber = usedFields.filter(_ != rankFunFieldIndex)
val requiredFields = getKeyFields(rank)
usedFieldsExcludeRankNumber.union(requiredFields).distinct.sorted
}
private def getUsedFields(program: RexProgram): Array[Int] = {
val projects = program.getProjectList.map(program.expandLocalRef)
val condition = if (program.getCondition != null) {
program.expandLocalRef(program.getCondition)
} else {
null
}
RelOptUtil.InputFinder.bits(projects, condition).toArray
}
private def getKeyFields(rank: FlinkLogicalRank): Array[Int] = {
val partitionKey = rank.partitionKey.toArray
val orderKey = rank.orderKey.getFieldCollations.map(_.getFieldIndex).toArray
val uniqueKeys = rank.getCluster.getMetadataQuery.getUniqueKeys(rank.getInput)
val keysInUniqueKeys = if (uniqueKeys == null || uniqueKeys.isEmpty) {
Array[Int]()
} else {
uniqueKeys.flatMap(_.toArray).toArray
}
val rankRangeKey = rank.rankRange match {
case v: VariableRankRange => Array(v.getRankEndIndex)
case _ => Array[Int]()
}
// All key including partition key, order key, unique keys, VariableRankRange rankEndIndex
Set(partitionKey, orderKey, keysInUniqueKeys, rankRangeKey).flatten.toArray
}
private def createNewInnerCalcProgram(
projectedFields: Array[Int],
inputRowType: RelDataType,
rexBuilder: RexBuilder): RexProgram = {
val projects = projectedFields.map(RexInputRef.of(_, inputRowType))
val inputColNames = inputRowType.getFieldNames
val colNames = projectedFields.map(inputColNames.get)
RexProgram.create(inputRowType, projects.toList, null, colNames.toList, rexBuilder)
}
private def createNewTopCalcProgram(
oldTopProgram: RexProgram,
fieldMapping: Map[Int, Int],
inputRowType: RelDataType,
rexBuilder: RexBuilder): RexProgram = {
val oldProjects = oldTopProgram.getProjectList
val newProjects = oldProjects.map(oldTopProgram.expandLocalRef).map {
p => FlinkRexUtil.adjustInputRef(p, fieldMapping)
}
val oldCondition = oldTopProgram.getCondition
val newCondition = if (oldCondition != null) {
FlinkRexUtil.adjustInputRef(oldTopProgram.expandLocalRef(oldCondition), fieldMapping)
} else {
null
}
val colNames = oldTopProgram.getOutputRowType.getFieldNames
RexProgram.create(
inputRowType,
newProjects,
newCondition,
colNames,
rexBuilder)
}
private def createNewRankOnCalc(
fieldMapping: Map[Int, Int],
input: Calc,
rank: FlinkLogicalRank): FlinkLogicalRank = {
val newPartitionKey = rank.partitionKey.toArray.map(fieldMapping(_))
val oldOrderKey = rank.orderKey
val oldFieldCollations = oldOrderKey.getFieldCollations
val newFieldCollations = oldFieldCollations.map {
fc => fc.copy(fieldMapping(fc.getFieldIndex))
}
val newOrderKey = if (newFieldCollations.eq(oldFieldCollations)) {
oldOrderKey
} else {
RelCollations.of(newFieldCollations)
}
new FlinkLogicalRank(
rank.getCluster,
rank.getTraitSet,
input,
ImmutableBitSet.of(newPartitionKey: _*),
newOrderKey,
rank.rankType,
rank.rankRange,
rank.rankNumberType,
rank.outputRankNumber)
}
}
object CalcRankTransposeRule {
val INSTANCE = new CalcRankTransposeRule
}
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/rules/logical/CalcRankTransposeRule.scala
|
Scala
|
apache-2.0
| 7,346 |
import sbt._
import com.twitter.sbt._
class ChainsawProject(info: ProjectInfo) extends StandardProject(info) with SubversionPublisher {
override def subversionRepository = Some("http://svn.local.twitter.com/maven/")
val slf4jVersion = "1.5.8"
val slf4jApi = "org.slf4j" % "slf4j-api" % slf4jVersion withSources
val slf4j_simple = "org.slf4j" % "slf4j-simple" % slf4jVersion % "provided"
val specs = "org.scala-tools.testing" % "specs_2.8.1" % "1.6.6" % "test"
val mockito = "org.mockito" % "mockito-all" % "1.8.5" % "test"
}
|
twitter-archive/chainsaw
|
project/build/project.scala
|
Scala
|
apache-2.0
| 541 |
package tastytest
import IntersectionErasure._
object TestIntersectionErasure extends Suite("TestIntersectionErasure") {
def boxedId[T](t: T): T = t
val bWithA: B with A = new B with A {} // dotc erases to A, scalac to B
test("SAM bridges") {
val sam: IntersectionSAM = x => x
assert(sam(bWithA) === bWithA)
}
test("VC param")(
assert(boxedId(new IntersectionVC(bWithA)).unwrapped == bWithA)
)
test("VC method unboxed")(
assert(boxedId(new IntersectionVC(bWithA)).matchesInternal(bWithA))
)
test("VC method boxed")(
assert(boxedId(new IntersectionVC(bWithA)).matches(new IntersectionVC(bWithA)))
)
test("VC parametric param")(
assert(boxedId(new IntersectionVCParametric(bWithA)).unwrapped == bWithA)
)
test("VC parametric method unboxed")(
assert(boxedId(new IntersectionVCParametric(bWithA)).matchesInternal(bWithA))
)
test("VC parametric method boxed")(
assert(boxedId(new IntersectionVCParametric(bWithA)).matches(new IntersectionVCParametric(bWithA)))
)
}
|
scala/scala
|
test/tasty/run/src-2/tastytest/TestIntersectionErasure.scala
|
Scala
|
apache-2.0
| 1,037 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.controller
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
import scala.concurrent.Future
import scala.util.{Failure, Success}
import akka.Done
import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import spray.json._
import spray.json.DefaultJsonProtocol._
import kamon.Kamon
import whisk.common.AkkaLogging
import whisk.common.Logging
import whisk.common.LoggingMarkers
import whisk.common.TransactionId
import whisk.core.WhiskConfig
import whisk.core.connector.MessagingProvider
import whisk.core.database.RemoteCacheInvalidation
import whisk.core.database.CacheChangeNotification
import whisk.core.entitlement._
import whisk.core.entity._
import whisk.core.entity.ActivationId.ActivationIdGenerator
import whisk.core.entity.ExecManifest.Runtimes
import whisk.core.loadBalancer.{LoadBalancerService}
import whisk.http.BasicHttpService
import whisk.http.BasicRasService
import whisk.spi.SpiLoader
import whisk.core.containerpool.logging.LogStoreProvider
/**
* The Controller is the service that provides the REST API for OpenWhisk.
*
* It extends the BasicRasService so it includes a ping endpoint for monitoring.
*
* Akka sends messages to akka Actors -- the Controller is an Actor, ready to receive messages.
*
 * It is possible to deploy a hot-standby controller. Each controller needs its own instance id; instance ids are
 * numbered consecutively, starting with 0.
 * The state and cache of a controller are not shared with the other controllers.
 * If the base controller crashes, the hot-standby controller takes over. Once the base controller is up again,
 * it is used again. Because its cache is empty after the restart, this causes no inconsistency.
 * The only problematic case is a base controller that becomes unreachable without restarting. After switching
 * back to the base controller, its cache could be inconsistent (e.g. if a user has updated an action). Such an
 * inconsistency resolves itself once the cached item is removed, 5 minutes after it was generated.
*
* Uses the Akka routing DSL: http://doc.akka.io/docs/akka-http/current/scala/http/routing-dsl/overview.html
*
* @param config A set of properties needed to run an instance of the controller service
* @param instance if running in scale-out, a unique identifier for this instance in the group
* @param verbosity logging verbosity
* @param executionContext Scala runtime support for concurrent operations
*/
class Controller(val instance: InstanceId,
runtimes: Runtimes,
implicit val whiskConfig: WhiskConfig,
implicit val actorSystem: ActorSystem,
implicit val materializer: ActorMaterializer,
implicit val logging: Logging)
extends BasicRasService {
override val numberOfInstances = whiskConfig.controllerInstances.toInt
override val instanceOrdinal = instance.toInt
TransactionId.controller.mark(
this,
LoggingMarkers.CONTROLLER_STARTUP(instance.toInt),
s"starting controller instance ${instance.toInt}")
/**
* A Route in Akka is technically a function taking a RequestContext as a parameter.
*
* The "~" Akka DSL operator composes two independent Routes, building a routing tree structure.
* @see http://doc.akka.io/docs/akka-http/current/scala/http/routing-dsl/routes.html#composing-routes
*/
override def routes(implicit transid: TransactionId): Route = {
super.routes ~ {
(pathEndOrSingleSlash & get) {
complete(info)
}
} ~ apiV1.routes ~ swagger.swaggerRoutes ~ internalInvokerHealth
}
// initialize datastores
private implicit val authStore = WhiskAuthStore.datastore(whiskConfig)
private implicit val entityStore = WhiskEntityStore.datastore(whiskConfig)
private implicit val activationStore = WhiskActivationStore.datastore(whiskConfig)
private implicit val cacheChangeNotification = Some(new CacheChangeNotification {
val remoteCacheInvalidaton = new RemoteCacheInvalidation(whiskConfig, "controller", instance)
override def apply(k: CacheKey) = {
remoteCacheInvalidaton.invalidateWhiskActionMetaData(k)
remoteCacheInvalidaton.notifyOtherInstancesAboutInvalidation(k)
}
})
// initialize backend services
private implicit val loadBalancer = new LoadBalancerService(whiskConfig, instance, entityStore)
private implicit val entitlementProvider = new LocalEntitlementProvider(whiskConfig, loadBalancer)
private implicit val activationIdFactory = new ActivationIdGenerator {}
private implicit val logStore = SpiLoader.get[LogStoreProvider].logStore(actorSystem)
// register collections
Collection.initialize(entityStore)
/** The REST APIs. */
implicit val controllerInstance = instance
private val apiV1 = new RestAPIVersion(whiskConfig, "api", "v1")
private val swagger = new SwaggerDocs(Uri.Path.Empty, "infoswagger.json")
/**
* Handles GET /invokers URI.
*
* @return JSON of invoker health
*/
private val internalInvokerHealth = {
implicit val executionContext = actorSystem.dispatcher
(path("invokers") & get) {
complete {
loadBalancer.allInvokers.map(_.map {
case (instance, state) => s"invoker${instance.toInt}" -> state.asString
}.toMap.toJson.asJsObject)
}
}
}
// controller top level info
private val info = Controller.info(whiskConfig, runtimes, List(apiV1.basepath()))
}
/**
* Singleton object provides a factory to create and start an instance of the Controller service.
*/
object Controller {
// requiredProperties is a Map whose keys define properties that must be bound to
// a value, and whose values are default values. A null value in the Map means there is
// no default value specified, so it must appear in the properties file
def requiredProperties =
Map(WhiskConfig.controllerInstances -> null) ++
ExecManifest.requiredProperties ++
RestApiCommons.requiredProperties ++
LoadBalancerService.requiredProperties ++
EntitlementProvider.requiredProperties
private def info(config: WhiskConfig, runtimes: Runtimes, apis: List[String]) =
JsObject(
"description" -> "OpenWhisk".toJson,
"support" -> JsObject(
"github" -> "https://github.com/apache/incubator-openwhisk/issues".toJson,
"slack" -> "http://slack.openwhisk.org".toJson),
"api_paths" -> apis.toJson,
"limits" -> JsObject(
"actions_per_minute" -> config.actionInvokePerMinuteLimit.toInt.toJson,
"triggers_per_minute" -> config.triggerFirePerMinuteLimit.toInt.toJson,
"concurrent_actions" -> config.actionInvokeConcurrentLimit.toInt.toJson),
"runtimes" -> runtimes.toJson)
def main(args: Array[String]): Unit = {
Kamon.start()
implicit val actorSystem = ActorSystem("controller-actor-system")
implicit val logger = new AkkaLogging(akka.event.Logging.getLogger(actorSystem, this))
// Prepare Kamon shutdown
CoordinatedShutdown(actorSystem).addTask(CoordinatedShutdown.PhaseActorSystemTerminate, "shutdownKamon") { () =>
logger.info(this, s"Shutting down Kamon with coordinated shutdown")
Kamon.shutdown()
Future.successful(Done)
}
// extract configuration data from the environment
val config = new WhiskConfig(requiredProperties)
val port = config.servicePort.toInt
// if deploying multiple instances (scale out), must pass the instance number as the first command-line argument
require(args.length >= 1, "controller instance required")
val instance = args(0).toInt
def abort(message: String) = {
logger.error(this, message)
actorSystem.terminate()
Await.result(actorSystem.whenTerminated, 30.seconds)
sys.exit(1)
}
if (!config.isValid) {
abort("Bad configuration, cannot start.")
}
val msgProvider = SpiLoader.get[MessagingProvider]
if (!msgProvider.ensureTopic(
config,
"completed" + instance,
Map(
"numPartitions" -> "1",
"replicationFactor" -> config.kafkaReplicationFactor,
"retention.bytes" -> config.kafkaTopicsCompletedRetentionBytes,
"retention.ms" -> config.kafkaTopicsCompletedRetentionMS,
"segment.bytes" -> config.kafkaTopicsCompletedSegmentBytes))) {
abort(s"failure during msgProvider.ensureTopic for topic completed$instance")
}
ExecManifest.initialize(config) match {
case Success(_) =>
val controller = new Controller(
InstanceId(instance),
ExecManifest.runtimesManifest,
config,
actorSystem,
ActorMaterializer.create(actorSystem),
logger)
BasicHttpService.startService(controller.route, port)(actorSystem, controller.materializer)
case Failure(t) =>
abort(s"Invalid runtimes manifest: $t")
}
}
}
|
paulcastro/openwhisk
|
core/controller/src/main/scala/whisk/core/controller/Controller.scala
|
Scala
|
apache-2.0
| 9,919 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.authenticator.pipeline
import silhouette.Fitting._
import silhouette.authenticator._
import silhouette.{ AuthState, Identity, LoginInfo }
import scala.concurrent.{ ExecutionContext, Future }
/**
* An authentication pipeline which reads an authenticator from a source and transforms it to an authentication state.
*
* @param reads The reads which transforms a source into an authenticator.
* @param identityReader The reader to retrieve the [[Identity]] for the [[LoginInfo]] stored in the
* [[silhouette.authenticator.Authenticator]] from the persistence layer.
* @param validators The list of validators to apply to the [[silhouette.authenticator.Authenticator]].
* @tparam S The type of the source.
* @tparam I The type of the identity.
*/
final case class ReadsAuthenticationPipeline[S, I <: Identity](
reads: Reads[S],
override protected val identityReader: LoginInfo => Future[Option[I]],
override protected val validators: Set[Validator] = Set()
)(
implicit
ec: ExecutionContext
) extends AuthenticationPipeline[Option[S], I] {
/**
* Apply the pipeline.
*
* @param source The source to read the authenticator from.
* @return An authentication state.
*/
override def read(source: Option[S]): Future[AuthState[I, Authenticator]] = source.andThenFuture(reads).toState
}
|
mohiva/silhouette
|
modules/authenticator/src/main/scala/silhouette/authenticator/pipeline/ReadsAuthenticationPipeline.scala
|
Scala
|
apache-2.0
| 2,134 |
package com.arcusys.valamis.web.configuration.database
import com.arcusys.valamis.persistence.common.SlickProfile
import com.arcusys.valamis.persistence.impl.slide.SlideTableComponent
import com.arcusys.valamis.slide.model.DeviceEntity
import slick.driver.JdbcProfile
import slick.jdbc.JdbcBackend
class CreateDefaultDevices(val driver: JdbcProfile, db: JdbcBackend#DatabaseDef)
extends SlideTableComponent
with SlickProfile {
import driver.simple._
def create(): Unit = {
val defaultDevices = db.withSession { implicit session =>
devices.firstOption
}
if (defaultDevices.isEmpty) {
val devicesList = Seq(
createDevice("desktop", 1024, 0, 768, 40),
createDevice("tablet", 768, 1023, 1024, 30),
createDevice("phone", 375, 767, 667, 20)
)
db.withTransaction { implicit session =>
devices ++= devicesList
}
}
}
private def createDevice(name: String,
minWidth: Int,
maxWidth: Int,
minHeight: Int,
margin: Int) = {
DeviceEntity(
name = name,
minWidth = minWidth,
maxWidth = maxWidth,
minHeight = minHeight,
margin = margin
)
}
}
|
igor-borisov/valamis
|
valamis-portlets/src/main/scala/com/arcusys/valamis/web/configuration/database/CreateDefaultDevices.scala
|
Scala
|
gpl-3.0
| 1,271 |
/*
* Copyright 2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.pattern.orchestration
import scala.collection.generic.CanBuildFrom
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
/** The trait that represents futures like scala.concurrent.Future BUT works in a single threaded environment.
* It does not have blocking calls to the future and a future cannot be created from passing a closure.
* The future is obtained from the corresponding Promise or translated from one or more other Futures.
*
* Asynchronous computations that yield futures are created with the `future` call:
*
* {{{
* val s = "Hello"
* val f: Future[String] = future {
* s + " future!"
* }
* f onSuccess {
* case msg => println(msg)
* }
* }}}
*
* @author Philipp Haller, Heather Miller, Aleksandar Prokopec, Viktor Klang
*
* @define multipleCallbacks
* Multiple callbacks may be registered; there is no guarantee that they will be
* executed in a particular order.
*
* @define caughtThrowables
* The future may contain a throwable object and this means that the future failed.
* Futures obtained through combinators have the same exception as the future they were obtained from.
* The following throwable objects are not contained in the future:
* - `Error` - errors are not contained within futures
* - `InterruptedException` - not contained within futures
* - all `scala.util.control.ControlThrowable` except `NonLocalReturnControl` - not contained within futures
*
* Instead, the future is completed with an ExecutionException with one of the exceptions above
* as the cause.
* If a future is failed with a `scala.runtime.NonLocalReturnControl`,
* it is completed with a value from that throwable instead.
*
* @define nonDeterministic
* Note: using this method yields nondeterministic dataflow programs.
*
* @define forComprehensionExamples
* Example:
*
* {{{
* val f = future { 5 }
* val g = future { 3 }
* val h = for {
* x: Int <- f // returns Future(5)
* y: Int <- g // returns Future(3)
* } yield x + y
* }}}
*
* is translated to:
*
* {{{
* f flatMap { (x: Int) => g map { (y: Int) => x + y } }
* }}}
*
* @define callbackInContext
* The provided callback always runs in the provided implicit
* `ExecutionContext`, though there is no guarantee that the
* `execute()` method on the `ExecutionContext` will be called once
* per callback or that `execute()` will be called in the current
* thread. That is, the implementation may run multiple callbacks
* in a batch within a single `execute()` and it may run
* `execute()` either immediately or asynchronously.
*/
trait OFuture[+T] {
/* Callbacks */
/** When this future is completed successfully (i.e. with a value),
* apply the provided partial function to the value if the partial function
* is defined at that value.
*
* If the future has already been completed with a value,
* this will either be applied immediately or be scheduled asynchronously.
*
* $multipleCallbacks
* $callbackInContext
*/
def onSuccess[U](pf: PartialFunction[T, U]): Unit = onComplete {
case Success(v) if pf isDefinedAt v => pf(v)
case _ =>
}
/** When this future is completed with a failure (i.e. with a throwable),
* apply the provided callback to the throwable.
*
* $caughtThrowables
*
* If the future has already been completed with a failure,
* this will either be applied immediately or be scheduled asynchronously.
*
* Will not be called in case that the future is completed with a value.
*
* $multipleCallbacks
* $callbackInContext
*/
def onFailure[U](callback: PartialFunction[Throwable, U]): Unit = onComplete {
case Failure(t) if NonFatal(t) && callback.isDefinedAt(t) => callback(t)
case _ =>
}
/** When this future is completed, either through an exception, or a value,
* apply the provided function.
*
* If the future has already been completed,
* this will either be applied immediately or be scheduled asynchronously.
*
* $multipleCallbacks
* $callbackInContext
*/
def onComplete[U](func: Try[T] => U): Unit
/* Miscellaneous */
/** Returns whether the future has already been completed with
* a value or an exception.
*
* $nonDeterministic
*
* @return `true` if the future is already completed, `false` otherwise
*/
def isCompleted: Boolean
/** The value of this `Future`.
*
* If the future is not completed the returned value will be `None`.
* If the future is completed the value will be `Some(Success(t))`
* if it contains a valid result, or `Some(Failure(error))` if it contains
* an exception.
*/
def value: Option[Try[T]]
/* Projections */
/** Returns a failed projection of this future.
*
* The failed projection is a future holding a value of type `Throwable`.
*
* It is completed with a value which is the throwable of the original future
* in case the original future is failed.
*
* It is failed with a `NoSuchElementException` if the original future is completed successfully.
*
* Blocking on this future returns a value if the original future is completed with an exception
* and throws a corresponding exception if the original future fails.
*/
def failed: OFuture[Throwable] = {
val p = OPromise[Throwable]()
onComplete {
case Failure(t) => p success t
case Success(v) => p failure new NoSuchElementException("Future.failed not completed with a throwable.")
}
p.future
}
/** Returns the successful projection of this future.
*
* If the future has not been completed, a NoSuchElementException is thrown.
* If the future failed, the exception causing the failure is thrown.
* @return The value of this future, provided it is completed.
*/
def apply(): T = {
value match {
case Some(Failure(t)) => throw t
case Some(Success(v)) => v
case None => throw new NoSuchElementException("Future not completed.")
}
}
/* Monadic operations */
/** Asynchronously processes the value in the future once the value becomes available.
*
* Will not be called if the future fails.
*/
def foreach[U](f: T => U): Unit = onComplete {
case Success(r) => f(r)
case _ => // do nothing
}
/** Creates a new future by applying the 's' function to the successful result of
* this future, or the 'f' function to the failed result. If there is any non-fatal
* exception thrown when 's' or 'f' is applied, that exception will be propagated
* to the resulting future.
*
* @param s function that transforms a successful result of the receiver into a
* successful result of the returned future
* @param f function that transforms a failure of the receiver into a failure of
* the returned future
* @return a future that will be completed with the transformed value
*/
def transform[S](s: T => S, f: Throwable => Throwable): OFuture[S] = {
val p = OPromise[S]()
onComplete {
case result =>
try {
result match {
case Failure(t) => p failure f(t)
case Success(r) => p success s(r)
}
} catch {
case NonFatal(t) => p failure t
}
}
p.future
}
/** Creates a new future by applying a function to the successful result of
* this future. If this future is completed with an exception then the new
* future will also contain this exception.
*
* $forComprehensionExamples
*/
def map[S](f: T => S): OFuture[S] = { // transform(f, identity)
val p = OPromise[S]()
onComplete {
case result =>
try {
result match {
case Success(r) => p success f(r)
case f: Failure[_] => p complete f.asInstanceOf[Failure[S]]
}
} catch {
case NonFatal(t) => p failure t
}
}
p.future
}
/** Creates a new future by applying a function to the successful result of
* this future, and returns the result of the function as the new future.
* If this future is completed with an exception then the new future will
* also contain this exception.
*
* $forComprehensionExamples
*/
def flatMap[S](f: T => OFuture[S]): OFuture[S] = {
val p = OPromise[S]()
onComplete {
case f: Failure[_] => p complete f.asInstanceOf[Failure[S]]
case Success(v) =>
try {
f(v).onComplete({
case f: Failure[_] => p complete f.asInstanceOf[Failure[S]]
case Success(v0) => p success v0
})
} catch {
case NonFatal(t) => p failure t
}
}
p.future
}
/** Creates a new future by filtering the value of the current future with a predicate.
*
* If the current future contains a value which satisfies the predicate, the new future will also hold that value.
* Otherwise, the resulting future will fail with a `NoSuchElementException`.
*
* If the current future fails, then the resulting future also fails.
*
* Example:
* {{{
* val f = future { 5 }
* val g = f filter { _ % 2 == 1 }
* val h = f filter { _ % 2 == 0 }
* Await.result(g, Duration.Zero) // evaluates to 5
* Await.result(h, Duration.Zero) // throw a NoSuchElementException
* }}}
*/
def filter(pred: T => Boolean): OFuture[T] = {
val p = OPromise[T]()
onComplete {
case f: Failure[_] => p complete f.asInstanceOf[Failure[T]]
case Success(v) =>
try {
if (pred(v)) p success v
else p failure new NoSuchElementException("Future.filter predicate is not satisfied")
} catch {
case NonFatal(t) => p failure t
}
}
p.future
}
/** Used by for-comprehensions.
*/
final def withFilter(p: T => Boolean): OFuture[T] = filter(p)
// final def withFilter(p: T => Boolean) = new FutureWithFilter[T](this, p)
// final class FutureWithFilter[+S](self: Future[S], p: S => Boolean) {
// def foreach(f: S => Unit): Unit = self filter p foreach f
// def map[R](f: S => R) = self filter p map f
// def flatMap[R](f: S => Future[R]) = self filter p flatMap f
// def withFilter(q: S => Boolean): FutureWithFilter[S] = new FutureWithFilter[S](self, x => p(x) && q(x))
// }
/** Creates a new future by mapping the value of the current future, if the given partial function is defined at that value.
*
* If the current future contains a value for which the partial function is defined, the new future will also hold that value.
* Otherwise, the resulting future will fail with a `NoSuchElementException`.
*
* If the current future fails, then the resulting future also fails.
*
* Example:
* {{{
* val f = future { -5 }
* val g = f collect {
* case x if x < 0 => -x
* }
* val h = f collect {
* case x if x > 0 => x * 2
* }
* Await.result(g, Duration.Zero) // evaluates to 5
* Await.result(h, Duration.Zero) // throw a NoSuchElementException
* }}}
*/
def collect[S](pf: PartialFunction[T, S]): OFuture[S] = {
val p = OPromise[S]()
onComplete {
case f: Failure[_] => p complete f.asInstanceOf[Failure[S]]
case Success(v) =>
try {
if (pf.isDefinedAt(v)) p success pf(v)
else p failure new NoSuchElementException("Future.collect partial function is not defined at: " + v)
} catch {
case NonFatal(t) => p failure t
}
}
p.future
}
/** Creates a new future that will handle any matching throwable that this
* future might contain. If there is no match, or if this future contains
* a valid result then the new future will contain the same.
*
* Example:
*
* {{{
* future (6 / 0) recover { case e: ArithmeticException => 0 } // result: 0
* future (6 / 0) recover { case e: NotFoundException => 0 } // result: exception
* future (6 / 2) recover { case e: ArithmeticException => 0 } // result: 3
* }}}
*/
def recover[U >: T](pf: PartialFunction[Throwable, U]): OFuture[U] = {
val p = OPromise[U]()
onComplete { case tr => p.complete(tr recover pf) }
p.future
}
/** Creates a new future that will handle any matching throwable that this
* future might contain by assigning it a value of another future.
*
* If there is no match, or if this future contains
* a valid result then the new future will contain the same result.
*
* Example:
*
* {{{
* val f = future { Int.MaxValue }
* future (6 / 0) recoverWith { case e: ArithmeticException => f } // result: Int.MaxValue
* }}}
*/
def recoverWith[U >: T](pf: PartialFunction[Throwable, OFuture[U]]): OFuture[U] = {
val p = OPromise[U]()
onComplete {
case Failure(t) if pf isDefinedAt t =>
try {
p completeWith pf(t)
} catch {
case NonFatal(t0) => p failure t0
}
case otherwise => p complete otherwise
}
p.future
}
/** Zips the values of `this` and `that` future, and creates
* a new future holding the tuple of their results.
*
* If `this` future fails, the resulting future is failed
* with the throwable stored in `this`.
* Otherwise, if `that` future fails, the resulting future is failed
* with the throwable stored in `that`.
*/
def zip[U](that: OFuture[U]): OFuture[(T, U)] = {
val p = OPromise[(T, U)]()
this onComplete {
case f: Failure[_] => p complete f.asInstanceOf[Failure[(T, U)]]
case Success(r) =>
that onSuccess {
case r2 => p success ((r, r2))
}
that onFailure {
case f => p failure f
}
}
p.future
}
/** Creates a new future which holds the result of this future if it was completed successfully, or, if not,
* the result of the `that` future if `that` is completed successfully.
* If both futures are failed, the resulting future holds the throwable object of the first future.
*
* Using this method will not cause concurrent programs to become nondeterministic.
*
* Example:
* {{{
* val f = future { sys.error("failed") }
* val g = future { 5 }
* val h = f fallbackTo g
* Await.result(h, Duration.Zero) // evaluates to 5
* }}}
*/
def fallbackTo[U >: T](that: OFuture[U]): OFuture[U] = {
val p = OPromise[U]()
onComplete {
case s @ Success(_) => p complete s
case _ => p completeWith that
}
p.future
}
/** Creates a new `Future[S]` which is completed with this `Future`'s result if
* that conforms to `S`'s erased type or a `ClassCastException` otherwise.
*/
def mapTo[S](implicit tag: ClassTag[S]): OFuture[S] = {
def boxedType(c: Class[_]): Class[_] = {
if (c.isPrimitive) OFuture.toBoxed(c) else c
}
val p = OPromise[S]()
onComplete {
case f: Failure[_] => p complete f.asInstanceOf[Failure[S]]
case Success(t) =>
p complete (try {
Success(boxedType(tag.runtimeClass).cast(t).asInstanceOf[S])
} catch {
case e: ClassCastException => Failure(e)
})
}
p.future
}
/** Applies the side-effecting function to the result of this future, and returns
* a new future with the result of this future.
*
* This method allows one to enforce that the callbacks are executed in a
* specified order.
*
* Note that if one of the chained `andThen` callbacks throws
* an exception, that exception is not propagated to the subsequent `andThen`
* callbacks. Instead, the subsequent `andThen` callbacks are given the original
* value of this future.
*
* The following example prints out `5`:
*
* {{{
* val f = future { 5 }
* f andThen {
* case r => sys.error("runtime exception")
* } andThen {
* case Failure(t) => println(t)
* case Success(v) => println(v)
* }
* }}}
*/
def andThen[U](pf: PartialFunction[Try[T], U]): OFuture[T] = {
val p = OPromise[T]()
onComplete {
case r => try if (pf isDefinedAt r) pf(r) finally p complete r
}
p.future
}
/**
* Converts this orchestration future to a scala.concurrent.Future.
* @return A scala.concurrent.Future representing this future.
*/
def toFuture: scala.concurrent.Future[T] = {
import scala.concurrent.{Promise => CPromise}
val cPromise = CPromise[T]()
onComplete {
case Success(v) => cPromise success v
case Failure(t) => cPromise failure t
}
cPromise.future
}
}
/** Future companion object.
*
* @define nonDeterministic
* Note: using this method yields nondeterministic dataflow programs.
*/
object OFuture {
private[orchestration] val toBoxed = Map[Class[_], Class[_]](
classOf[Boolean] -> classOf[java.lang.Boolean],
classOf[Byte] -> classOf[java.lang.Byte],
classOf[Char] -> classOf[java.lang.Character],
classOf[Short] -> classOf[java.lang.Short],
classOf[Int] -> classOf[java.lang.Integer],
classOf[Long] -> classOf[java.lang.Long],
classOf[Float] -> classOf[java.lang.Float],
classOf[Double] -> classOf[java.lang.Double],
classOf[Unit] -> classOf[scala.runtime.BoxedUnit]
)
/** Creates an already completed Future with the specified exception.
*
* @tparam T the type of the value in the future
* @return the newly created `Future` object
*/
def failed[T](exception: Throwable): OFuture[T] = OPromise.failed(exception).future
/** Creates an already completed Future with the specified result.
*
* @tparam T the type of the value in the future
* @return the newly created `Future` object
*/
def successful[T](result: T): OFuture[T] = OPromise.successful(result).future
/** Simple version of `Futures.traverse`. Transforms a `TraversableOnce[Future[A]]` into a `Future[TraversableOnce[A]]`.
* Useful for reducing many `Future`s into a single `Future`.
*/
def sequence[A, M[_] <: TraversableOnce[_]](in: M[OFuture[A]])(implicit cbf: CanBuildFrom[M[OFuture[A]], A, M[A]]): OFuture[M[A]] = {
in.foldLeft(OPromise.successful(cbf(in)).future) {
(fr, fa) => for (r <- fr; a <- fa.asInstanceOf[OFuture[A]]) yield r += a
} map (_.result())
}
/** Returns a `Future` to the result of the first future in the list that is completed.
*/
def firstCompletedOf[T](futures: TraversableOnce[OFuture[T]]): OFuture[T] = {
val p = OPromise[T]()
val completeFirst: Try[T] => Unit = p tryComplete _
futures.foreach(_ onComplete completeFirst)
p.future
}
/** Returns a `Future` that will hold the optional result of the first `Future` with a result that matches the predicate.
*/
def find[T](futurestravonce: TraversableOnce[OFuture[T]])(predicate: T => Boolean): OFuture[Option[T]] = {
val futures = futurestravonce.toBuffer
if (futures.isEmpty) OPromise.successful[Option[T]](None).future
else {
val result = OPromise[Option[T]]()
var ref = futures.size
val search: Try[T] => Unit = v => try {
v match {
case Success(r) => if (predicate(r)) result tryComplete Success(Some(r))
case _ =>
}
} finally {
ref -= 1
if (ref == 0) {
result tryComplete Success(None)
}
}
futures.foreach(_ onComplete search)
result.future
}
}
/** A non-blocking fold over the specified futures, with the start value of the given zero.
* The fold is performed on the thread where the last future is completed,
* the result will be the first failure of any of the futures, or any failure in the actual fold,
* or the result of the fold.
*
* Example:
* {{{
* val result = Await.result(Future.fold(futures)(0)(_ + _), 5 seconds)
* }}}
*/
def fold[T, R](futures: TraversableOnce[OFuture[T]])(zero: R)(foldFun: (R, T) => R): OFuture[R] = {
if (futures.isEmpty) OPromise.successful(zero).future
else sequence(futures).map(_.foldLeft(zero)(foldFun))
}
/** Initiates a fold over the supplied futures where the fold-zero is the result value of the `Future` that's completed first.
*
* Example:
* {{{
* val result = Await.result(Future.reduce(futures)(_ + _), 5 seconds)
* }}}
*/
def reduce[T, R >: T](futures: TraversableOnce[OFuture[T]])(op: (R, T) => R): OFuture[R] = {
if (futures.isEmpty) OPromise[R]().failure(new NoSuchElementException("reduce attempted on empty collection")).future
else sequence(futures).map(_ reduceLeft op)
}
/** Transforms a `TraversableOnce[A]` into a `Future[TraversableOnce[B]]` using the provided function `A => Future[B]`.
* This is useful for performing a parallel map. For example, to apply a function to all items of a list
* in parallel:
*
* {{{
* val myFutureList = Future.traverse(myList)(x => Future(myFunc(x)))
* }}}
*/
def traverse[A, B, M[_] <: TraversableOnce[_]](in: M[A])(fn: A => OFuture[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]]): OFuture[M[B]] =
in.foldLeft(OPromise.successful(cbf(in)).future) { (fr, a) =>
val fb = fn(a.asInstanceOf[A])
for (r <- fr; b <- fb) yield r += b
}.map(_.result())
}
/** A marker indicating that a `java.lang.Runnable` provided to `scala.concurrent.ExecutionContext`
* wraps a callback provided to `Future.onComplete`.
* All callbacks provided to a `Future` end up going through `onComplete`, so this allows an
* `ExecutionContext` to special-case callbacks that were executed by `Future` if desired.
*/
trait OnCompleteRunnable {
self: Runnable =>
}
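/** Hedged usage sketch added for this edit (not part of the original squbs source):
  * shows how an OFuture is obtained from an OPromise and composed, as described in
  * the trait documentation above. Because OFuture is single-threaded, the callback
  * fires once the promise is completed. Names below are hypothetical.
  */
object OFutureUsageSketch {
  def demo(): Unit = {
    val p = OPromise[Int]()
    val doubled: OFuture[Int] = p.future.map(_ * 2) // composes like scala.concurrent.Future
    doubled.onSuccess { case v => println(s"got $v") } // registered before completion
    p.success(21) // completes the underlying future; the callback should print "got 42"
  }
}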
|
keshin/squbs
|
squbs-pattern/src/main/scala/org/squbs/pattern/orchestration/OFuture.scala
|
Scala
|
apache-2.0
| 22,714 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package entities.characters.stimuli
import com.anathema_roguelike.entities.characters.perks.actions.targetingstrategies.shapes.Circle
import com.anathema_roguelike.entities.characters.player.Player
import com.anathema_roguelike.environment.Location
import com.anathema_roguelike.main.display.Display.DisplayLayer
import com.anathema_roguelike.main.display.{Color, VisualRepresentation}
import com.anathema_roguelike.main.display.animations.Ripple
import com.anathema_roguelike.main.ui.UIConfig
import com.anathema_roguelike.stats.characterstats.secondarystats.detection.senses.Hearing
import com.anathema_roguelike.entities.characters.Character
class Sound(magnitude: Double, owner: Option[Character] = None) extends Stimulus(magnitude, owner) {
def computePerceivedStimulus(location: Location, character: Character): Option[PerceivedStimulus] = {
(owner, character) match {
case (Some(_), _: Player) => new Ripple(location, getMagnitude / 5, 0.2f).create(DisplayLayer.DUNGEON_OVERLAY, UIConfig.DUNGEON_OFFSET)
case _ =>
}
//TODO determine how to handle sound being stopped/reduced by walls etc
if (location.distance(character) > getMagnitude / 5) {
Option.empty
} else {
val hearing = character.getStatAmount[Hearing]
val perceivedSound = getMagnitude * (4 * hearing) / (hearing + 10)
      perceivedSound match {
        case _ if perceivedSound >= 100 => Some(new PerceivedStimulus(location, this, perceivedSound))
        case _ if perceivedSound >= 50 => {
          val perceivedLocation = location.getEnvironment.getLocation(new Circle(location, () => 2.0).getRandomPassablePoint(location.getEnvironment))
          Some(new PerceivedStimulus(perceivedLocation, this, perceivedSound))
        }
        case _ if perceivedSound >= 25 => {
          val perceivedLocation = location.getEnvironment.getLocation(new Circle(location, () => 3.0).getRandomPassablePoint(location.getEnvironment))
          Some(new PerceivedStimulus(perceivedLocation, this, perceivedSound))
        }
        case _ => Option.empty
      }
}
}
override def getVisualRepresentation = new VisualRepresentation('!', Color.WHITE)
}
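// Hedged worked example added for this edit (not part of the original game source):
// the perceived-loudness formula used above, perceived = magnitude * (4 * hearing) / (hearing + 10),
// evaluated on its own. With magnitude = 100 and hearing = 10 this gives 100 * 40 / 20 = 200,
// which is >= 100, so the stimulus would be perceived at its exact location.
object PerceivedSoundSketch {
  def perceived(magnitude: Double, hearing: Double): Double =
    magnitude * (4 * hearing) / (hearing + 10)
}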
|
carlminden/anathema-roguelike
|
src/com/anathema_roguelike/entities/characters/stimuli/Sound.scala
|
Scala
|
gpl-3.0
| 3,041 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package integration.interpreter
import java.io.{OutputStream, ByteArrayOutputStream}
import java.util.UUID
import com.ibm.spark.interpreter._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FunSpec, Matchers}
import com.ibm.spark.global.StreamState
class AddExternalJarMagicSpecForIntegration
extends FunSpec with Matchers with MockitoSugar with BeforeAndAfter
{
private val outputResult = new ByteArrayOutputStream()
private var interpreter: Interpreter = _
before {
interpreter = new ScalaInterpreter(Nil, mock[OutputStream])
with StandardSparkIMainProducer
with StandardTaskManagerProducer
with StandardSettingsProducer
interpreter.start()
StreamState.setStreams(outputStream = outputResult)
}
after {
outputResult.reset()
}
describe("ScalaInterpreter") {
describe("#addJars") {
it("should be able to load an external jar") {
val testJarUrl = this.getClass.getClassLoader.getResource("TestJar.jar")
//
// NOTE: This can be done with any jar. I have tested it previously by
// downloading jgoodies, placing it in /tmp/... and loading it.
//
// Should fail since jar was not added to paths
interpreter.interpret(
"import com.ibm.testjar.TestClass")._1 should be (Results.Error)
// Add jar to paths
interpreter.addJars(testJarUrl)
// Should now succeed
interpreter.interpret(
"import com.ibm.testjar.TestClass")._1 should be (Results.Success)
// Should now run
interpreter.interpret(
"""println(new TestClass().sayHello("Chip"))"""
) should be ((Results.Success, Left("")))
outputResult.toString should be ("Hello, Chip\\n")
}
it("should support Scala jars") {
val testJarUrl = this.getClass.getClassLoader.getResource("ScalaTestJar.jar")
// Should fail since jar was not added to paths
interpreter.interpret(
"import com.ibm.scalatestjar.TestClass")._1 should be (Results.Error)
// Add jar to paths
interpreter.addJars(testJarUrl)
// Should now succeed
interpreter.interpret(
"import com.ibm.scalatestjar.TestClass")._1 should be (Results.Success)
// Should now run
interpreter.interpret(
"""println(new TestClass().runMe())"""
) should be ((Results.Success, Left("")))
outputResult.toString should be ("You ran me!\\n")
}
it("should be able to add multiple jars at once") {
val testJar1Url =
this.getClass.getClassLoader.getResource("TestJar.jar")
val testJar2Url =
this.getClass.getClassLoader.getResource("TestJar2.jar")
// val interpreter = new ScalaInterpreter(List(), mock[OutputStream])
// with StandardSparkIMainProducer
// with StandardTaskManagerProducer
// with StandardSettingsProducer
// interpreter.start()
// Should fail since jars were not added to paths
interpreter.interpret(
"import com.ibm.testjar.TestClass")._1 should be (Results.Error)
interpreter.interpret(
"import com.ibm.testjar2.TestClass")._1 should be (Results.Error)
// Add jars to paths
interpreter.addJars(testJar1Url, testJar2Url)
// Should now succeed
interpreter.interpret(
"import com.ibm.testjar.TestClass")._1 should be (Results.Success)
interpreter.interpret(
"import com.ibm.testjar2.TestClass")._1 should be (Results.Success)
// Should now run
interpreter.interpret(
"""println(new com.ibm.testjar.TestClass().sayHello("Chip"))"""
) should be ((Results.Success, Left("")))
outputResult.toString should be ("Hello, Chip\\n")
outputResult.reset()
interpreter.interpret(
"""println(new com.ibm.testjar2.TestClass().CallMe())"""
) should be ((Results.Success, Left("")))
outputResult.toString should be ("3\\n")
}
it("should be able to add multiple jars in consecutive calls to addjar") {
val testJar1Url =
this.getClass.getClassLoader.getResource("TestJar.jar")
val testJar2Url =
this.getClass.getClassLoader.getResource("TestJar2.jar")
// val interpreter = new ScalaInterpreter(List(), mock[OutputStream])
// with StandardSparkIMainProducer
// with StandardTaskManagerProducer
// with StandardSettingsProducer
// interpreter.start()
// Should fail since jars were not added to paths
interpreter.interpret(
"import com.ibm.testjar.TestClass")._1 should be (Results.Error)
interpreter.interpret(
"import com.ibm.testjar2.TestClass")._1 should be (Results.Error)
// Add jars to paths
interpreter.addJars(testJar1Url)
interpreter.addJars(testJar2Url)
// Should now succeed
interpreter.interpret(
"import com.ibm.testjar.TestClass")._1 should be (Results.Success)
interpreter.interpret(
"import com.ibm.testjar2.TestClass")._1 should be (Results.Success)
// Should now run
interpreter.interpret(
"""println(new com.ibm.testjar.TestClass().sayHello("Chip"))"""
) should be ((Results.Success, Left("")))
outputResult.toString should be ("Hello, Chip\\n")
outputResult.reset()
interpreter.interpret(
"""println(new com.ibm.testjar2.TestClass().CallMe())"""
) should be ((Results.Success, Left("")))
outputResult.toString should be ("3\\n")
}
it("should not have issues with previous variables") {
val testJar1Url =
this.getClass.getClassLoader.getResource("TestJar.jar")
val testJar2Url =
this.getClass.getClassLoader.getResource("TestJar2.jar")
// Add a jar, which reinitializes the symbols
interpreter.addJars(testJar1Url)
interpreter.interpret(
"""
|val t = new com.ibm.testjar.TestClass()
""".stripMargin)._1 should be (Results.Success)
// Add a second jar, which reinitializes the symbols and breaks the
// above variable
interpreter.addJars(testJar2Url)
interpreter.interpret(
"""
|def runMe(testClass: com.ibm.testjar.TestClass) =
|testClass.sayHello("Hello")
""".stripMargin)._1 should be (Results.Success)
// This line should NOT explode if variable is rebound correctly
// otherwise you get the error of
//
// Message: <console>:16: error: type mismatch;
// found : com.ibm.testjar.com.ibm.testjar.com.ibm.testjar.com.ibm.
// testjar.com.ibm.testjar.TestClass
// required: com.ibm.testjar.com.ibm.testjar.com.ibm.testjar.com.ibm.
// testjar.com.ibm.testjar.TestClass
// runMe(t)
// ^
interpreter.interpret(
"""
|runMe(t)
""".stripMargin)._1 should be (Results.Success)
}
}
}
}
|
codeaudit/spark-kernel
|
kernel-api/src/test/scala/integration/interpreter/AddExternalJarMagicSpecForIntegration.scala
|
Scala
|
apache-2.0
| 7,771 |
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion.aws
trait AdpAction extends AdpDataPipelineObject
/**
* An action to trigger the cancellation of a pending or unfinished activity, resource,
* or data node. AWS Data Pipeline attempts to put the activity, resource, or data node
* into the CANCELLED state if it does not finish by the lateAfterTimeout value.
*/
class AdpTerminate extends AdpAction {
val id = "TerminateAction"
val name = Option("TerminateTasks")
val `type` = "Terminate"
}
/**
* Sends an Amazon SNS notification message when an activity fails or finishes successfully.
*
* @param message The body text of the Amazon SNS notification. String Yes
* @param role The IAM role to use to create the Amazon SNS alarm. String Yes
* @param subject The subject line of the Amazon SNS notification message. String Yes
* @param topicArn The destination Amazon SNS topic ARN for the message. String Yes
*/
case class AdpSnsAlarm (
id: String,
name: Option[String],
subject: String,
message: String,
topicArn: String,
role: String
) extends AdpAction {
val `type` = "SnsAlarm"
}
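/** Hedged usage sketch added for this edit (not part of the original Hyperion source):
  * constructing the SNS alarm action documented above. The topic ARN and role below
  * are made-up placeholder values.
  */
object AdpSnsAlarmSketch {
  val onFailure: AdpSnsAlarm = AdpSnsAlarm(
    id = "FailureNotify",
    name = Option("FailureNotify"),
    subject = "Pipeline activity failed",
    message = "An activity did not finish successfully.",
    topicArn = "arn:aws:sns:us-east-1:123456789012:example-topic", // hypothetical ARN
    role = "DataPipelineDefaultRole" // hypothetical IAM role name
  )
}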
|
realstraw/hyperion
|
core/src/main/scala/com/krux/hyperion/aws/AdpActions.scala
|
Scala
|
bsd-3-clause
| 1,330 |
/*
* Copyright (C) 2007-2008 Artima, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Example code from:
*
* Programming in Scala (First Edition, Version 6)
* by Martin Odersky, Lex Spoon, Bill Venners
*
* http://booksites.artima.com/programming_in_scala
*/
abstract class Food(val name: String) {
override def toString = name
}
|
peachyy/scalastu
|
modules/Food.scala
|
Scala
|
apache-2.0
| 884 |
package library.network.utils
import java.net.Socket
import library.network.rpcprotocol.LibraryClientRPCWorker
import library.services.ILibraryServer
class LibraryRPCConcurrentServer(val port: Int, val libraryServer: ILibraryServer)
extends AbstractConcurrentServer(port)
{
println("LibraryRPCConcurrentServer")
protected def createWorker(client: Socket): Thread =
{
val worker: LibraryClientRPCWorker = new LibraryClientRPCWorker(libraryServer, client)
new Thread(worker)
}
}
|
leyyin/university
|
systems-for-design-and-implementation/labs/lab3/LibraryNetworking/src/library/network/utils/LibraryRPCConcurrentServer.scala
|
Scala
|
mit
| 517 |
package views.html.search
import play.api.data.Form
import lila.api.Context
import lila.app.templating.Environment._
import lila.app.ui.ScalatagsTemplate._
import lila.common.paginator.Paginator
import controllers.routes
object index {
import trans.search._
def apply(form: Form[_], paginator: Option[Paginator[lila.game.Game]] = None, nbGames: Long)(implicit
ctx: Context
) = {
val commons = bits of form
import commons._
views.html.base.layout(
title = searchInXGames.txt(nbGames.localize, nbGames),
moreJs = frag(
jsModule("gameSearch"),
infiniteScrollTag
),
moreCss = cssTag("search")
) {
main(cls := "box page-small search")(
h1(advancedSearch()),
st.form(
noFollow,
cls := "box__pad search__form",
action := s"${routes.Search.index()}#results",
method := "GET"
)(dataReqs)(
globalError(form),
table(
tr(
th(label(trans.players())),
td(cls := "usernames two-columns")(List("a", "b").map { p =>
div(form3.input(form("players")(p))(tpe := "text"))
})
),
colors(hide = true),
winner(hide = true),
loser(hide = true),
rating,
hasAi,
aiLevel,
source,
perf,
mode,
turns,
duration,
clockTime,
clockIncrement,
status,
winnerColor,
date,
sort,
analysed,
tr(
th,
td(cls := "action")(
submitButton(cls := "button")(trans.search.search()),
div(cls := "wait")(
spinner,
searchInXGames(nbGames.localize)
)
)
)
)
),
div(cls := "search__result", id := "results")(
paginator.map { pager =>
val permalink =
a(cls := "permalink", href := routes.Search.index(), noFollow)("Permalink")
if (pager.nbResults > 0)
frag(
div(cls := "search__status box__pad")(
strong(xGamesFound(pager.nbResults.localize, pager.nbResults)),
" • ",
permalink
),
div(cls := "search__rows infinite-scroll")(
views.html.game.widgets(pager.currentPageResults),
pagerNext(pager, np => routes.Search.index(np).url)
)
)
else
div(cls := "search__status box__pad")(
strong(xGamesFound(0)),
" • ",
permalink
)
}
)
)
}
}
}
|
luanlv/lila
|
app/views/search/index.scala
|
Scala
|
mit
| 2,864 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.geotools.sft
import java.util.regex.Pattern
import java.util.{Date, UUID}
import org.apache.commons.text.StringEscapeUtils
import org.geotools.feature.AttributeTypeBuilder
import org.locationtech.geomesa.utils.geotools.sft.SimpleFeatureSpec.AttributeSpec
import org.locationtech.jts.geom._
import org.opengis.feature.`type`.AttributeDescriptor
import org.opengis.feature.simple.SimpleFeatureType
/**
* Intermediate format for simple feature types. Used for converting between string specs,
* typesafe config specs and `SimpleFeatureType`s
*
* @param attributes attributes
* @param options simple feature level options
*/
case class SimpleFeatureSpec(attributes: Seq[AttributeSpec], options: Map[String, AnyRef])
object SimpleFeatureSpec {
/**
* Intermediate format for attribute descriptors
*/
sealed trait AttributeSpec {
/**
* Attribute name
*
* @return name
*/
def name: String
/**
* Type binding
*
* @return class binding
*/
def clazz: Class[_]
/**
* Attribute level options - all options are stored as strings for simplicity.
* @see `RichAttributeDescriptors` for conversions.
*
* @return attribute level options
*/
def options: Map[String, String]
/**
* Convert to a spec string
*
* @return a partial spec string
*/
def toSpec: String = {
val opts = specOptions.map { case (k, v) =>
if (simpleOptionPattern.matcher(v).matches()) {
s":$k=$v"
} else {
s":$k='${StringEscapeUtils.escapeJava(v)}'"
}
}
s"$name:$getClassSpec${opts.mkString}"
}
/**
* Convert to a typesafe config map
*
* @return a spec map
*/
def toConfigMap: Map[String, String] = Map("name" -> name, "type" -> getClassSpec) ++ configOptions
/**
* Convert to an attribute descriptor
*
* @return a descriptor
*/
def toDescriptor: AttributeDescriptor = {
val builder = new AttributeTypeBuilder().binding(clazz)
descriptorOptions.foreach { case (k, v) => builder.userData(k, v) }
builderHook(builder)
builder.buildDescriptor(name)
}
/**
* Gets class binding as a spec string
*
* @return class part of spec string
*/
protected def getClassSpec: String = typeEncode(clazz)
/**
* Options encoded in the spec string
*
* @return options to include in the spec string conversion
*/
protected def specOptions: Map[String, String] = options
/**
* Options encoded in the config map
*
* @return options to include in the config map conversion
*/
protected def configOptions: Map[String, String] = options
/**
* Options set in the attribute descriptor
*
* @return options to include in the descriptor conversion
*/
protected def descriptorOptions: Map[String, String] = options
/**
* Hook for modifying attribute descriptor
*
* @param builder attribute descriptor builder
*/
protected def builderHook(builder: AttributeTypeBuilder): Unit = {}
}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.AttributeOptions.{OptDefault, OptIndex, OptSrid}
private val simpleOptionPattern = Pattern.compile("[a-zA-Z0-9_]+")
def attribute(sft: SimpleFeatureType, descriptor: AttributeDescriptor): AttributeSpec = {
import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors.RichAttributeDescriptor
import scala.collection.JavaConversions._
val name = descriptor.getLocalName
val binding = descriptor.getType.getBinding
val options = descriptor.getUserData.map { case (k, v) => k.toString -> v.toString }.toMap
if (simpleTypeMap.contains(binding.getSimpleName)) {
SimpleAttributeSpec(name, binding, options)
} else if (geometryTypeMap.contains(binding.getSimpleName)) {
val opts = if (sft != null && sft.getGeometryDescriptor == descriptor) { options + (OptDefault -> "true") } else { options }
GeomAttributeSpec(name, binding, opts)
} else if (classOf[java.util.List[_]].isAssignableFrom(binding)) {
val itemClass = Option(descriptor.getListType()).getOrElse(classOf[String])
ListAttributeSpec(name, itemClass, options)
} else if (classOf[java.util.Map[_, _]].isAssignableFrom(binding)) {
val (keyBinding, valueBinding) = descriptor.getMapTypes()
val keyClass = Option(keyBinding).getOrElse(classOf[String])
val valueClass = Option(valueBinding).getOrElse(classOf[String])
MapAttributeSpec(name, keyClass, valueClass, options)
} else {
throw new IllegalArgumentException(s"Unknown type binding $binding")
}
}
/**
* Simple attribute
*/
case class SimpleAttributeSpec(name: String, clazz: Class[_], options: Map[String, String]) extends AttributeSpec
/**
* Geometry attribute
*/
case class GeomAttributeSpec(name: String, clazz: Class[_], options: Map[String, String]) extends AttributeSpec {
private val default = options.get(OptDefault).exists(_.toBoolean)
override def toSpec: String = if (default) { s"*${super.toSpec}" } else { super.toSpec }
override def builderHook(builder: AttributeTypeBuilder): Unit = {
require(!options.get(OptSrid).exists(_.toInt != 4326),
s"Invalid SRID '${options(OptSrid)}'. Only 4326 is supported.")
builder.crs(org.locationtech.geomesa.utils.geotools.CRS_EPSG_4326)
}
// default geoms are indicated by the *
// we don't allow attribute indexing for geometries
override protected def specOptions: Map[String, String] = options - OptDefault - OptIndex
override protected def configOptions: Map[String, String] = options - OptIndex
override protected def descriptorOptions: Map[String, String] = options - OptIndex
}
/**
* List attribute
*/
case class ListAttributeSpec(name: String, subClass: Class[_], options: Map[String, String]) extends AttributeSpec {
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.AttributeConfigs.UserDataListType
override val clazz: Class[java.util.List[_]] = classOf[java.util.List[_]]
override val getClassSpec = s"List[${typeEncode(subClass)}]"
override protected def specOptions: Map[String, String] = options - UserDataListType
override protected def descriptorOptions: Map[String, String] = options + (UserDataListType -> subClass.getName)
}
/**
* Map attribute
*/
case class MapAttributeSpec(name: String, keyClass: Class[_], valueClass: Class[_], options: Map[String, String])
extends AttributeSpec {
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.AttributeConfigs._
override val clazz: Class[java.util.Map[_, _]] = classOf[java.util.Map[_, _]]
override val getClassSpec = s"Map[${typeEncode(keyClass)},${typeEncode(valueClass)}]"
override protected def specOptions: Map[String, String] =
options - UserDataMapValueType - UserDataMapKeyType
override protected def descriptorOptions: Map[String, String] =
options + (UserDataMapKeyType -> keyClass.getName) + (UserDataMapValueType -> valueClass.getName)
}
private val typeEncode: Map[Class[_], String] = Map(
classOf[java.lang.String] -> "String",
classOf[java.lang.Integer] -> "Integer",
classOf[java.lang.Double] -> "Double",
classOf[java.lang.Long] -> "Long",
classOf[java.lang.Float] -> "Float",
classOf[java.lang.Boolean] -> "Boolean",
classOf[UUID] -> "UUID",
classOf[Geometry] -> "Geometry",
classOf[Point] -> "Point",
classOf[LineString] -> "LineString",
classOf[Polygon] -> "Polygon",
classOf[MultiPoint] -> "MultiPoint",
classOf[MultiLineString] -> "MultiLineString",
classOf[MultiPolygon] -> "MultiPolygon",
classOf[GeometryCollection] -> "GeometryCollection",
classOf[Date] -> "Date",
classOf[java.sql.Date] -> "Date",
classOf[java.sql.Timestamp] -> "Timestamp",
classOf[java.util.List[_]] -> "List",
classOf[java.util.Map[_, _]] -> "Map",
classOf[Array[Byte]] -> "Bytes"
)
private [sft] val simpleTypeMap = Map(
"String" -> classOf[java.lang.String],
"java.lang.String" -> classOf[java.lang.String],
"string" -> classOf[java.lang.String],
"Integer" -> classOf[java.lang.Integer],
"java.lang.Integer" -> classOf[java.lang.Integer],
"int" -> classOf[java.lang.Integer],
"Int" -> classOf[java.lang.Integer],
"0" -> classOf[java.lang.Integer],
"Long" -> classOf[java.lang.Long],
"java.lang.Long" -> classOf[java.lang.Long],
"long" -> classOf[java.lang.Long],
"Double" -> classOf[java.lang.Double],
"java.lang.Double" -> classOf[java.lang.Double],
"double" -> classOf[java.lang.Double],
"0.0" -> classOf[java.lang.Double],
"Float" -> classOf[java.lang.Float],
"java.lang.Float" -> classOf[java.lang.Float],
"float" -> classOf[java.lang.Float],
"0.0f" -> classOf[java.lang.Float],
"Boolean" -> classOf[java.lang.Boolean],
"boolean" -> classOf[java.lang.Boolean],
"bool" -> classOf[java.lang.Boolean],
"java.lang.Boolean" -> classOf[java.lang.Boolean],
"true" -> classOf[java.lang.Boolean],
"false" -> classOf[java.lang.Boolean],
"UUID" -> classOf[UUID],
"Date" -> classOf[Date],
"Timestamp" -> classOf[java.sql.Timestamp],
"byte[]" -> classOf[Array[Byte]],
"Bytes" -> classOf[Array[Byte]]
)
private [sft] val geometryTypeMap = Map(
"Geometry" -> classOf[Geometry],
"Point" -> classOf[Point],
"LineString" -> classOf[LineString],
"Polygon" -> classOf[Polygon],
"MultiPoint" -> classOf[MultiPoint],
"MultiLineString" -> classOf[MultiLineString],
"MultiPolygon" -> classOf[MultiPolygon],
"GeometryCollection" -> classOf[GeometryCollection]
)
private [sft] val listTypeMap = Map(
"list" -> classOf[java.util.List[_]],
"List" -> classOf[java.util.List[_]],
"java.util.List" -> classOf[java.util.List[_]])
private [sft] val mapTypeMap = Map(
"map" -> classOf[java.util.Map[_, _]],
"Map" -> classOf[java.util.Map[_, _]],
"java.util.Map" -> classOf[java.util.Map[_, _]])
}
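/** Hedged example added for this edit (not part of the original GeoMesa source):
  * what AttributeSpec.toSpec produces for two attributes built with the case classes
  * above. The expected strings in the comments assume the standard option keys from
  * SimpleFeatureTypes.AttributeOptions.
  */
object SimpleFeatureSpecSketch {
  import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.AttributeOptions.{OptDefault, OptIndex, OptSrid}
  import SimpleFeatureSpec.{GeomAttributeSpec, SimpleAttributeSpec}

  // expected to render roughly as "dtg:Date:index=true"
  val dtg: String = SimpleAttributeSpec("dtg", classOf[Date], Map(OptIndex -> "true")).toSpec
  // expected to render roughly as "*geom:Point:srid=4326" - the leading '*' marks the default geometry
  val geom: String = GeomAttributeSpec("geom", classOf[Point], Map(OptDefault -> "true", OptSrid -> "4326")).toSpec
}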
|
aheyne/geomesa
|
geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/geotools/sft/SimpleFeatureSpec.scala
|
Scala
|
apache-2.0
| 11,298 |
/*
* Generation.scala
* (Muta)
*
* Copyright (c) 2013-2014 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Lesser General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.muta
trait Generation[Chromosome, Global] extends (util.Random => Chromosome) {
def size: Int
def global: Global
def seed: Int
}
|
Sciss/Muta
|
src/main/scala/de/sciss/muta/Generation.scala
|
Scala
|
lgpl-3.0
| 434 |
object Signature {
enum MatchDegree {
case NoMatch, ParamMatch, FullMatch
}
export MatchDegree._
// Check that exported values have singleton types
val x: MatchDegree.NoMatch.type = NoMatch
// Check that the following two methods are not exported.
// Exporting them would lead to a double definition.
def values: Array[MatchDegree] = ???
def valueOf($name: String): MatchDegree = ???
}
|
som-snytt/dotty
|
tests/pos/export-enum.scala
|
Scala
|
apache-2.0
| 412 |
package net.janvsmachine.fpinscala.parsing
sealed trait Json
object Json {
case object JNull extends Json
case class JNumber(get: Double) extends Json
case class JString(get: String) extends Json
case class JBool(get: Boolean) extends Json
case class JArray(get: IndexedSeq[Json]) extends Json
case class JObject(get: Map[String, Json]) extends Json
def jsonParser[Err, Parser[+ _]](P: Parsers[Err, Parser]): Parser[Json] = {
import P._
val spaces: Parser[String] = char(' ').many.slice
val name: Parser[String] = regex("\\w".r)
val quote: Parser[Char] = char('"')
val identifier: Parser[String] =
(quote ** name ** quote)
.map { case ((_, id), _) => id }
val numberLiteral: Parser[JNumber] =
(regex("\\d+".r) ** regex("\\.\\d+".r).optionalOr(""))
.map { case (first, last) => JNumber(s"$first.$last".toDouble) }
val stringLiteral: Parser[JString] =
(quote ** regex("^[\\"]".r) ** quote)
.map { case ((_, id), _) => JString(id) }
val booleanLiteral: Parser[JBool] =
(string("true") | string("True") | string("false") | string("False"))
.map(b => JBool(b.toBoolean))
lazy val literal: Parser[Json] = numberLiteral | stringLiteral | booleanLiteral | array | json
lazy val literalAndSeparator: Parser[Json] = (literal ** char(','))
.map { case (l, _) => l }
lazy val arrayMembers: Parser[List[Json]] =
(many(literalAndSeparator) ** literal)
.map { case (ls, l) => ls ++ List(l) }
lazy val array: Parser[JArray] =
middle(char('[') ** (arrayMembers | succeed(List.empty)) ** char(']'))
.map(ls => JArray(ls.toVector))
lazy val objectKeyValue: Parser[(String, Json)] = ???
lazy val objectKeyValueAndSeparator: Parser[(String, Json)] = ???
lazy val objectMembers: Parser[List[(String, Json)]] = ???
many(objectKeyValueAndSeparator) or optionalOr(objectKeyValue, List.empty) // <<--
lazy val jsonObject: Parser[JObject] =
middle(char('{') ** objectMembers.map(members => JObject(members.toMap)) ** char('}'))
lazy val json: Parser[Json] =
array | jsonObject
???
}
}
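/** Hedged usage sketch added for this edit (not part of the original exercise file):
  * building a Json value directly with the ADT above, independent of any Parsers
  * implementation.
  */
object JsonAdtSketch {
  import Json._

  val example: Json = JObject(Map(
    "name" -> JString("fp-in-scala"),
    "stars" -> JNumber(42),
    "topics" -> JArray(Vector(JString("scala"), JString("parsing")))
  ))
}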
|
stettix/fp-in-scala
|
src/main/scala/net/janvsmachine/fpinscala/parsing/Json.scala
|
Scala
|
apache-2.0
| 2,172 |
import sbt._
object Versions {
val scalaVersion = "2.11.6"
val ideaVersion = "143.379.1"
val sbtStructureVersion = "4.2.1"
val luceneVersion = "4.8.1"
val aetherVersion = "1.0.0.v20140518"
val sisuInjectVersion = "2.2.3"
val wagonVersion = "2.6"
val httpComponentsVersion = "4.3.1"
}
object Dependencies {
import Versions._
val sbtStructureExtractor012 = "org.jetbrains" % "sbt-structure-extractor-0-12" % sbtStructureVersion
val sbtStructureExtractor013 = "org.jetbrains" % "sbt-structure-extractor-0-13" % sbtStructureVersion
val sbtLaunch = "org.scala-sbt" % "sbt-launch" % "0.13.8"
val jamm = "com.github.jbellis" % "jamm" % "0.3.1"
val scalaLibrary = "org.scala-lang" % "scala-library" % scalaVersion
val scalaReflect = "org.scala-lang" % "scala-reflect" % scalaVersion
val scalaCompiler = "org.scala-lang" % "scala-compiler" % scalaVersion
val scalaXml = "org.scala-lang.modules" %% "scala-xml" % "1.0.2"
val scalaParserCombinators = "org.scala-lang.modules" %% "scala-parser-combinators" % "1.0.4"
val sbtStructureCore = "org.jetbrains" % "sbt-structure-core_2.11" % sbtStructureVersion
val evoInflector = "org.atteo" % "evo-inflector" % "1.2"
val scalatestFindersPatched = "org.scalatest" % "scalatest-finders-patched" % "0.9.8"
val plexusContainerDefault = "org.codehaus.plexus" % "plexus-container-default" % "1.5.5"
val plexusClassworlds = "org.codehaus.plexus" % "plexus-classworlds" % "2.4"
val plexusUtils = "org.codehaus.plexus" % "plexus-utils" % "3.0.8"
val plexusComponentAnnotations = "org.codehaus.plexus" % "plexus-component-annotations" % "1.5.5"
val xbeanReflect = "org.apache.xbean" % "xbean-reflect" % "3.4"
val luceneCore = "org.apache.lucene" % "lucene-core" % luceneVersion
val luceneHighlighter = "org.apache.lucene" % "lucene-highlighter" % luceneVersion
val luceneMemory = "org.apache.lucene" % "lucene-memory" % luceneVersion
val luceneQueries = "org.apache.lucene" % "lucene-queries" % luceneVersion
val luceneQueryParser = "org.apache.lucene" % "lucene-queryparser" % luceneVersion
val luceneAnalyzers = "org.apache.lucene" % "lucene-analyzers-common" % luceneVersion
val luceneSandbox = "org.apache.lucene" % "lucene-sandbox" % luceneVersion
val aetherApi = "org.eclipse.aether" % "aether-api" % aetherVersion
val aetherUtil = "org.eclipse.aether" % "aether-util" % aetherVersion
val sisuInjectPlexus = "org.sonatype.sisu" % "sisu-inject-plexus" % sisuInjectVersion
val sisuInjectBean = "org.sonatype.sisu" % "sisu-inject-bean" % sisuInjectVersion
val sisuGuice = "org.sonatype.sisu" % "sisu-guice" % "3.0.3"
val wagonHttp = "org.apache.maven.wagon" % "wagon-http" % wagonVersion
val wagonHttpShared = "org.apache.maven.wagon" % "wagon-http-shared" % wagonVersion
val wagonProviderApi = "org.apache.maven.wagon" % "wagon-provider-api" % wagonVersion
val httpClient = "org.apache.httpcomponents" % "httpclient" % httpComponentsVersion
val httpCore = "org.apache.httpcomponents" % "httpcore" % httpComponentsVersion
val commonsLogging = "commons-logging" % "commons-logging" % "1.1.3"
val commonsCodec = "commons-codec" % "commons-codec" % "1.6"
val commonsLang = "commons-lang" % "commons-lang" % "2.6"
val commonsIo = "commons-io" % "commons-io" % "2.2"
val jsoup = "org.jsoup" % "jsoup" % "1.7.2"
val mavenIndexerCore = "org.apache.maven.indexer" % "indexer-core" % "6.0"
val mavenModel = "org.apache.maven" % "maven-model" % "3.0.5"
val junitInterface = "com.novocode" % "junit-interface" % "0.11" % "test"
val scalastyle_2_11 = "org.scalastyle" % "scalastyle_2.11" % "0.7.0"
val scalariform_2_11 = "org.scalariform" % "scalariform_2.11" % "0.1.7"
val macroParadise = "org.scalamacros" % "paradise" % "2.1.0-M5" cross CrossVersion.full
}
object DependencyGroups {
import Dependencies._
val plexusContainer = Seq(
plexusContainerDefault,
plexusClassworlds,
plexusUtils,
plexusComponentAnnotations,
xbeanReflect
)
val lucene = Seq(
luceneCore,
luceneHighlighter,
luceneMemory,
luceneQueries,
luceneQueryParser,
luceneAnalyzers,
luceneSandbox
)
val aether = Seq(
aetherApi,
aetherUtil
)
val sisu = Seq(
sisuInjectPlexus,
sisuInjectBean,
sisuGuice
)
val wagon = Seq(
wagonHttp,
wagonHttpShared,
wagonProviderApi,
httpClient,
httpCore,
commonsCodec,
commonsLogging,
commonsLang,
commonsIo,
jsoup
)
val scalastyle = Seq(
scalastyle_2_11,
scalariform_2_11
)
val mavenIndexer = Seq(
mavenIndexerCore,
mavenModel
) ++ plexusContainer ++ lucene ++ aether ++ sisu ++ wagon
val scalaCommunity = Seq(
scalaLibrary,
scalaReflect,
scalaXml,
scalaParserCombinators,
sbtStructureCore,
evoInflector,
scalatestFindersPatched,
jamm
) ++ mavenIndexer ++ scalastyle
val scalap = Seq(
scalaLibrary,
scalaReflect,
scalaCompiler
)
val scalaRunner = Seq(
"org.specs2" %% "specs2" % "2.3.11" % "provided" excludeAll ExclusionRule(organization = "org.ow2.asm")
)
val runners = Seq(
"org.specs2" %% "specs2" % "2.3.11" % "provided" excludeAll ExclusionRule(organization = "org.ow2.asm"),
"org.scalatest" % "scalatest_2.11" % "2.2.1" % "provided",
"com.lihaoyi" %% "utest" % "0.1.3" % "provided"
)
val sbtLaunchTestDownloader =
Seq("0.12.4", "0.13.0", "0.13.1", "0.13.2",
"0.13.5", "0.13.6", "0.13.7", "0.13.8",
"0.13.9")
.map(v => "org.scala-sbt" % "sbt-launch" % v)
val testDownloader = Seq(
"org.scalatest" % "scalatest_2.11" % "2.2.1",
"org.scalatest" % "scalatest_2.10" % "2.2.1",
"org.specs2" % "specs2_2.11" % "2.4.15",
"org.scalaz" % "scalaz-core_2.11" % "7.1.0",
"org.scalaz" % "scalaz-concurrent_2.11" % "7.1.0",
"org.scala-lang.modules" % "scala-xml_2.11" % "1.0.2",
"org.specs2" % "specs2_2.10" % "2.4.6",
"org.scalaz" % "scalaz-core_2.10" % "7.1.0",
"org.scalaz" % "scalaz-concurrent_2.10" % "7.1.0",
"org.scalaz.stream" % "scalaz-stream_2.11" % "0.6a",
"com.chuusai" % "shapeless_2.11" % "2.0.0",
"org.typelevel" % "scodec-bits_2.11" % "1.1.0-SNAPSHOT",
"org.typelevel" % "scodec-core_2.11" % "1.7.0-SNAPSHOT",
"org.scalatest" % "scalatest_2.11" % "2.1.7",
"org.scalatest" % "scalatest_2.10" % "2.1.7",
"org.scalatest" % "scalatest_2.10" % "1.9.2",
"com.github.julien-truffaut" %% "monocle-core" % "1.2.0-SNAPSHOT",
"com.github.julien-truffaut" %% "monocle-generic" % "1.2.0-SNAPSHOT",
"com.github.julien-truffaut" %% "monocle-macro" % "1.2.0-SNAPSHOT",
"io.spray" %% "spray-routing" % "1.3.1"
)
val sbtRuntime = Seq(
sbtStructureExtractor012,
sbtStructureExtractor013,
sbtLaunch
)
}
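// A minimal usage sketch (not part of the original build definition): in build.sbt or a
// Build.scala these groups would typically be wired into a project's settings, e.g.
//
//   libraryDependencies ++= DependencyGroups.scalaCommunity
//   libraryDependencies ++= DependencyGroups.sbtRuntime
//
// The exact projects and configuration scopes these groups are attached to are defined
// elsewhere in the real build; the lines above are only illustrative.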
|
LPTK/intellij-scala
|
project/dependencies.scala
|
Scala
|
apache-2.0
| 6,811 |
/**
* Created by Variant on 16/3/16.
*/
// Package object
package object people{
val defaultName = "Scala"
}
// Members defined in the package object can be referenced from inside the package
package people{
class people{
val name = defaultName
}
}
object PackageOps {
}
// Special import forms
import java.util.{HashMap => JavaHashMap}
// Do not use Scala's StringBuilder class (hide it from the import)
import scala.{StringBuilder => _}
// Imports that are always in scope by default:
// java.lang._, scala._, scala.Predef._
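// Illustrative notes (not part of the original file):
// - After the renaming import above, the Java map is used under its alias:
//     val scores = new JavaHashMap[String, Int]()
// - `import scala.{StringBuilder => _}` on its own only excludes StringBuilder from this
//   import clause; the usual idiom for importing everything from scala *except*
//   StringBuilder is `import scala.{StringBuilder => _, _}`.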
|
sparkLiwei/ProgrammingNote
|
scalaLearning/scalaOOP/PackageOps.scala
|
Scala
|
cc0-1.0
| 436 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.projections
import org.bdgenomics.formats.avro.Variant
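// Enumerates the fields of the Avro Variant schema so that callers can reference them by
// name, e.g. when building field projections over variant records.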
object VariantField extends FieldEnumeration(Variant.SCHEMA$) {
val contig, start, end, referenceAllele, variantAllele = SchemaValue
}
|
erictu/adam
|
adam-core/src/main/scala/org/bdgenomics/adam/projections/VariantField.scala
|
Scala
|
apache-2.0
| 1,017 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.{Locale, TimeZone}
import org.scalatest.Assertions
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.{SparkEnv, SparkException}
import org.apache.spark.rdd.BlockRDD
import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.plans.logical.Aggregate
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode}
import org.apache.spark.sql.execution.exchange.Exchange
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.state.StateStore
import org.apache.spark.sql.expressions.scalalang.typed
import org.apache.spark.sql.functions._
import org.apache.spark.sql.streaming.OutputMode._
import org.apache.spark.sql.streaming.util.{MockSourceProvider, StreamManualClock}
import org.apache.spark.sql.types.StructType
import org.apache.spark.storage.{BlockId, StorageLevel, TestBlockId}
object FailureSingleton {
var firstTime = true
}
class StreamingAggregationSuite extends StateStoreMetricsTest
with BeforeAndAfterAll with Assertions {
override def afterAll(): Unit = {
super.afterAll()
StateStore.stop()
}
import testImplicits._
test("simple count, update mode") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
testStream(aggregated, Update)(
AddData(inputData, 3),
CheckLastBatch((3, 1)),
AddData(inputData, 3, 2),
CheckLastBatch((3, 2), (2, 1)),
StopStream,
StartStream(),
AddData(inputData, 3, 2, 1),
CheckLastBatch((3, 3), (2, 2), (1, 1)),
// By default we run in new tuple mode.
AddData(inputData, 4, 4, 4, 4),
CheckLastBatch((4, 4))
)
}
test("count distinct") {
val inputData = MemoryStream[(Int, Seq[Int])]
val aggregated =
inputData.toDF()
.select($"*", explode($"_2") as 'value)
.groupBy($"_1")
.agg(size(collect_set($"value")))
.as[(Int, Int)]
testStream(aggregated, Update)(
AddData(inputData, (1, Seq(1, 2))),
CheckLastBatch((1, 2))
)
}
test("simple count, complete mode") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
testStream(aggregated, Complete)(
AddData(inputData, 3),
CheckLastBatch((3, 1)),
AddData(inputData, 2),
CheckLastBatch((3, 1), (2, 1)),
StopStream,
StartStream(),
AddData(inputData, 3, 2, 1),
CheckLastBatch((3, 2), (2, 2), (1, 1)),
AddData(inputData, 4, 4, 4, 4),
CheckLastBatch((4, 4), (3, 2), (2, 2), (1, 1))
)
}
test("simple count, append mode") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
val e = intercept[AnalysisException] {
testStream(aggregated, Append)()
}
Seq("append", "not supported").foreach { m =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(m.toLowerCase(Locale.ROOT)))
}
}
test("sort after aggregate in complete mode") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.toDF("value", "count")
.orderBy($"count".desc)
.as[(Int, Long)]
testStream(aggregated, Complete)(
AddData(inputData, 3),
CheckLastBatch(isSorted = true, (3, 1)),
AddData(inputData, 2, 3),
CheckLastBatch(isSorted = true, (3, 2), (2, 1)),
StopStream,
StartStream(),
AddData(inputData, 3, 2, 1),
CheckLastBatch(isSorted = true, (3, 3), (2, 2), (1, 1)),
AddData(inputData, 4, 4, 4, 4),
CheckLastBatch(isSorted = true, (4, 4), (3, 3), (2, 2), (1, 1))
)
}
test("state metrics") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDS()
.flatMap(x => Seq(x, x + 1))
.toDF("value")
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
implicit class RichStreamExecution(query: StreamExecution) {
def stateNodes: Seq[SparkPlan] = {
query.lastExecution.executedPlan.collect {
case p: StateStoreSaveExec => p
}
}
}
// Test with Update mode
testStream(aggregated, Update)(
AddData(inputData, 1),
CheckLastBatch((1, 1), (2, 1)),
AssertOnQuery { _.stateNodes.size === 1 },
AssertOnQuery { _.stateNodes.head.metrics.get("numOutputRows").get.value === 2 },
AssertOnQuery { _.stateNodes.head.metrics.get("numUpdatedStateRows").get.value === 2 },
AssertOnQuery { _.stateNodes.head.metrics.get("numTotalStateRows").get.value === 2 },
AddData(inputData, 2, 3),
CheckLastBatch((2, 2), (3, 2), (4, 1)),
AssertOnQuery { _.stateNodes.size === 1 },
AssertOnQuery { _.stateNodes.head.metrics.get("numOutputRows").get.value === 3 },
AssertOnQuery { _.stateNodes.head.metrics.get("numUpdatedStateRows").get.value === 3 },
AssertOnQuery { _.stateNodes.head.metrics.get("numTotalStateRows").get.value === 4 }
)
// Test with Complete mode
inputData.reset()
testStream(aggregated, Complete)(
AddData(inputData, 1),
CheckLastBatch((1, 1), (2, 1)),
AssertOnQuery { _.stateNodes.size === 1 },
AssertOnQuery { _.stateNodes.head.metrics.get("numOutputRows").get.value === 2 },
AssertOnQuery { _.stateNodes.head.metrics.get("numUpdatedStateRows").get.value === 2 },
AssertOnQuery { _.stateNodes.head.metrics.get("numTotalStateRows").get.value === 2 },
AddData(inputData, 2, 3),
CheckLastBatch((1, 1), (2, 2), (3, 2), (4, 1)),
AssertOnQuery { _.stateNodes.size === 1 },
AssertOnQuery { _.stateNodes.head.metrics.get("numOutputRows").get.value === 4 },
AssertOnQuery { _.stateNodes.head.metrics.get("numUpdatedStateRows").get.value === 3 },
AssertOnQuery { _.stateNodes.head.metrics.get("numTotalStateRows").get.value === 4 }
)
}
test("multiple keys") {
val inputData = MemoryStream[Int]
val aggregated =
inputData.toDF()
.groupBy($"value", $"value" + 1)
.agg(count("*"))
.as[(Int, Int, Long)]
testStream(aggregated, Update)(
AddData(inputData, 1, 2),
CheckLastBatch((1, 2, 1), (2, 3, 1)),
AddData(inputData, 1, 2),
CheckLastBatch((1, 2, 2), (2, 3, 2))
)
}
testQuietly("midbatch failure") {
val inputData = MemoryStream[Int]
FailureSingleton.firstTime = true
val aggregated =
inputData.toDS()
.map { i =>
if (i == 4 && FailureSingleton.firstTime) {
FailureSingleton.firstTime = false
sys.error("injected failure")
}
i
}
.groupBy($"value")
.agg(count("*"))
.as[(Int, Long)]
testStream(aggregated, Update)(
StartStream(),
AddData(inputData, 1, 2, 3, 4),
ExpectFailure[SparkException](),
StartStream(),
CheckLastBatch((1, 1), (2, 1), (3, 1), (4, 1))
)
}
test("typed aggregators") {
val inputData = MemoryStream[(String, Int)]
val aggregated = inputData.toDS().groupByKey(_._1).agg(typed.sumLong(_._2))
testStream(aggregated, Update)(
AddData(inputData, ("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)),
CheckLastBatch(("a", 30), ("b", 3), ("c", 1))
)
}
test("prune results by current_time, complete mode") {
import testImplicits._
val clock = new StreamManualClock
val inputData = MemoryStream[Long]
val aggregated =
inputData.toDF()
.groupBy($"value")
.agg(count("*"))
.where('value >= current_timestamp().cast("long") - 10L)
testStream(aggregated, Complete)(
StartStream(Trigger.ProcessingTime("10 seconds"), triggerClock = clock),
// advance clock to 10 seconds, all keys retained
AddData(inputData, 0L, 5L, 5L, 10L),
AdvanceManualClock(10 * 1000),
CheckLastBatch((0L, 1), (5L, 2), (10L, 1)),
// advance clock to 20 seconds, should retain keys >= 10
AddData(inputData, 15L, 15L, 20L),
AdvanceManualClock(10 * 1000),
CheckLastBatch((10L, 1), (15L, 2), (20L, 1)),
// advance clock to 30 seconds, should retain keys >= 20
AddData(inputData, 0L, 85L),
AdvanceManualClock(10 * 1000),
CheckLastBatch((20L, 1), (85L, 1)),
// bounce stream and ensure correct batch timestamp is used
// i.e., we don't take it from the clock, which is at 90 seconds.
StopStream,
AssertOnQuery { q => // clear the sink
q.sink.asInstanceOf[MemorySink].clear()
q.commitLog.purge(3)
// advance by a minute i.e., 90 seconds total
clock.advance(60 * 1000L)
true
},
StartStream(Trigger.ProcessingTime("10 seconds"), triggerClock = clock),
// The commit log was blown away, causing the last batch to re-run
CheckLastBatch((20L, 1), (85L, 1)),
AssertOnQuery { q =>
clock.getTimeMillis() == 90000L
},
// advance clock to 100 seconds, should retain keys >= 90
AddData(inputData, 85L, 90L, 100L, 105L),
AdvanceManualClock(10 * 1000),
CheckLastBatch((90L, 1), (100L, 1), (105L, 1))
)
}
test("prune results by current_date, complete mode") {
import testImplicits._
val clock = new StreamManualClock
val tz = TimeZone.getDefault.getID
val inputData = MemoryStream[Long]
val aggregated =
inputData.toDF()
.select(to_utc_timestamp(from_unixtime('value * DateTimeUtils.SECONDS_PER_DAY), tz))
.toDF("value")
.groupBy($"value")
.agg(count("*"))
.where($"value".cast("date") >= date_sub(current_date(), 10))
.select(($"value".cast("long") / DateTimeUtils.SECONDS_PER_DAY).cast("long"), $"count(1)")
testStream(aggregated, Complete)(
StartStream(Trigger.ProcessingTime("10 day"), triggerClock = clock),
// advance clock to 10 days, should retain all keys
AddData(inputData, 0L, 5L, 5L, 10L),
AdvanceManualClock(DateTimeUtils.MILLIS_PER_DAY * 10),
CheckLastBatch((0L, 1), (5L, 2), (10L, 1)),
// advance clock to 20 days, should retain keys >= 10
AddData(inputData, 15L, 15L, 20L),
AdvanceManualClock(DateTimeUtils.MILLIS_PER_DAY * 10),
CheckLastBatch((10L, 1), (15L, 2), (20L, 1)),
// advance clock to 30 days, should retain keys >= 20
AddData(inputData, 85L),
AdvanceManualClock(DateTimeUtils.MILLIS_PER_DAY * 10),
CheckLastBatch((20L, 1), (85L, 1)),
// bounce stream and ensure correct batch timestamp is used
// i.e., we don't take it from the clock, which is at 90 days.
StopStream,
AssertOnQuery { q => // clear the sink
q.sink.asInstanceOf[MemorySink].clear()
q.commitLog.purge(3)
// advance by 60 days i.e., 90 days total
clock.advance(DateTimeUtils.MILLIS_PER_DAY * 60)
true
},
StartStream(Trigger.ProcessingTime("10 day"), triggerClock = clock),
// The commit log was blown away, causing a re-run of the last batch
CheckLastBatch((20L, 1), (85L, 1)),
// advance clock to 100 days, should retain keys >= 90
AddData(inputData, 85L, 90L, 100L, 105L),
AdvanceManualClock(DateTimeUtils.MILLIS_PER_DAY * 10),
CheckLastBatch((90L, 1), (100L, 1), (105L, 1))
)
}
test("SPARK-19690: do not convert batch aggregation in streaming query to streaming") {
val streamInput = MemoryStream[Int]
val batchDF = Seq(1, 2, 3, 4, 5)
.toDF("value")
.withColumn("parity", 'value % 2)
.groupBy('parity)
.agg(count("*") as 'joinValue)
val joinDF = streamInput
.toDF()
.join(batchDF, 'value === 'parity)
// make sure we're planning an aggregate in the first place
assert(batchDF.queryExecution.optimizedPlan match { case _: Aggregate => true })
testStream(joinDF, Append)(
AddData(streamInput, 0, 1, 2, 3),
CheckLastBatch((0, 0, 2), (1, 1, 3)),
AddData(streamInput, 0, 1, 2, 3),
CheckLastBatch((0, 0, 2), (1, 1, 3)))
}
/**
* This method verifies certain properties in the SparkPlan of a streaming aggregation.
* First, it checks that the child of a `StateStoreRestoreExec` creates the desired
* data distribution, where the child could be an Exchange, or a `HashAggregateExec` which
* already provides the expected data distribution.
*
* Second, it checks that the child provides the expected number of partitions.
*
* Third, it checks that no unnecessary shuffle is added between
* `StateStoreRestoreExec` and `StateStoreSaveExec`.
*/
private def checkAggregationChain(
se: StreamExecution,
expectShuffling: Boolean,
expectedPartition: Int): Boolean = {
val executedPlan = se.lastExecution.executedPlan
val restore = executedPlan
.collect { case ss: StateStoreRestoreExec => ss }
.head
restore.child match {
case node: UnaryExecNode =>
assert(node.outputPartitioning.numPartitions === expectedPartition,
"Didn't get the expected number of partitions.")
if (expectShuffling) {
assert(node.isInstanceOf[Exchange], s"Expected a shuffle, got: ${node.child}")
} else {
assert(!node.isInstanceOf[Exchange], "Didn't expect a shuffle")
}
case _ => fail("Expected no shuffling")
}
var reachedRestore = false
// Check that there should be no exchanges after `StateStoreRestoreExec`
executedPlan.foreachUp { p =>
if (reachedRestore) {
assert(!p.isInstanceOf[Exchange], "There should be no further exchanges")
} else {
reachedRestore = p.isInstanceOf[StateStoreRestoreExec]
}
}
true
}
test("SPARK-21977: coalesce(1) with 0 partition RDD should be repartitioned to 1") {
val inputSource = new BlockRDDBackedSource(spark)
MockSourceProvider.withMockSources(inputSource) {
// `coalesce(1)` changes the partitioning of data to `SinglePartition`, which by default
// satisfies the required distributions of all aggregations. Therefore our SparkPlan does
// not contain any shuffling. However, `coalesce(1)` only guarantees that the RDD has at
// most 1 partition, which means that if the input RDD has 0 partitions, nothing gets
// executed. The StateStores therefore don't save any delta files for that trigger, which
// then leads to `FileNotFoundException`s in the subsequent batch.
// This isn't the only problem though. Once we introduce a shuffle before
// `StateStoreRestoreExec`, the input to the operator is an empty iterator. When performing
// `groupBy().agg(...)`, `HashAggregateExec` returns a `0` value for all aggregations. If
// we fail to restore the previous state in `StateStoreRestoreExec`, we save the 0 value in
// `StateStoreSaveExec`, losing all previous state.
val aggregated: Dataset[Long] =
spark.readStream.format((new MockSourceProvider).getClass.getCanonicalName)
.load().coalesce(1).groupBy().count().as[Long]
testStream(aggregated, Complete())(
AddBlockData(inputSource, Seq(1)),
CheckLastBatch(1),
AssertOnQuery("Verify no shuffling") { se =>
checkAggregationChain(se, expectShuffling = false, 1)
},
AddBlockData(inputSource), // create an empty trigger
CheckLastBatch(1),
AssertOnQuery("Verify that no exchange is required") { se =>
checkAggregationChain(se, expectShuffling = false, 1)
},
AddBlockData(inputSource, Seq(2, 3)),
CheckLastBatch(3),
AddBlockData(inputSource),
CheckLastBatch(3),
StopStream
)
}
}
test("SPARK-21977: coalesce(1) with aggregation should still be repartitioned when it " +
"has non-empty grouping keys") {
val inputSource = new BlockRDDBackedSource(spark)
MockSourceProvider.withMockSources(inputSource) {
withTempDir { tempDir =>
// `coalesce(1)` changes the partitioning of data to `SinglePartition` which by default
// satisfies the required distributions of all aggregations. However, when we have
// non-empty grouping keys, in streaming we must repartition to
// `spark.sql.shuffle.partitions`, otherwise only a single StateStore is used to process
// all keys. That may be fine on its own, but if the user later removes the coalesce(1)
// or changes it to, say, coalesce(2), the default behavior is to shuffle to
// `spark.sql.shuffle.partitions` many StateStores. When this happens, all StateStores
// except 1 will be missing their previous delta files, which causes the stream to fail
// with FileNotFoundException.
def createDf(partitions: Int): Dataset[(Long, Long)] = {
spark.readStream
.format((new MockSourceProvider).getClass.getCanonicalName)
.load().coalesce(partitions).groupBy('a % 1).count().as[(Long, Long)]
}
testStream(createDf(1), Complete())(
StartStream(checkpointLocation = tempDir.getAbsolutePath),
AddBlockData(inputSource, Seq(1)),
CheckLastBatch((0L, 1L)),
AssertOnQuery("Verify addition of exchange operator") { se =>
checkAggregationChain(
se,
expectShuffling = true,
spark.sessionState.conf.numShufflePartitions)
},
StopStream
)
testStream(createDf(2), Complete())(
StartStream(checkpointLocation = tempDir.getAbsolutePath),
Execute(se => se.processAllAvailable()),
AddBlockData(inputSource, Seq(2), Seq(3), Seq(4)),
CheckLastBatch((0L, 4L)),
AssertOnQuery("Verify no exchange added") { se =>
checkAggregationChain(
se,
expectShuffling = false,
spark.sessionState.conf.numShufflePartitions)
},
AddBlockData(inputSource),
CheckLastBatch((0L, 4L)),
StopStream
)
}
}
}
test("SPARK-22230: last should change with new batches") {
val input = MemoryStream[Int]
val aggregated = input.toDF().agg(last('value))
testStream(aggregated, OutputMode.Complete())(
AddData(input, 1, 2, 3),
CheckLastBatch(3),
AddData(input, 4, 5, 6),
CheckLastBatch(6),
AddData(input),
CheckLastBatch(6),
AddData(input, 0),
CheckLastBatch(0)
)
}
test("SPARK-23004: Ensure that TypedImperativeAggregate functions do not throw errors") {
// See the JIRA SPARK-23004 for more details. In short, this test reproduces the error
// by ensuring the following.
// - A streaming query with a streaming aggregation.
// - Aggregation function 'collect_list' that is a subclass of TypedImperativeAggregate.
// - Post shuffle partition has exactly 128 records (i.e. the threshold at which
// ObjectHashAggregateExec falls back to sort-based aggregation). This is done by having a
// micro-batch with 128 records that shuffle to a single partition.
// This test throws the exact error reported in SPARK-23004 without the corresponding fix.
withSQLConf("spark.sql.shuffle.partitions" -> "1") {
val input = MemoryStream[Int]
val df = input.toDF().toDF("value")
.selectExpr("value as group", "value")
.groupBy("group")
.agg(collect_list("value"))
testStream(df, outputMode = OutputMode.Update)(
AddData(input, (1 to spark.sqlContext.conf.objectAggSortBasedFallbackThreshold): _*),
AssertOnQuery { q =>
q.processAllAvailable()
true
}
)
}
}
/** Add blocks of data to the `BlockRDDBackedSource`. */
case class AddBlockData(source: BlockRDDBackedSource, data: Seq[Int]*) extends AddData {
override def addData(query: Option[StreamExecution]): (Source, Offset) = {
source.addBlocks(data: _*)
(source, LongOffset(source.counter))
}
}
/**
* A Streaming Source that is backed by a BlockRDD and that can create RDDs with 0 blocks at will.
*/
class BlockRDDBackedSource(spark: SparkSession) extends Source {
var counter = 0L
private val blockMgr = SparkEnv.get.blockManager
private var blocks: Seq[BlockId] = Seq.empty
def addBlocks(dataBlocks: Seq[Int]*): Unit = synchronized {
dataBlocks.foreach { data =>
val id = TestBlockId(counter.toString)
blockMgr.putIterator(id, data.iterator, StorageLevel.MEMORY_ONLY)
blocks ++= id :: Nil
counter += 1
}
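// Advance the counter even when no data blocks were passed, so that getOffset reports a
// new offset and the next trigger runs an empty batch (see AddBlockData(inputSource) above).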
counter += 1
}
override def getOffset: Option[Offset] = synchronized {
if (counter == 0) None else Some(LongOffset(counter))
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = synchronized {
val rdd = new BlockRDD[Int](spark.sparkContext, blocks.toArray)
.map(i => InternalRow(i)) // we don't really care about the values in this test
blocks = Seq.empty
spark.internalCreateDataFrame(rdd, schema, isStreaming = true).toDF()
}
override def schema: StructType = MockSourceProvider.fakeSchema
override def stop(): Unit = {
blockMgr.getMatchingBlockIds(_.isInstanceOf[TestBlockId]).foreach(blockMgr.removeBlock(_))
}
}
}
|
bravo-zhang/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingAggregationSuite.scala
|
Scala
|
apache-2.0
| 22,605 |
package models
import org.bson.types.ObjectId
import play.api.libs.functional.syntax._
import play.api.libs.json.{JsPath, Reads}
/**
* Comic Class representing comic meta data and strategies for parsing strips from comic website.
* @param id Object id uniquely identifying a Comic.
* @param hostname String usually the domain and tld of the comic website, useful for matching or creating urls.
* @param title String represents the general name of the comic.
* @param creator String the name of the comic creator.
* @param bannerImage String location of the pre-generated image representing the comic in-app.
* @param firstUrl String a url of the very first strip of the comic. ETL starts here if delta is false.
*/
case class Comic(id: ObjectId, hostname: String, title: String, creator: String, isAdvertised: Boolean,
patreonUrl: Option[String], store_url: Option[String], bannerImage: String, firstUrl: String,
strategy: Strategy)
case class Strategy(next: Vector[String], image: Vector[String], title: Option[Vector[String]],
imageTitle: Option[Vector[String]], imageAlt: Option[Vector[String]],
bonusImage: Option[Vector[String]])
object Comic {
implicit val objectIdReader = ObjectIdReader
implicit val strategyReader: Reads[Strategy] = (
(JsPath \ "next").read[Vector[String]] and
(JsPath \ "image").read[Vector[String]] and
(JsPath \ "title").readNullable[Vector[String]] and
(JsPath \ "image_title").readNullable[Vector[String]] and
(JsPath \ "image_alt").readNullable[Vector[String]] and
(JsPath \ "bonus_image").readNullable[Vector[String]])(Strategy.apply _)
implicit val comicReader: Reads[Comic] = (
(JsPath \ "id").read[ObjectId] and
(JsPath \ "hostname").read[String] and
(JsPath \ "title").read[String] and
(JsPath \ "creator").read[String] and
(JsPath \ "is_advertised").read[Boolean] and
(JsPath \ "patreon_url").readNullable[String] and
(JsPath \ "store_url").readNullable[String] and
(JsPath \ "banner_image").read[String] and
(JsPath \ "first_url").read[String] and
(JsPath \ "strategy").read[Strategy])(Comic.apply _)
}
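// A minimal usage sketch (assumed, not part of the original file): with the implicit
// readers above in scope, a comic document can be decoded from JSON, for example:
//
//   import play.api.libs.json.{JsResult, Json}
//   val parsed: JsResult[Comic] = Json.parse(jsonString).validate[Comic]
//
// where `jsonString` is a hypothetical payload using the snake_case keys expected by
// comicReader ("is_advertised", "patreon_url", "banner_image", "first_url", ...).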
|
comicgator/comicgator
|
maestro/app/models/Comic.scala
|
Scala
|
mit
| 2,262 |
package org.jetbrains.plugins.scala
import com.intellij.ProjectTopics
import com.intellij.execution.ExecutionException
import com.intellij.openapi.Disposable
import com.intellij.openapi.editor.Document
import com.intellij.openapi.externalSystem.ExternalSystemModulePropertyManager
import com.intellij.openapi.fileEditor.{FileDocumentManager, FileEditorManager}
import com.intellij.openapi.module._
import com.intellij.openapi.project.{DumbService, Project, ProjectUtil}
import com.intellij.openapi.roots._
import com.intellij.openapi.roots.impl.libraries.LibraryEx
import com.intellij.openapi.roots.libraries.{Library, LibraryTablesRegistrar}
import com.intellij.openapi.util.{Key, UserDataHolder, UserDataHolderEx}
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.psi.{LanguageSubstitutors, PsiElement, PsiFile}
import com.intellij.util.PathsList
import org.jetbrains.annotations.TestOnly
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.stubs.elements.ScStubElementType
import org.jetbrains.plugins.scala.lang.resolve.processor.precedence.PrecedenceTypes
import org.jetbrains.plugins.scala.macroAnnotations.CachedInUserData
import org.jetbrains.plugins.scala.project.settings.{ScalaCompilerConfiguration, ScalaCompilerSettings, ScalaCompilerSettingsProfile}
import org.jetbrains.plugins.scala.settings.ScalaProjectSettings
import org.jetbrains.plugins.scala.util.{ScalaPluginJars, UnloadAwareDisposable}
import org.jetbrains.sbt.Sbt
import org.jetbrains.sbt.language.SbtFileImpl
import org.jetbrains.sbt.project.module.SbtModuleType
import java.io.File
import java.net.URL
import scala.jdk.CollectionConverters._
import scala.language.implicitConversions
import scala.ref.Reference
/**
* @author Pavel Fatin
*/
package object project {
object UserDataKeys {
// used to "attach" a module to some scala file, which is out of any module for some reason
// the primary purpose is to attach a module for a scala scratch file
val SCALA_ATTACHED_MODULE = new Key[Reference[Module]]("ScalaAttachedModule")
}
implicit class LibraryExt(private val library: Library) extends AnyVal {
import LibraryExt._
def isScalaSdk: Boolean = library match {
case libraryEx: LibraryEx => libraryEx.isScalaSdk
case _ => false
}
def compilerVersion: Option[String] = name.flatMap(LibraryVersion.findFirstIn)
def hasRuntimeLibrary: Boolean = name.exists(isRuntimeLibrary)
private def name: Option[String] = Option(library.getName)
def jarUrls: Set[URL] =
library
.getFiles(OrderRootType.CLASSES)
.map(_.getPath)
.map(path => new URL(s"jar:file://$path"))
.toSet
}
object LibraryExt {
private val LibraryVersion = "(?<=[:\\-])\\d+\\.\\d+\\.\\d+[^:\\s]*".r
private[this] val RuntimeLibrary = "((?:scala|dotty|scala3)-library).+".r
private[this] val JarVersion = "(?<=-)\\d+\\.\\d+\\.\\d+\\S*(?=\\.jar$)".r
def isRuntimeLibrary(name: String): Boolean = RuntimeLibrary.findFirstIn(name).isDefined
def runtimeVersion(input: String): Option[String] = JarVersion.findFirstIn(input)
}
implicit class LibraryExExt(private val library: LibraryEx) extends AnyVal {
def isScalaSdk: Boolean = library.getKind == ScalaLibraryType.Kind
def properties: ScalaLibraryProperties = library.getProperties match {
case properties: ScalaLibraryProperties => properties
case _ => throw new IllegalStateException("Library is not a Scala SDK: " + library.getName)
}
}
implicit class ModuleExt(private val module: Module) extends AnyVal {
@CachedInUserData(module, ScalaCompilerConfiguration.modTracker(module.getProject))
private def scalaModuleSettings: Option[ScalaModuleSettings] =
ScalaModuleSettings(module)
def isBuildModule: Boolean =
module.getName.endsWith(org.jetbrains.sbt.Sbt.BuildModuleSuffix)
def isSourceModule: Boolean = SbtModuleType.unapply(module).isEmpty
def hasScala: Boolean =
scalaModuleSettings.isDefined
// TODO Generalize: hasScala(Version => Boolean), hasScala(_ >= Scala3)
def hasScala3: Boolean =
scalaModuleSettings.exists(_.hasScala3)
def languageLevel: Option[ScalaLanguageLevel] =
scalaModuleSettings.map(_.scalaLanguageLevel)
def hasNewCollectionsFramework: Boolean =
scalaModuleSettings.exists(_.hasNewCollectionsFramework)
def isIdBindingEnabled: Boolean =
scalaModuleSettings.exists(_.isIdBindingEnabled)
def scalaSdk: Option[LibraryEx] =
scalaModuleSettings.flatMap(_.scalaSdk)
def isSharedSourceModule: Boolean = ModuleType.get(module).getId == "SHARED_SOURCES_MODULE"
def isScalaJs: Boolean =
scalaModuleSettings.exists(_.isScalaJs)
def isScalaNative: Boolean =
scalaModuleSettings.exists(_.isScalaNative)
def hasNoIndentFlag: Boolean = scalaModuleSettings.exists(_.hasNoIndentFlag)
def hasOldSyntaxFlag: Boolean = scalaModuleSettings.exists(_.hasOldSyntaxFlag)
def isJvmModule: Boolean = !isScalaJs && !isScalaNative && !isSharedSourceModule
def findJVMModule: Option[Module] = {
if (isJvmModule) {
Some(module)
}
else if (isSharedSourceModule) {
val moduleManager = ModuleManager.getInstance(module.getProject)
val dependents = moduleManager.getModuleDependentModules(module).asScala
dependents.find(_.isJvmModule)
}
else {
sharedSourceDependency.flatMap(_.findJVMModule)
}
}
def sharedSourceDependency: Option[Module] =
ModuleRootManager.getInstance(module).getDependencies
.find(_.isSharedSourceModule)
def dependencyModules: Seq[Module] = {
val manager = ModuleManager.getInstance(module.getProject)
manager.getModules.filter(manager.isModuleDependent(module, _)).toSeq
}
def withDependencyModules: Seq[Module] =
module +: dependencyModules
def modifiableModel: ModifiableRootModel =
ModuleRootManager.getInstance(module).getModifiableModel
def libraries: Set[Library] = {
val processor = new CollectUniquesProcessorEx[Library]()
OrderEnumerator.orderEntries(module)
.librariesOnly()
.forEachLibrary(processor)
processor.results
}
def sbtVersion: Option[Version] =
scalaModuleSettings.flatMap(_.sbtVersion)
def isTrailingCommasEnabled: Boolean =
scalaModuleSettings.exists(_.isTrailingCommasEnabled)
def scalaCompilerSettingsProfile: ScalaCompilerSettingsProfile =
compilerConfiguration.getProfileForModule(module)
def scalaCompilerSettings: ScalaCompilerSettings =
compilerConfiguration.getSettingsForModule(module)
def configureScalaCompilerSettingsFrom(source: String, options: collection.Seq[String]): Unit = {
val baseDirectory = Option(ExternalSystemModulePropertyManager.getInstance(module).getRootProjectPath)
.getOrElse(module.getProject.getBasePath)
val compilerSettings = ScalaCompilerSettings.fromOptions(withPathsRelativeTo(baseDirectory, options.toSeq))
compilerConfiguration.configureSettingsForModule(module, source, compilerSettings)
}
private def withPathsRelativeTo(baseDirectory: String, options: Seq[String]): Seq[String] = options.map { option =>
if (option.startsWith("-Xplugin:")) {
val compoundPath = option.substring(9)
val compoundPathAbsolute = toAbsoluteCompoundPath(baseDirectory, compoundPath)
"-Xplugin:" + compoundPathAbsolute
} else {
option
}
}
// SCL-11861, SCL-18534
private def toAbsoluteCompoundPath(baseDirectory: String, compoundPath: String): String = {
// according to https://docs.scala-lang.org/overviews/compiler-options/index.html
// `,` is used as plugins separator: `-Xplugin PATHS1,PATHS2`
// but in SCL-11861 `;` is used
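// Illustrative example (hypothetical values): with baseDirectory = "/project" and
// compoundPath = "plugins/a.jar;plugins/b.jar" the result is
// "/project/plugins/a.jar;/project/plugins/b.jar".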
val pluginSeparator = if (compoundPath.contains(";")) ';' else ','
val paths = compoundPath.split(pluginSeparator)
val pathsAbsolute = paths.map(toAbsolutePath(baseDirectory, _))
pathsAbsolute.mkString(pluginSeparator.toString)
}
private def toAbsolutePath(baseDirectory: String, path: String): String = {
val file = new File(path)
if (file.isAbsolute) path
else new File(baseDirectory, path).getPath
}
private def compilerConfiguration =
ScalaCompilerConfiguration.instanceIn(module.getProject)
def scalaLanguageLevel: Option[ScalaLanguageLevel] =
scalaModuleSettings.map(_.scalaLanguageLevel)
def scalaMinorVersion: Option[ScalaVersion] =
scalaModuleSettings.flatMap(_.scalaMinorVersion)
def scalaMinorVersionOrDefault: ScalaVersion =
scalaMinorVersion.getOrElse(ScalaVersion.default)
def isCompilerStrictMode: Boolean =
scalaModuleSettings.exists(_.isCompilerStrictMode)
def scalaCompilerClasspath: Seq[File] = module.scalaSdk
.fold(throw new ScalaSdkNotConfiguredException(module)) {
_.properties.compilerClasspath
}
def literalTypesEnabled: Boolean =
scalaModuleSettings.exists(_.literalTypesEnabled)
/**
* @see https://github.com/non/kind-projector
*/
def kindProjectorPluginEnabled: Boolean =
kindProjectorPlugin.isDefined
def kindProjectorPlugin: Option[String] =
scalaModuleSettings.flatMap(_.kindProjectorPlugin)
def kindProjectorUnderscorePlaceholdersEnabled: Boolean =
scalaModuleSettings.exists(_.kindProjectorUnderscorePlaceholdersEnabled)
def YKindProjectorOptionEnabled: Boolean =
scalaModuleSettings.exists(_.YKindProjectorOptionEnabled)
def YKindProjectorUnderscoresOptionEnabled: Boolean =
scalaModuleSettings.exists(_.YKindProjectorUnderscoresOptionEnabled)
def betterMonadicForPluginEnabled: Boolean =
scalaModuleSettings.exists(_.betterMonadicForPluginEnabled)
def contextAppliedPluginEnabled: Boolean =
scalaModuleSettings.exists(_.contextAppliedPluginEnabled)
/**
* Should we check if it's a Single Abstract Method?
* In 2.11 works with -Xexperimental
* In 2.12 works by default
*
* @return true if language level and flags are correct
*/
def isSAMEnabled: Boolean =
scalaModuleSettings.exists(_.isSAMEnabled)
def isSource3Enabled: Boolean =
scalaModuleSettings.exists(_.hasSource3Flag)
def features: ScalaFeatures =
scalaModuleSettings.fold(ScalaFeatures.default)(_.features)
def isPartialUnificationEnabled: Boolean =
scalaModuleSettings.exists(_.isPartialUnificationEnabled)
def isMetaEnabled: Boolean =
scalaModuleSettings.exists(_.isMetaEnabled)
def customDefaultImports: Option[Seq[String]] =
scalaModuleSettings.flatMap(_.customDefaultImports)
}
class ScalaSdkNotConfiguredException(module: Module) extends IllegalArgumentException(s"No Scala SDK configured for module: ${module.getName}")
implicit class ProjectExt(private val project: Project) extends AnyVal {
def unloadAwareDisposable: Disposable =
UnloadAwareDisposable.forProject(project)
def subscribeToModuleRootChanged(parentDisposable: Disposable = unloadAwareDisposable)
(onRootsChanged: ModuleRootEvent => Unit): Unit =
project.getMessageBus.connect(parentDisposable).subscribe(
ProjectTopics.PROJECT_ROOTS,
new ModuleRootListener {
override def rootsChanged(event: ModuleRootEvent): Unit = onRootsChanged(event)
}
)
private def manager =
ModuleManager.getInstance(project)
def modules: Seq[Module] =
manager.getModules.toSeq
def sourceModules: Seq[Module] = manager.getModules.filter(_.isSourceModule).toSeq
def modifiableModel: ModifiableModuleModel =
manager.getModifiableModel
def hasScala: Boolean = modulesWithScala.nonEmpty
// TODO Generalize: hasScala(Version => Boolean), hasScala(_ >= Scala3)
@CachedInUserData(project, ProjectRootManager.getInstance(project))
def hasScala3: Boolean = modulesWithScala.exists(_.hasScala3)
/**
* @return list of modules with Scala SDK setup
* @note it doesn't return any *-build modules even though they contain a synthetic Scala SDK
*/
def modulesWithScala: Seq[Module] =
if (project.isDisposed) Seq.empty
else modulesWithScalaCached
@CachedInUserData(project, ProjectRootManager.getInstance(project))
private def modulesWithScalaCached: Seq[Module] =
modules.filter(m => m.hasScala && !m.isBuildModule)
def anyScalaModule: Option[Module] =
modulesWithScala.headOption
def libraries: Seq[Library] =
LibraryTablesRegistrar.getInstance.getLibraryTable(project).getLibraries.toSeq
def baseDir: VirtualFile = ProjectUtil.guessProjectDir(project)
// TODO: SCL-18097: it should be per-module, like for all other compiler flags (e.g. for isSAMEnabled)
def isPartialUnificationEnabled: Boolean = modulesWithScala.exists(_.isPartialUnificationEnabled)
def selectedDocument: Option[Document] =
Option(FileEditorManager.getInstance(project).getSelectedTextEditor)
.map(_.getDocument)
def isIntellijScalaPluginProject: Boolean = {
val name = project.getName
name == "scalaUltimate" || name == "scalaCommunity"
}
}
implicit class UserDataHolderExt(private val holder: UserDataHolder) extends AnyVal {
def getOrUpdateUserData[T](key: Key[T], update: => T): T = {
Option(holder.getUserData(key)).getOrElse {
val newValue = update
holder match {
case ex: UserDataHolderEx =>
ex.putUserDataIfAbsent(key, newValue)
case _ =>
holder.putUserData(key, newValue)
newValue
}
}
}
}
implicit class VirtualFileExt(private val file: VirtualFile) extends AnyVal {
def isScala3(implicit project: Project): Boolean =
LanguageSubstitutors.getInstance.substituteLanguage(
ScalaLanguage.INSTANCE,
file,
project
) != ScalaLanguage.INSTANCE
def findDocument: Option[Document] =
Option(FileDocumentManager.getInstance.getDocument(file))
def toFile: File =
new File(file.getCanonicalPath)
}
implicit class ProjectPsiFileExt(private val file: PsiFile) extends AnyVal {
def module: Option[Module] = attachedFileModule.orElse(projectModule)
@CachedInUserData(file, ProjectRootManager.getInstance(file.getProject))
private def projectModule: Option[Module] =
inReadAction { // assuming that most of the time it will be read from cache
val module = ModuleUtilCore.findModuleForPsiElement(file)
// for build.sbt files the appropriate module is the one with `-build` suffix
if (module != null && file.is[SbtFileImpl])
findBuildModule(module)
else
Option(module)
}
def scratchFileModule: Option[Module] =
attachedFileModule
private def attachedFileModule: Option[Module] =
Option(file.getUserData(UserDataKeys.SCALA_ATTACHED_MODULE)).flatMap(_.get)
def isMetaEnabled: Boolean =
!ScStubElementType.Processing &&
!DumbService.isDumb(file.getProject) &&
isEnabledIn(_.isMetaEnabled)
def isTrailingCommasEnabled: Boolean = {
import ScalaProjectSettings.TrailingCommasMode._
ScalaProjectSettings.getInstance(file.getProject).getTrailingCommasMode match {
case Enabled => true
case Disabled => false
case Auto => isEnabledIn(_.isTrailingCommasEnabled)
}
}
def isIdBindingEnabled: Boolean = isEnabledIn(_.isIdBindingEnabled)
private def isEnabledIn(predicate: Module => Boolean): Boolean =
isUnitTestMode && !ProjectPsiFileExt.enableFeaturesCheckInTests ||
file.module.exists(predicate)
}
object ProjectPsiFileExt {
// TODO: this is a dirty hack to suppress skipping features check in unit tests
// ideally we shouldn't check for `isUnitTestMode`, we should fix expected test data in all affected tests
@TestOnly
var enableFeaturesCheckInTests = false
}
private def findBuildModule(m: Module): Option[Module] = m match {
case SbtModuleType(_) => Some(m)
case _ => moduleByName(m.getProject, s"${m.getName}${Sbt.BuildModuleSuffix}")
}
//noinspection SameParameterValue
private def moduleByName(project: Project, name: String): Option[Module] =
ModuleManager.getInstance(project).getModules.find(_.getName == name)
implicit class ProjectPsiElementExt(private val element: PsiElement) extends AnyVal {
def module: Option[Module] = Option(element.getContainingFile).flatMap(_.module)
def isInScalaModule: Boolean = module.exists(_.hasScala)
def isInScala3Module: Boolean = module.exists(_.hasScala3)
def isCompilerStrictMode: Boolean = module.exists(_.isCompilerStrictMode)
def scalaLanguageLevel: Option[ScalaLanguageLevel] = module.flatMap(_.scalaLanguageLevel)
def scalaLanguageLevelOrDefault: ScalaLanguageLevel = scalaLanguageLevel.getOrElse(ScalaLanguageLevel.getDefault)
def scalaMinorVersion: Option[ScalaVersion] = module.flatMap(_.scalaMinorVersion)
def scalaMinorVersionOrDefault: ScalaVersion = scalaMinorVersion.getOrElse(ScalaVersion.default)
/**
* Is kind-projector plugin enabled or is -Ykind-projector scala 3 compiler option set.
*/
def kindProjectorEnabled: Boolean =
kindProjectorPluginEnabled || YKindProjectorOptionEnabled || YKindProjectorUnderscoresOptionEnabled
def underscoreWidlcardsDisabled: Boolean =
kindProjectorUnderscorePlaceholdersEnabled || YKindProjectorUnderscoresOptionEnabled
def kindProjectorPluginEnabled: Boolean = isDefinedInModuleOrProject(_.kindProjectorPluginEnabled)
def kindProjectorPlugin: Option[String] = inThisModuleOrProject(_.kindProjectorPlugin).flatten
def kindProjectorUnderscorePlaceholdersEnabled: Boolean = isDefinedInModuleOrProject(_.kindProjectorUnderscorePlaceholdersEnabled)
def YKindProjectorOptionEnabled: Boolean = isDefinedInModuleOrProject(_.YKindProjectorOptionEnabled)
def YKindProjectorUnderscoresOptionEnabled: Boolean = isDefinedInModuleOrProject(_.YKindProjectorUnderscoresOptionEnabled)
def betterMonadicForEnabled: Boolean = isDefinedInModuleOrProject(_.betterMonadicForPluginEnabled)
def contextAppliedEnabled: Boolean = isDefinedInModuleOrProject(_.contextAppliedPluginEnabled)
def isSAMEnabled: Boolean = isDefinedInModuleOrProject(_.isSAMEnabled)
def isSource3Enabled: Boolean = isDefinedInModuleOrProject(_.isSource3Enabled)
def isScala3OrSource3Enabled: Boolean = isDefinedInModuleOrProject(m => m.hasScala3 || m.isSource3Enabled)
def features: ScalaFeatures =
inThisModuleOrProject(_.features).getOrElse(ScalaFeatures.default)
def literalTypesEnabled: Boolean = {
val file = element.getContainingFile
file != null && (file.getLanguage == Scala3Language.INSTANCE || file.isDefinedInModuleOrProject(_.literalTypesEnabled))
}
def partialUnificationEnabled: Boolean = isDefinedInModuleOrProject(_.isPartialUnificationEnabled)
def newCollectionsFramework: Boolean = module.exists(_.hasNewCollectionsFramework)
def isMetaEnabled: Boolean =
element.isValid && (element.getContainingFile match {
case file: ScalaFile if !file.isCompiled => file.isMetaEnabled
case _ => false
})
def defaultImports: Seq[String] = PrecedenceTypes.forElement(element).defaultImports
private[ProjectPsiElementExt] def isDefinedInModuleOrProject(predicate: Module => Boolean): Boolean =
inThisModuleOrProject(predicate).getOrElse(false)
private def inThisModuleOrProject[T](predicate: Module => T): Option[T] =
module
.orElse(element.getProject.anyScalaModule)
.map(predicate)
}
implicit class PathsListExt(private val list: PathsList) extends AnyVal {
def addScalaCompilerClassPath(module: Module): Unit =
try {
val files = module.scalaCompilerClasspath.asJava
list.addAllFiles(files)
} catch {
case e: IllegalArgumentException => //noinspection ReferencePassedToNls
throw new ExecutionException(e.getMessage.replace("SDK", "facet"))
}
def addRunners(): Unit = list.add(ScalaPluginJars.runnersJar)
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/project/package.scala
|
Scala
|
apache-2.0
| 20,534 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.calcite
import org.apache.flink.table.`type`.{InternalType, InternalTypes}
import org.junit.{Assert, Test}
class FlinkTypeFactoryTest {
@Test
def testInternalToRelType(): Unit = {
val typeFactory = new FlinkTypeFactory(new FlinkTypeSystem)
def test(t: InternalType): Unit = {
Assert.assertEquals(
t,
FlinkTypeFactory.toInternalType(
typeFactory.createTypeFromInternalType(t, isNullable = true))
)
Assert.assertEquals(
t,
FlinkTypeFactory.toInternalType(
typeFactory.createTypeFromInternalType(t, isNullable = false))
)
// twice for cache.
Assert.assertEquals(
t,
FlinkTypeFactory.toInternalType(
typeFactory.createTypeFromInternalType(t, isNullable = true))
)
Assert.assertEquals(
t,
FlinkTypeFactory.toInternalType(
typeFactory.createTypeFromInternalType(t, isNullable = false))
)
}
test(InternalTypes.BOOLEAN)
test(InternalTypes.BYTE)
test(InternalTypes.STRING)
test(InternalTypes.DOUBLE)
test(InternalTypes.FLOAT)
test(InternalTypes.INT)
test(InternalTypes.LONG)
test(InternalTypes.SHORT)
test(InternalTypes.BINARY)
test(InternalTypes.DATE)
test(InternalTypes.TIME)
test(InternalTypes.TIMESTAMP)
test(InternalTypes.createArrayType(InternalTypes.DOUBLE))
test(InternalTypes.createMapType(InternalTypes.DOUBLE, InternalTypes.STRING))
test(InternalTypes.createRowType(InternalTypes.DOUBLE, InternalTypes.STRING))
}
}
|
ueshin/apache-flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/calcite/FlinkTypeFactoryTest.scala
|
Scala
|
apache-2.0
| 2,401 |
package com.dividezero.stubby.core.service.model
import com.dividezero.stubby.core.model.StubResponse
// Returned by the 'findMatch' method
class StubServiceResult(
val attempts: List[MatchResult],
val response: Option[StubResponse],
val delay: Option[Int]) {
def this(attempts: List[MatchResult]) = this(attempts, None, None)
def matchFound(): Boolean = attempts.exists(_.matches)
}
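// Typical use (sketch, assumed): the stub service accumulates one MatchResult per stub it
// tries while searching; callers then check `matchFound()` and, on success, use `response`
// and `delay` to build the reply.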
|
themillhousegroup/http-stub-server-scala
|
core/src/main/scala/com/dividezero/stubby/core/service/model/StubServiceResult.scala
|
Scala
|
apache-2.0
| 411 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.hbase.example
import org.apache.spark.SparkContext
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.io.Text
import org.apache.spark.SparkConf
import com.cloudera.spark.hbase.HBaseContext
object HBaseBulkPutExampleFromFile {
def main(args: Array[String]) {
if (args.length < 3) {
System.out.println("HBaseBulkPutExampleFromFile {tableName} {columnFamily} {inputFile}")
return
}
val tableName = args(0)
val columnFamily = args(1)
val inputFile = args(2)
val sparkConf = new SparkConf().setAppName("HBaseBulkPutExampleFromFile " +
tableName + " " + columnFamily + " " + inputFile)
val sc = new SparkContext(sparkConf)
val rdd = sc.hadoopFile(
inputFile,
classOf[TextInputFormat],
classOf[LongWritable],
classOf[Text]).map(v => {
System.out.println("reading-" + v._2.toString())
v._2.toString()
})
val conf = HBaseConfiguration.create()
conf.addResource(new Path("/etc/hbase/conf/core-site.xml"))
conf.addResource(new Path("/etc/hbase/conf/hdfs-site.xml"))
conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"))
val hbaseContext = new HBaseContext(sc, conf)
hbaseContext.bulkPut[String](rdd,
tableName,
(putRecord) => {
System.out.println("hbase-" + putRecord)
val put = new Put(Bytes.toBytes("Value- " + putRecord))
put.add(Bytes.toBytes("c"), Bytes.toBytes("1"), Bytes.toBytes(putRecord.length()))
put
},
true)
}
}
|
mrsqueeze/SparkOnHBase
|
src/main/scala/com/cloudera/spark/hbase/example/HBaseBulkPutExampleFromFile.scala
|
Scala
|
apache-2.0
| 2,684 |
package org.jetbrains.plugins.scala.lang.resolveSemanticDb
import java.nio.file.{Files, Path}
import scala.collection.mutable
import scala.meta.internal.semanticdb.Locator
case class SDbRef(symbol: String, position: TextPos, endPosition: TextPos, targetPosition: Option[TextPos]) {
def range: (TextPos, TextPos) = (position, endPosition)
lazy val pointsToLocal: Boolean = symbol.matches(raw"local\d+")
override def toString: String =
s"$symbol($position..$endPosition) -> ${targetPosition.fold("<no position>")(_.toString)}"
}
object SDbRef {
implicit val ordering: Ordering[SDbRef] =
Ordering.by[SDbRef, TextPos](_.position)
.orElseBy(_.endPosition)
.orElseBy(_.symbol)
}
case class SDbFile(path: String, references: Seq[SDbRef]) {
def referencesAt(pos: TextPos, empty: Boolean): Seq[SDbRef] =
references.filter(if (empty) _.range.is(pos) else _.range.contains(pos))
}
case class SemanticDbStore(files: Seq[SDbFile]) {
def serialized: String = {
val b = new StringBuilder
for (file <- files) {
b ++= "Document "
b ++= file.path
b += '\n'
for (ref <- file.references) {
b ++= s"(${ref.position}..${ref.endPosition}) ${ref.symbol}"
ref.targetPosition.foreach { targetPosition =>
b ++= s" -> $targetPosition"
}
b += '\n'
}
b += '\n'
}
val resultText = b.result()
// check that serialize and fromText work together
assert(this == SemanticDbStore.fromText(resultText))
resultText
}
}
object SemanticDbStore {
def fromSemanticDbPath(path: Path): SemanticDbStore = {
type UnfinishedRef = (String, TextPos, TextPos)
val positionOfSymbols = mutable.Map.empty[String, TextPos]
val unfinishedFiles = mutable.Map.empty[String, Seq[UnfinishedRef]]
Locator(path) { (_, payload) =>
for (doc <- payload.documents) {
val refs = Seq.newBuilder[UnfinishedRef]
for (occurrence <- doc.occurrences) {
def start = TextPos.ofStart(occurrence.range.get)
def end = TextPos.ofEnd(occurrence.range.get)
if (occurrence.role.isDefinition) positionOfSymbols += occurrence.symbol -> start
else if (occurrence.role.isReference) refs += ((occurrence.symbol, start, end))
}
unfinishedFiles += doc.uri -> refs.result()
}
}
val files = unfinishedFiles.iterator
.map {
case (path, unfinishedRefs) =>
val refs =
for ((symbol, start, end) <- unfinishedRefs)
yield SDbRef(symbol, start, end, targetPosition = positionOfSymbols.get(symbol))
SDbFile(path, refs.sorted)
}
.toSeq
.sortBy(_.path)
SemanticDbStore(files)
}
def fromTextFile(path: Path): SemanticDbStore =
fromText(Files.readString(path))
def fromText(text: String): SemanticDbStore = {
val lines = text.linesIterator
val files = Seq.newBuilder[SDbFile]
while (lines.hasNext) {
val pathLine = lines.next()
assert(pathLine.startsWith("Document "))
val path = pathLine.stripPrefix("Document ")
val refs =
for (refLine <- lines.takeWhile(_.nonEmpty)) yield {
refLine match {
case RefFromLine(ref) => ref
case s => throw new Exception("not a refline: " + s)
}
}
files += SDbFile(path, refs.toSeq)
}
SemanticDbStore(files.result())
}
private object RefFromLine {
private val pos = raw"(\d+):(\d+)"
private val RefLineParts = raw"\($pos\.\.$pos\) (.+?)(?: -> $pos)?".r
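// Example of a line this pattern accepts (hypothetical symbol name):
//   (12:4..12:10) example/Foo.bar(). -> 3:8
// The trailing " -> line:col" target position is optional.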
def unapply(s: String): Option[SDbRef] = {
s match {
case RefLineParts(startLine, startCol, endLine, endCol, symbol, targetLine, targetCol) =>
val targetPosition =
if (targetLine == null) None
else Some(TextPos(targetLine.toInt, targetCol.toInt))
Some(SDbRef(symbol, TextPos(startLine.toInt, startCol.toInt), TextPos(endLine.toInt, endCol.toInt), targetPosition))
case _ => None
}
}
}
/* Code to convert semanticdb binary data to new text based format
def main(args: Array[String]): Unit = {
val main = Path.of("/home/tobi/workspace/intellij-scala/community/scala/scala-impl/testdata/lang/resolveSemanticDb/out")
Files.list(main)
.filter(Files.isDirectory(_))
.forEach { path =>
val store = SemanticDbStore.fromSemanticDbPath(path)
Files.writeString(main.resolve(path.getFileName.toString + ".semdb"), store.serialized)
}
}
*/
}
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/lang/resolveSemanticDb/SemanticDb.scala
|
Scala
|
apache-2.0
| 4,538 |
/* Copyright 2009-2021 EPFL, Lausanne */
package stainless
package extraction
package imperative
trait Definitions extends oo.Trees { self: Trees =>
override type Symbols >: Null <: ImperativeAbstractSymbols
trait ImperativeAbstractSymbols
extends OOAbstractSymbols
with imperative.SymbolOps
with imperative.TypeOps {
self0: Symbols =>
// The only value that can be assigned to `trees`, but that has to be
// done in a concrete class explicitly overriding `trees`
// Otherwise, we can run into initialization issue.
protected val trees: self.type
// More or less the same here
protected val symbols: this.type
override protected def ensureWellFormedFunction(fd: FunDef): Unit = {
exprOps.preTraversal {
case fa @ self.FieldAssignment(obj, field, _) =>
// if the field that is assigned is not '@var', throw
if (fa.getField(using self0).exists(fieldVd => !fieldVd.flags.contains(IsVar))) {
throw NotWellFormedException(
fd,
Some(s"cannot assign to immutable field '${field.name}' of class '${obj.getType(using self0)}'")
)
}
case _ => ()
}(fd.fullBody)
super.ensureWellFormedFunction(fd)
}
}
}
|
epfl-lara/stainless
|
core/src/main/scala/stainless/extraction/imperative/Definitions.scala
|
Scala
|
apache-2.0
| 1,274 |
package is.hail.expr.ir
import scala.collection.mutable
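// Wraps a value so that equality and hashing use reference identity (`eq` and
// System.identityHashCode) instead of structural equality, allowing structurally equal
// but distinct IR nodes to act as separate keys, e.g. in the Memo cache below.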
object RefEquality {
def apply[T <: AnyRef](t: T): RefEquality[T] = new RefEquality[T](t)
}
class RefEquality[+T <: AnyRef](val t: T) {
override def equals(obj: scala.Any): Boolean = obj match {
case r: RefEquality[T] => t.eq(r.t)
case _ => false
}
override def hashCode(): Int = System.identityHashCode(t)
override def toString: String = s"RefEquality($t)"
}
object Memo {
def empty[T]: Memo[T] = new Memo[T](new mutable.HashMap[RefEquality[BaseIR], T])
}
class Memo[T] private(val m: mutable.HashMap[RefEquality[BaseIR], T]) {
def bind(ir: BaseIR, t: T): Memo[T] = bind(RefEquality(ir), t)
def bind(ir: RefEquality[BaseIR], t: T): Memo[T] = {
if (m.contains(ir))
throw new RuntimeException(s"IR already in memo: ${ ir.t }")
m += ir -> t
this
}
def contains(ir: BaseIR): Boolean = contains(RefEquality(ir))
def contains(ir: RefEquality[BaseIR]): Boolean = m.contains(ir)
def lookup(ir: BaseIR): T = lookup(RefEquality(ir))
def lookup(ir: RefEquality[BaseIR]): T = m(ir)
def apply(ir: BaseIR): T = lookup(ir)
def update(ir: BaseIR, t: => T): Unit = m.update(RefEquality(ir), t)
def get(ir: BaseIR): Option[T] = get(RefEquality(ir))
def get(ir: RefEquality[BaseIR]): Option[T] = m.get(ir)
def getOrElse(ir: BaseIR, default: => T): T = m.getOrElse(RefEquality(ir), default)
def getOrElseUpdate(ir: BaseIR, t: => T): T = m.getOrElseUpdate(RefEquality(ir), t)
def getOrElseUpdate(ir: RefEquality[BaseIR], t: => T): T = m.getOrElseUpdate(ir, t)
def delete(ir: BaseIR): Unit = delete(RefEquality(ir))
def delete(ir: RefEquality[BaseIR]): Unit = m -= ir
override def toString: String = s"Memo(${m})"
}
object HasIRSharing {
def apply(ir: BaseIR): Boolean = {
val m = mutable.HashSet.empty[RefEquality[BaseIR]]
def recur(x: BaseIR): Boolean = {
val re = RefEquality(x)
if (m.contains(re))
true
else {
m.add(re)
x.children.exists(recur)
}
}
recur(ir)
}
}
|
hail-is/hail
|
hail/src/main/scala/is/hail/expr/ir/RefEquality.scala
|
Scala
|
mit
| 2,064 |
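// A minimal, hypothetical sketch of why the RefEquality wrapper above exists:
// structurally equal IR nodes must not collide as Memo keys, so keys compare by
// object identity. `Node` is a stand-in for BaseIR used only for illustration.
import is.hail.expr.ir.RefEquality

object RefEqualitySketch {
  final case class Node(label: String) // any AnyRef works with RefEquality

  def main(args: Array[String]): Unit = {
    val a = Node("x")
    val b = Node("x")
    assert(a == b)                           // structurally equal
    assert(RefEquality(a) != RefEquality(b)) // but distinct objects, hence distinct keys
    assert(RefEquality(a) == RefEquality(a)) // the same object always maps to the same key
  }
}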
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.alignment
trait AlignmentCombination {
val law: LawAxis
val moral: MoralAxis
}
|
adarro/ddo-calc
|
subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/alignment/AlignmentCombination.scala
|
Scala
|
apache-2.0
| 784 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.factories
import org.apache.flink.table.api.{BatchTableEnvironment, StreamTableEnvironment, TableEnvironment, TableException}
import org.apache.flink.table.descriptors.{Descriptor, DescriptorProperties}
import org.apache.flink.table.sinks.TableSink
import org.apache.flink.table.sources.TableSource
/**
* Utility for dealing with [[TableFactory]] using the [[TableFactoryService]].
*/
object TableFactoryUtil {
/**
* Returns a table source for a table environment.
*/
def findAndCreateTableSource[T](
tableEnvironment: TableEnvironment,
descriptor: Descriptor)
: TableSource[T] = {
val javaMap = DescriptorProperties.toJavaMap(descriptor)
tableEnvironment match {
case _: BatchTableEnvironment =>
TableFactoryService
.find(classOf[BatchTableSourceFactory[T]], javaMap)
.createBatchTableSource(javaMap)
case _: StreamTableEnvironment =>
TableFactoryService
.find(classOf[StreamTableSourceFactory[T]], javaMap)
.createStreamTableSource(javaMap)
case e@_ =>
throw new TableException(s"Unsupported table environment: ${e.getClass.getName}")
}
}
/**
* Returns a table sink for a table environment.
*/
def findAndCreateTableSink[T](
tableEnvironment: TableEnvironment,
descriptor: Descriptor)
: TableSink[T] = {
val javaMap = DescriptorProperties.toJavaMap(descriptor)
tableEnvironment match {
case _: BatchTableEnvironment =>
TableFactoryService
.find(classOf[BatchTableSinkFactory[T]], javaMap)
.createBatchTableSink(javaMap)
case _: StreamTableEnvironment =>
TableFactoryService
.find(classOf[StreamTableSinkFactory[T]], javaMap)
.createStreamTableSink(javaMap)
case e@_ =>
throw new TableException(s"Unsupported table environment: ${e.getClass.getName}")
}
}
}
|
zhangminglei/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/factories/TableFactoryUtil.scala
|
Scala
|
apache-2.0
| 2,761 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.http.parsing
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import play.api.test._
import play.api.mvc.BodyParsers
class IgnoreBodyParserSpec extends PlaySpecification {
"The ignore body parser" should {
def parse[A](value: A, bytes: ByteString, contentType: Option[String], encoding: String)(
implicit mat: Materializer
) = {
await(
BodyParsers.utils
.ignore(value)(FakeRequest().withHeaders(contentType.map(CONTENT_TYPE -> _).toSeq: _*))
.run(Source.single(bytes))
)
}
"ignore empty bodies" in new WithApplication() {
parse("foo", ByteString.empty, Some("text/plain"), "utf-8") must beRight("foo")
}
"ignore non-empty bodies" in new WithApplication() {
parse(42, ByteString(1), Some("application/xml"), "utf-8") must beRight(42)
parse("foo", ByteString(1, 2, 3), None, "utf-8") must beRight("foo")
}
}
}
|
wegtam/playframework
|
core/play-integration-test/src/it/scala/play/it/http/parsing/IgnoreBodyParserSpec.scala
|
Scala
|
apache-2.0
| 1,040 |
package org.jetbrains.plugins.scala.codeInspection.methodSignature
import com.intellij.codeInspection.LocalInspectionTool
import com.intellij.testFramework.EditorTestUtil
import com.intellij.testFramework.fixtures.CodeInsightTestFixture
import org.jetbrains.plugins.scala.codeInspection.{ScalaInspectionBundle, ScalaQuickFixTestBase}
class EmptyParenthesesInspectionTest extends ScalaQuickFixTestBase {
import CodeInsightTestFixture.{CARET_MARKER => CARET}
protected override val classOfInspection: Class[_ <: LocalInspectionTool] =
classOf[AccessorLikeMethodInspection.EmptyParentheses]
protected override val description: String =
ScalaInspectionBundle.message("method.signature.accessor.empty.parenthesis")
private val hint = ScalaInspectionBundle.message("redundant.parentheses")
def test(): Unit = {
checkTextHasError(
text = s"def ${START}getStuff$END(): Boolean = true"
)
testQuickFix(
text = s"def get${CARET}Stuff(): Boolean = true",
expected = "def getStuff: Boolean = true",
hint
)
}
def test_ok(): Unit = {
checkTextHasNoErrors(
text = s"def getStuff(): Unit = ()"
)
checkTextHasNoErrors(
text = s"def stuff(): Int = 0"
)
}
def test_with_base_class(): Unit = {
checkTextHasError(
s"""
|class Impl extends Base {
| def getStuff: Int = 0
|}
|
|trait Base {
| def ${START}getStuff$END(): Int
|}
""".stripMargin
)
}
}
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/codeInspection/methodSignature/EmptyParenthesesInspectionTest.scala
|
Scala
|
apache-2.0
| 1,513 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.transform
import java.io.ByteArrayInputStream
import org.apache.arrow.memory.{BufferAllocator, RootAllocator}
import org.geotools.data.collection.ListFeatureCollection
import org.junit.runner.RunWith
import org.locationtech.geomesa.arrow.io.SimpleFeatureArrowFileReader
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.io.WithClose
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.util.Random
@RunWith(classOf[JUnitRunner])
class ArrowConversionProcessTest extends Specification {
import scala.collection.JavaConversions._
implicit val allocator: BufferAllocator = new RootAllocator(Long.MaxValue)
val sft = SimpleFeatureTypes.createImmutableType("arrow", "name:String,dtg:Date,*geom:Point:srid=4326")
val process = new ArrowConversionProcess
val features = (0 until 10).map { i =>
ScalaSimpleFeature.create(sft, s"0$i", s"name${i % 2}", s"2017-02-20T00:00:0$i.000Z", s"POINT(40 ${50 + i})")
}
val collection = new ListFeatureCollection(sft, new Random(-1L).shuffle(features))
"ArrowConversionProcess" should {
"encode an empty feature collection" in {
val bytes = process.execute(new ListFeatureCollection(sft), null, null, null, null, null, null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()) must beEmpty
}
}
"encode a generic feature collection" in {
val bytes = process.execute(collection, null, null, null, null, null, null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toSeq must
containTheSameElementsAs(features)
}
}
"encode a generic feature collection with dictionary values" in {
val bytes = process.execute(collection, null, null, Seq("name"), null, null, null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(bytes))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toSeq must
containTheSameElementsAs(features)
reader.dictionaries.get("name") must beSome
}
}
"encode a generic feature collection with sorting" in {
val ascending = process.execute(collection, null, null, null, null, "dtg", null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(ascending))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toSeq mustEqual features
}
val descending = process.execute(collection, null, null, null, null, "dtg", true, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(descending))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toSeq mustEqual features.reverse
}
}
"encode a generic feature collection with sorting and dictionary values" in {
val ascending = process.execute(collection, null, null, Seq("name"), null, "dtg", null, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(ascending))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toSeq mustEqual features
reader.dictionaries.get("name") must beSome
}
val descending = process.execute(collection, null, null, Seq("name"), null, "dtg", true, null, null).reduce(_ ++ _)
WithClose(SimpleFeatureArrowFileReader.streaming(() => new ByteArrayInputStream(descending))) { reader =>
reader.sft mustEqual sft
SelfClosingIterator(reader.features()).map(ScalaSimpleFeature.copy).toSeq mustEqual features.reverse
reader.dictionaries.get("name") must beSome
}
}
}
step {
allocator.close()
}
}
|
elahrvivaz/geomesa
|
geomesa-process/geomesa-process-vector/src/test/scala/org/locationtech/geomesa/process/transform/ArrowConversionProcessTest.scala
|
Scala
|
apache-2.0
| 4,938 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.api.dag.Transformation
import org.apache.flink.configuration.MemorySize
import org.apache.flink.streaming.api.operators.SimpleOperatorFactory
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.data.RowData
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.codegen.ProjectionCodeGenerator.generateProjection
import org.apache.flink.table.planner.codegen.{CodeGeneratorContext, LongHashJoinGenerator}
import org.apache.flink.table.planner.delegation.BatchPlanner
import org.apache.flink.table.planner.plan.`trait`.{FlinkRelDistribution, FlinkRelDistributionTraitDef}
import org.apache.flink.table.planner.plan.cost.{FlinkCost, FlinkCostFactory}
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.exec.{ExecEdge, ExecNode}
import org.apache.flink.table.planner.plan.utils.{FlinkRelMdUtil, JoinUtil}
import org.apache.flink.table.runtime.operators.join.{HashJoinOperator, HashJoinType}
import org.apache.flink.table.runtime.typeutils.{BinaryRowDataSerializer, InternalTypeInfo}
import org.apache.flink.table.types.logical.RowType
import org.apache.calcite.plan._
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelNode, RelWriter}
import org.apache.calcite.rex.RexNode
import org.apache.calcite.util.Util
import java.util
import scala.collection.JavaConversions._
/**
* Batch physical RelNode for hash [[Join]].
*/
class BatchExecHashJoin(
cluster: RelOptCluster,
traitSet: RelTraitSet,
leftRel: RelNode,
rightRel: RelNode,
condition: RexNode,
joinType: JoinRelType,
// true if LHS is build side, else false
val leftIsBuild: Boolean,
// true if build side is broadcast, else false
val isBroadcast: Boolean,
val tryDistinctBuildRow: Boolean)
extends BatchExecJoinBase(cluster, traitSet, leftRel, rightRel, condition, joinType) {
private val (leftKeys, rightKeys) =
JoinUtil.checkAndGetJoinKeys(keyPairs, getLeft, getRight, allowEmptyKey = true)
val (buildKeys, probeKeys) = if (leftIsBuild) (leftKeys, rightKeys) else (rightKeys, leftKeys)
// Inputs could be changed. See [[BiRel.replaceInput]].
def buildRel: RelNode = if (leftIsBuild) getLeft else getRight
def probeRel: RelNode = if (leftIsBuild) getRight else getLeft
val hashJoinType: HashJoinType = HashJoinType.of(
leftIsBuild,
getJoinType.generatesNullsOnRight(),
getJoinType.generatesNullsOnLeft(),
getJoinType == JoinRelType.SEMI,
getJoinType == JoinRelType.ANTI)
override def copy(
traitSet: RelTraitSet,
conditionExpr: RexNode,
left: RelNode,
right: RelNode,
joinType: JoinRelType,
semiJoinDone: Boolean): Join = {
new BatchExecHashJoin(
cluster,
traitSet,
left,
right,
conditionExpr,
joinType,
leftIsBuild,
isBroadcast,
tryDistinctBuildRow)
}
override def explainTerms(pw: RelWriter): RelWriter = {
super.explainTerms(pw)
.itemIf("isBroadcast", "true", isBroadcast)
.item("build", if (leftIsBuild) "left" else "right")
.itemIf("tryDistinctBuildRow", "true", tryDistinctBuildRow)
}
override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
val leftRowCnt = mq.getRowCount(getLeft)
val rightRowCnt = mq.getRowCount(getRight)
if (leftRowCnt == null || rightRowCnt == null) {
return null
}
    // assume memory is big enough to hold all build side data, so spill will not happen.
    // count in the network cost of the Exchange node before the build side child here
val cpuCost = FlinkCost.HASH_CPU_COST * (leftRowCnt + rightRowCnt)
val (buildRowCount, buildRowSize) = if (leftIsBuild) {
(leftRowCnt, FlinkRelMdUtil.binaryRowAverageSize(getLeft))
} else {
(rightRowCnt, FlinkRelMdUtil.binaryRowAverageSize(getRight))
}
// We aim for a 200% utilization of the bucket table when all the partition buffers are full.
// TODO use BinaryHashBucketArea.RECORD_BYTES instead of 8
val bucketSize = buildRowCount * 8 / FlinkCost.HASH_COLLISION_WEIGHT
val recordSize = buildRowCount * (buildRowSize + BinaryRowDataSerializer.LENGTH_SIZE_IN_BYTES)
val memCost = (bucketSize + recordSize) * shuffleBuildCount(mq)
val costFactory = planner.getCostFactory.asInstanceOf[FlinkCostFactory]
costFactory.makeCost(mq.getRowCount(this), cpuCost, 0, 0, memCost)
}
private[flink] def shuffleBuildCount(mq: RelMetadataQuery): Int = {
val probeRel = if (leftIsBuild) getRight else getLeft
if (isBroadcast) {
val rowCount = Util.first(mq.getRowCount(probeRel), 1)
val shuffleCount = rowCount * mq.getAverageRowSize(probeRel) /
FlinkCost.SQL_DEFAULT_PARALLELISM_WORKER_PROCESS_SIZE
Math.max(1, shuffleCount.toInt)
} else {
1
}
}
override def satisfyTraits(requiredTraitSet: RelTraitSet): Option[RelNode] = {
if (!isBroadcast) {
satisfyTraitsOnNonBroadcastHashJoin(requiredTraitSet)
} else {
satisfyTraitsOnBroadcastJoin(requiredTraitSet, leftIsBuild)
}
}
private def satisfyTraitsOnNonBroadcastHashJoin(
requiredTraitSet: RelTraitSet): Option[RelNode] = {
val requiredDistribution = requiredTraitSet.getTrait(FlinkRelDistributionTraitDef.INSTANCE)
val (canSatisfyDistribution, leftRequiredDistribution, rightRequiredDistribution) =
satisfyHashDistributionOnNonBroadcastJoin(requiredDistribution)
if (!canSatisfyDistribution) {
return None
}
val toRestrictHashDistributionByKeys = (distribution: FlinkRelDistribution) =>
getCluster.getPlanner
.emptyTraitSet
.replace(FlinkConventions.BATCH_PHYSICAL)
.replace(distribution)
val leftRequiredTraits = toRestrictHashDistributionByKeys(leftRequiredDistribution)
val rightRequiredTraits = toRestrictHashDistributionByKeys(rightRequiredDistribution)
val newLeft = RelOptRule.convert(getLeft, leftRequiredTraits)
val newRight = RelOptRule.convert(getRight, rightRequiredTraits)
val providedTraits = getTraitSet.replace(requiredDistribution)
// HashJoin can not satisfy collation.
Some(copy(providedTraits, Seq(newLeft, newRight)))
}
//~ ExecNode methods -----------------------------------------------------------
override def getInputNodes: util.List[ExecNode[BatchPlanner, _]] =
getInputs.map(_.asInstanceOf[ExecNode[BatchPlanner, _]])
override def getInputEdges: util.List[ExecEdge] = {
val (buildRequiredShuffle, probeRequiredShuffle) = if (isBroadcast) {
(ExecEdge.RequiredShuffle.broadcast(), ExecEdge.RequiredShuffle.any())
} else {
(ExecEdge.RequiredShuffle.hash(buildKeys), ExecEdge.RequiredShuffle.hash(probeKeys))
}
val probeDamBehavior = if (hashJoinType.buildLeftSemiOrAnti()) {
ExecEdge.DamBehavior.END_INPUT
} else {
ExecEdge.DamBehavior.PIPELINED
}
val buildEdge = ExecEdge.builder()
.requiredShuffle(buildRequiredShuffle)
.damBehavior(ExecEdge.DamBehavior.BLOCKING)
.priority(0)
.build()
val probeEdge = ExecEdge.builder()
.requiredShuffle(probeRequiredShuffle)
.damBehavior(probeDamBehavior)
.priority(1)
.build()
if (leftIsBuild) {
List(buildEdge, probeEdge)
} else {
List(probeEdge, buildEdge)
}
}
override def replaceInputNode(
ordinalInParent: Int,
newInputNode: ExecNode[BatchPlanner, _]): Unit = {
replaceInput(ordinalInParent, newInputNode.asInstanceOf[RelNode])
}
override protected def translateToPlanInternal(
planner: BatchPlanner): Transformation[RowData] = {
val config = planner.getTableConfig
val lInput = getInputNodes.get(0).translateToPlan(planner)
.asInstanceOf[Transformation[RowData]]
val rInput = getInputNodes.get(1).translateToPlan(planner)
.asInstanceOf[Transformation[RowData]]
// get type
val lType = lInput.getOutputType.asInstanceOf[InternalTypeInfo[RowData]].toRowType
val rType = rInput.getOutputType.asInstanceOf[InternalTypeInfo[RowData]].toRowType
val keyType = RowType.of(leftKeys.map(lType.getTypeAt): _*)
val condFunc = JoinUtil.generateConditionFunction(
config, cluster.getRexBuilder, getJoinInfo, lType, rType)
// projection for equals
val lProj = generateProjection(
CodeGeneratorContext(config), "HashJoinLeftProjection", lType, keyType, leftKeys)
val rProj = generateProjection(
CodeGeneratorContext(config), "HashJoinRightProjection", rType, keyType, rightKeys)
val (build, probe, bProj, pProj, bType, pType, reverseJoin) =
if (leftIsBuild) {
(lInput, rInput, lProj, rProj, lType, rType, false)
} else {
(rInput, lInput, rProj, lProj, rType, lType, true)
}
val mq = getCluster.getMetadataQuery
val buildRowSize = Util.first(mq.getAverageRowSize(buildRel), 24).toInt
val buildRowCount = Util.first(mq.getRowCount(buildRel), 200000).toLong
val probeRowCount = Util.first(mq.getRowCount(probeRel), 200000).toLong
// operator
val operator = if (LongHashJoinGenerator.support(hashJoinType, keyType, filterNulls)) {
LongHashJoinGenerator.gen(
config,
hashJoinType,
keyType,
bType,
pType,
buildKeys,
probeKeys,
buildRowSize,
buildRowCount,
reverseJoin,
condFunc)
} else {
SimpleOperatorFactory.of(HashJoinOperator.newHashJoinOperator(
hashJoinType,
condFunc,
reverseJoin,
filterNulls,
bProj,
pProj,
tryDistinctBuildRow,
buildRowSize,
buildRowCount,
probeRowCount,
keyType
))
}
val managedMemory = MemorySize.parse(config.getConfiguration.getString(
ExecutionConfigOptions.TABLE_EXEC_RESOURCE_HASH_JOIN_MEMORY)).getBytes
ExecNode.createTwoInputTransformation(
build,
probe,
getRelDetailedDescription,
operator,
InternalTypeInfo.of(FlinkTypeFactory.toLogicalRowType(getRowType)),
probe.getParallelism,
managedMemory)
}
}
|
greghogan/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecHashJoin.scala
|
Scala
|
apache-2.0
| 11,166 |
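// Side note on the cost model in computeSelfCost above: the memory estimate can be
// restated as a standalone function. The parameters stand in for FlinkCost.HASH_COLLISION_WEIGHT
// and BinaryRowDataSerializer.LENGTH_SIZE_IN_BYTES; treat this as an illustrative sketch,
// not the planner's implementation.
object HashJoinMemCostSketch {
  // bucket table sized for ~200% utilization (8 bytes per build row divided by the
  // collision weight) plus the serialized build records, multiplied by the number of
  // times the build side is shuffled
  def memCost(
      buildRowCount: Double,
      buildRowSize: Double,
      hashCollisionWeight: Double,
      lengthSizeInBytes: Int,
      shuffleBuildCount: Int): Double = {
    val bucketSize = buildRowCount * 8 / hashCollisionWeight
    val recordSize = buildRowCount * (buildRowSize + lengthSizeInBytes)
    (bucketSize + recordSize) * shuffleBuildCount
  }
}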
package output
import leon.lazyeval._
import leon.lang._
import leon.annotation._
import leon.instrumentation._
import leon.invariant._
object MergeSort {
case class Set1[T]()
abstract class IList {
def size: BigInt = {
this match {
case ICons(_, xs) =>
BigInt(1) + xs.size
case _ =>
BigInt(0)
}
} ensuring {
(x$1: BigInt) => x$1 >= BigInt(0)
}
}
case class ICons(x: BigInt, tail: IList) extends IList
case class INil() extends IList
abstract class ILList {
/*def size: BigInt = {
this match {
case LCons(_, xs37) =>
BigInt(1) + ssize(xs37)
case _ =>
BigInt(0)
}
} ensuring {
(x$218: BigInt) => x$218 >= BigInt(0)
}*/
}
case class LCons(x: BigInt, tail: LazyILList) extends ILList
case class LNil() extends ILList
abstract class LList {
def size: BigInt = {
this match {
case SNil() =>
BigInt(0)
case SCons(_, t) =>
BigInt(1) + t.size
}
} ensuring {
(x$3: BigInt) => x$3 >= BigInt(0)
}
}
case class SCons(x: LazyILList, tail: LList) extends LList
case class SNil() extends LList
/* def ssize(l: LazyILList): BigInt = {
evalLazyILList2(l).size
} ensuring {
(res: BigInt) => true
}
def fullSize(l: LList): BigInt = {
l match {
case SNil() =>
BigInt(0)
case SCons(l49, t) =>
ssize(l49) + fullSize(t)
}
} ensuring {
(x$46: BigInt) => x$46 >= BigInt(0)
}*/
/*def pairs(l: LList, st: Set1[LazyILList]): (LList, Set1[LazyILList]) = {
l match {
case SNil() =>
(SNil(), st)
case SCons(_, SNil()) =>
(l, st)
case SCons(l118, SCons(l215, rest)) =>
val a77 = pairs(rest, st)
(SCons(newILList(Merge(l118, l215), st), a77._1), a77._2)
}
} ensuring {
(res69: (LList, Set1[LazyILList])) =>
res69._1.size <= (l.size + BigInt(1)) / BigInt(2) &&
fullSize(l) == fullSize(res69._1) &&
time <= 10 * l.size + 4
}*/
abstract class LazyILList
case class Merge(a: LazyILList, b: LazyILList) extends LazyILList
case class Lsingle(x: BigInt) extends LazyILList
case class Lempty() extends LazyILList
//@library
def newILList(cc: LazyILList, st: Set1[LazyILList]): LazyILList = {
cc
} /*ensuring {
(res: LazyILList) => !st.contains(res)
}*/
//@library
/*def evalLazyILList(cl: LazyILList, st: Set1[LazyILList]): (ILList, Set1[LazyILList]) = {
cl match {
case t: Merge =>
(merge(t.a, t.b, Set1[LazyILList]()), Set1[LazyILList]())
case t: Lsingle =>
(lsingle(t.x, Set1[LazyILList]()), Set1[LazyILList]())
case t: Lempty =>
(lempty, Set1[LazyILList]())
}
} ensuring {
(res: (ILList, Set1[LazyILList])) =>
cl match {
case t: Merge =>
ssize(t.a) + ssize(t.b) == res._1.size &&
time <= 300 * res._1.size - 100
case t: Lsingle =>
true
case t: Lempty =>
true
}
}
def evalLazyILList2(cl: LazyILList): ILList = {
evalLazyILList(cl, Set1[LazyILList]())._1
} ensuring {
(res: ILList) => true
}*/
/* def constructMergeTree(l: LList, st: Set1[LazyILList]): (LList, Set1[LazyILList]) = {
l match {
case SNil() =>
(SNil(), st)
case SCons(_, SNil()) =>
(l, st)
case _ =>
val a76 = pairs(l, st)
constructMergeTree(a76._1, a76._2)
}
} ensuring {
(res: (LList, Set1[LazyILList])) =>
res._1.size <= BigInt(1) && fullSize(res._1) == fullSize(l) && (res._1 match {
case SCons(il1, SNil()) =>
fullSize(res._1) == ssize(il1)
case _ =>
true
}) &&
time <= 42 * l.size + 4
}*/
/* def merge(a: LazyILList, b: LazyILList, st: Set1[LazyILList]): ILList = {
require(evalLazyILList2(a) != LNil() && evalLazyILList2(b) != LNil())
evalLazyILList(b, st)._1 match {
case LNil() =>
evalLazyILList(a, st)._1
case bl @ LCons(x, xs36) =>
evalLazyILList(a, st)._1 match {
case LNil() =>
bl
case LCons(y, ys2) =>
if (y < x) {
LCons(y, Merge(ys2, b))
} else {
LCons(x, Merge(a, xs36))
}
}
}
} ensuring {
(res70 : ILList) => ssize(a) + ssize(b) == res70.size &&
time <= 300 * res70.size - 100
// (res70 match {
// case _ if res70.size == 1 =>
// time <= 300 * res70.size + 100
// case _ =>
// time <= 300 * res70.size - 100
// })
}*/
def IListToLList(l: IList, st: Set1[LazyILList]): LList = {
l match {
case INil() =>
SNil()
case ICons(x, xs) =>
SCons(newILList(Lsingle(x), st), IListToLList(xs, st))
}
} ensuring {
(res: LList) =>
//fullSize(res) == l.size && res.size == l.size &&
time <= 9 * l.size + 3
}
// def mergeSort(l: IList, st: Set1[LazyILList]): (ILList, Set1[LazyILList]) = {
// l match {
// case INil() =>
// (LNil(), st)
// case _ =>
// val scr = constructMergeTree(IListToLList(l, st), st)
// scr._1 match {
// case SCons(r13, SNil()) =>
// val dres = evalLazyILList(r13, scr._2)
// (dres._1, dres._2)
// }
// }
// } ensuring {
// (res: (ILList, Set1[LazyILList])) => true
// }
def lempty(): ILList = {
LNil()
} ensuring {
(res: ILList) => true
}
def lsingle(x: BigInt, st: Set1[LazyILList]): ILList = {
LCons(x, newILList(Lempty(), st))
} ensuring {
(res: ILList) => true
}
}
|
epfl-lara/leon
|
testcases/lazy-datastructures/ManualnOutdated/LazyMegeSort-edited.scala
|
Scala
|
gpl-3.0
| 5,675 |
package com.themillhousegroup.arallon.util
/**
* Should you ever need to rebuild the list of timezones,
* this will do it using the latest Java TZ data:
*/
object TZBuilder {
val caseInsensitiveStringSort = (a: String, b: String) => String.CASE_INSENSITIVE_ORDER.compare(a, b) < 0
lazy val allTimeZoneStrings = java.util.TimeZone.getAvailableIDs.toList
lazy val sortedTimeZoneStrings = allTimeZoneStrings.sortWith(caseInsensitiveStringSort)
val noForbiddenChars = (s: String) => !(s.contains("+") || s.contains("-"))
def rebuildTimeZones(): List[String] = {
val singleWordNames = sortedTimeZoneStrings.filter(noForbiddenChars).map { tz =>
val last = tz.split('/').last
last -> tz
}.toMap
singleWordNames.filter { case (k, v) => k != "UTC" }.map {
case (k, v) =>
k -> s"""final class $k extends NonUTCTimeZone("$v")\\n"""
}.values.toList.sortWith(caseInsensitiveStringSort)
}
}
|
themillhousegroup/arallon
|
src/main/scala/com/themillhousegroup/arallon/util/TZBuilder.scala
|
Scala
|
mit
| 941 |
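// A small, hypothetical driver for the TZBuilder utility above; it only calls
// rebuildTimeZones as defined in that file and prints the first few generated lines,
// each of which is a Scala class declaration such as
// `final class Amsterdam extends NonUTCTimeZone("Europe/Amsterdam")`.
import com.themillhousegroup.arallon.util.TZBuilder

object TZBuilderSketch {
  def main(args: Array[String]): Unit =
    TZBuilder.rebuildTimeZones().take(5).foreach(print)
}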
package cronus.auth
import com.twitter.inject.Logging
import com.twitter.util.Await
import org.scalatest.WordSpec
import scala.annotation.tailrec
class SaltGeneratorTest extends WordSpec with Logging {
val salt = new SaltGenerator
"SaltGenerator" should {
"Not generate two salts that are the same" in {
@tailrec
def testSalt(i: Int): Unit = {
if (i == 0) return
val salt1 = Await.result(salt.getNext()).str()
val salt2 = Await.result(salt.getNext()).str()
assert(salt1 != salt2)
testSalt(i-1)
}
testSalt(100)
}
}
}
|
codeape/cronus
|
src/test/scala/cronus/auth/SaltGeneratorTest.scala
|
Scala
|
apache-2.0
| 603 |
package fs2.async.mutable
import fs2._
import fs2.Stream._
/**
* Asynchronous Topic.
*
 * Topic allows you to distribute values of `A` published by an arbitrary number of publishers to an arbitrary number of subscribers.
 *
 * Topic has built-in back-pressure support, implemented as a maximum bound (`maxQueued`) on the number of elements a subscriber is allowed to enqueue.
 * Once that bound is hit, publishing may semantically block until the lagging subscriber consumes some of its queued elements.
 *
 * Additionally, a subscriber can terminate whenever the number of enqueued elements exceeds a certain bound
 * by using `subscribeSize`.
*/
trait Topic[F[_],A] {
/**
   * Publishes any elements from a source of `A` to this topic.
   * If any of the subscribers reaches its `maxQueued` limit, this will hold off publishing the next element
   * until that subscriber consumes some of its elements or terminates.
*/
def publish:Sink[F,A]
/**
   * Publishes one `A` to the topic.
   *
   * This will wait until `A` is published to all subscribers.
   * If one of the subscribers is over the `maxQueued` limit, this will wait to complete until that subscriber processes
   * some of its elements to make room for this newly published `A`.
*
*/
def publish1(a:A):F[Unit]
/**
   * Subscribes to receive any `A` published to this topic.
   *
   * Always returns the last `A` published first, and then any subsequently published `A`.
   *
   * If the subscriber is over the `maxQueued` bound of messages awaiting processing,
   * then publishers will hold off publishing to the queue.
*
*/
def subscribe(maxQueued:Int):Stream[F,A]
/**
   * Subscribes to receive any `A` published to this topic.
   *
   * Always returns the last `A` published first, and then any subsequently available `A`.
   *
   * Additionally this emits the current size of the queue of `A` for this subscriber, allowing
   * you to terminate (or adjust) the subscription if the subscriber falls too far behind the elements available.
   *
   * Note that the queue size is approximate and may not be exactly the size at the moment `A` was taken.
   *
   * If the subscriber is over the `maxQueued` bound of messages awaiting processing,
   * then publishers will hold off publishing to the queue.
*
*/
def subscribeSize(maxQueued:Int):Stream[F,(A, Int)]
/**
   * Signal of the current number of active subscribers.
*/
def subscribers:fs2.async.immutable.Signal[F,Int]
}
object Topic {
def apply[F[_], A](initial:A)(implicit F: Async[F]):F[Topic[F,A]] = {
// Id identifying each subscriber uniquely
class ID
sealed trait Subscriber {
def publish(a:A):F[Unit]
def id:ID
def subscribe:Stream[F,A]
def subscribeSize:Stream[F,(A,Int)]
def unSubscribe:F[Unit]
}
F.bind(F.refOf((initial,Vector.empty[Subscriber]))) { state =>
F.map(async.signalOf[F,Int](0)) { subSignal =>
def mkSubscriber(maxQueued: Int):F[Subscriber] = {
F.bind(async.boundedQueue[F,A](maxQueued)) { q =>
F.bind(F.ref[A]) { firstA =>
F.bind(F.ref[Boolean]) { done =>
val sub = new Subscriber {
def unSubscribe: F[Unit] = {
F.bind(F.modify(state) { case (a,subs) => a -> subs.filterNot(_.id == id) }) { _ =>
F.bind(subSignal.modify(_ - 1))(_ => F.setPure(done)(true))
}
}
def subscribe: Stream[F, A] = eval(F.get(firstA)) ++ q.dequeue
def publish(a: A): F[Unit] = {
F.bind(q.offer1(a)) { offered =>
if (offered) F.pure(())
else {
eval(F.get(done)).interruptWhen(q.full.discrete.map(! _ )).last.flatMap {
case None => eval(publish(a))
case Some(_) => Stream.empty
}.run
}
}
}
def subscribeSize: Stream[F, (A,Int)] = eval(F.get(firstA)).map(_ -> 0) ++ q.dequeue.zip(q.size.continuous)
val id: ID = new ID
}
F.bind(F.modify(state){ case(a,s) => a -> (s :+ sub) }) { c =>
F.bind(subSignal.modify(_ + 1))(_ => F.map(F.setPure(firstA)(c.now._1))(_ => sub))
}
}}}
}
new Topic[F,A] {
def publish:Sink[F,A] =
_ flatMap( a => eval(publish1(a)))
def subscribers: Signal[F, Int] = subSignal
def publish1(a: A): F[Unit] =
F.bind(F.modify(state){ case (_,subs) => a -> subs }) { c => F.map(F.traverse(c.now._2)(_.publish(a)))(_ => ()) }
def subscribe(maxQueued: Int): Stream[F, A] =
bracket(mkSubscriber(maxQueued))(_.subscribe, _.unSubscribe)
def subscribeSize(maxQueued: Int): Stream[F, (A, Int)] =
bracket(mkSubscriber(maxQueued))(_.subscribeSize, _.unSubscribe)
}
}}
}
}
|
japgolly/scalaz-stream
|
core/src/main/scala/fs2/async/mutable/Topic.scala
|
Scala
|
mit
| 4,809 |
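// A minimal wiring sketch against the Topic trait above. It uses only methods declared
// there (publish1, subscribe) plus Stream.eval, ++ and take; actually running producer
// and consumer concurrently needs a merge-style combinator from the surrounding fs2
// version, which is deliberately left out. Names below are illustrative assumptions.
import fs2._
import fs2.async.mutable.Topic

object TopicSketch {
  // Build a bounded subscription and a producer that publishes three values;
  // a subscriber sees the topic's last published value first, then the new ones.
  def wire[F[_]](topic: Topic[F, Int]): (Stream[F, Int], Stream[F, Unit]) = {
    val consumer: Stream[F, Int] = topic.subscribe(maxQueued = 16).take(4)
    val producer: Stream[F, Unit] =
      Stream.eval(topic.publish1(1)) ++
      Stream.eval(topic.publish1(2)) ++
      Stream.eval(topic.publish1(3))
    (consumer, producer)
  }
}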
package com.crobox.clickhouse
import akka.http.scaladsl.model.StatusCode
sealed abstract class ClickhouseExecutionException(msg: String, cause: Throwable = null)
extends RuntimeException(msg, cause) {
val retryable: Boolean
}
case class ClickhouseException(message: String, query: String, cause: Throwable = null, statusCode: StatusCode)
extends ClickhouseExecutionException(message + s", query $query", cause) {
override val retryable: Boolean = true
}
case class ClickhouseChunkedException(message: String) extends ClickhouseExecutionException(message) {
override val retryable: Boolean = true
}
case class TooManyQueriesException()
extends ClickhouseExecutionException(
"The client's queue is full, you are trying to execute too many queries at the same time. This can be solved by either: checking the source of the queries to make sure this is not a bug\\n Increasing the buffer size under the property `crobox.clickhouse.client.buffer-size`\\n Adjust the settings of the super pool under `akka.http.host-connection-pool`"
) {
override val retryable: Boolean = false
}
|
crobox/clickhouse-scala-client
|
client/src/main/scala/com.crobox.clickhouse/ClickhouseException.scala
|
Scala
|
lgpl-3.0
| 1,107 |
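// A small hypothetical retry guard over the exception hierarchy above; it relies only
// on the `retryable` flag that each subclass sets, nothing else is prescribed by the client.
import com.crobox.clickhouse.ClickhouseExecutionException

object RetryGuardSketch {
  // retry only when the failure type opts in via `retryable` and attempts remain
  def shouldRetry(t: Throwable, attempt: Int, maxAttempts: Int): Boolean = t match {
    case e: ClickhouseExecutionException => e.retryable && attempt < maxAttempts
    case _                               => false
  }
}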
package com.szadowsz.cadisainmduit.people.census.uk.norire
import java.io.File
import com.szadowsz.cadisainmduit.people.census.canada.CanadaNamesFreqCalculator._
import com.szadowsz.cadisainmduit.people.census.{CensusHandler, SegGenderCensusHandler}
import com.szadowsz.common.io.write.CsvWriter
import com.szadowsz.spark.ml.Lineage
import com.szadowsz.spark.ml.feature._
import org.apache.spark.ml.feature.{Bucketizer, IndexToString, QuantileDiscretizer, StringIndexerModel}
import org.apache.spark.sql.types.{IntegerType, NumericType}
import org.apache.spark.sql.{DataFrame, SparkSession}
/**
 * Built to stitch all Northern Ireland popular name data together
*
* Created on 19/10/2016.
*/
object NorireNamesFreqCalculator extends CensusHandler {
override protected def buildFractionPipeline(name: String, country: String, appCols: Array[String], popCols: Array[String]): Lineage = {
val pipe = new Lineage(name)
pipe.addStage(classOf[ValueCounter], "countValue" -> false, "value" -> null, "inputCols" -> appCols, "outputCol" -> s"${country}_appearCount")
val div = Map("outputCol" -> s"${country}_appFrac", "inputCol" -> s"${country}_appearCount", "total" -> appCols.length.toDouble, "decPlaces" -> 3)
pipe.addStage(classOf[DivisionTransformer], div)
pipe.addStage(classOf[Bucketizer], "inputCol" -> s"${country}_appFrac", "outputCol" -> s"${country}_App",
"splits" -> Array(Double.NegativeInfinity, 0.25, 0.5, 0.85, Double.PositiveInfinity))
pipe.addStage(classOf[IndexToString], "inputCol" -> s"${country}_App", "outputCol" -> s"${country}_AppRank", "labels" -> Array("rare","uncommon","common","basic"))
val excluded = Array(s"${country}_appFrac",s"${country}_App", s"${country}_appearCount" /*, "counts"*/) ++ appCols
pipe.addStage(classOf[ColFilterTransformer], "inputCols" -> excluded, "isInclusive" -> false)
pipe
}
def loadData(sess: SparkSession, path: String) = {
val f = new File(path)
val cols = extractSchema(f)
val stringDF = extractFile(sess, f, true, false)
val pipe = new Lineage("load")
pipe.addStage(classOf[CsvColumnExtractor], "inputCol" -> "fields", "outputCols" -> cols, "size" -> cols.length)
cols.filter(c => c != "name" && c != "gender").foreach(c => pipe.addStage(classOf[CastTransformer], "inputCol" -> c, "outputDataType" -> IntegerType))
val mod = pipe.fit(stringDF)
mod.transform(stringDF)
}
def loadData(save: Boolean): DataFrame = {
val sess = SparkSession.builder()
.config("spark.driver.host", "localhost")
.master("local[8]")
.getOrCreate()
val children = loadData(sess, "./data/tmp/NI/baby_names.csv")
val appFields = children.schema.fieldNames.filterNot(f => f == "name" || f == "gender")
val pipe: Lineage = buildFractionPipeline(s"ni-frac","NI", appFields,appFields)
val (_,result) = pipe.fitAndTransform(children)
if (save) {
val writer = new CsvWriter("./data/results/ni_baby_names.csv", "UTF-8", false)
writer.write(result.schema.fieldNames: _*)
val res = result.collect().map(r => r.toSeq.map(f => Option(f).map(_.toString).getOrElse(""))).filter(_.head.length > 0)
writer.writeAll(res.sortBy(seq => (seq.head)))
//writer.write(tots: _*)
writer.close()
}
result
}
def main(args: Array[String]): Unit = {
loadData(true)
}
}
|
zakski/project-cadisainmduit
|
module/dataprep-people/src/main/scala/com/szadowsz/cadisainmduit/people/census/uk/norire/NorireNamesFreqCalculator.scala
|
Scala
|
apache-2.0
| 3,371 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.pinot.connector.spark.connector
import java.util.{List => JList, Map => JMap}
import org.apache.helix.model.InstanceConfig
import org.apache.pinot.common.metrics.{BrokerMetrics, PinotMetricUtils}
import org.apache.pinot.common.request.BrokerRequest
import org.apache.pinot.common.utils.DataTable
import org.apache.pinot.connector.spark.datasource.PinotDataSourceReadOptions
import org.apache.pinot.connector.spark.exceptions.PinotException
import org.apache.pinot.connector.spark.utils.Logging
import org.apache.pinot.core.transport.{AsyncQueryResponse, QueryRouter, ServerInstance}
import org.apache.pinot.spi.config.table.TableType
import org.apache.pinot.sql.parsers.CalciteSqlCompiler
import scala.collection.JavaConverters._
/**
* Actual data fetcher from Pinot server with specific segments.
* Eg: offline-server1: segment1, segment2, segment3
*/
private[pinot] class PinotServerDataFetcher(
partitionId: Int,
pinotSplit: PinotSplit,
dataSourceOptions: PinotDataSourceReadOptions)
extends Logging {
private val sqlCompiler = new CalciteSqlCompiler()
private val brokerId = "apache_spark"
private val metricsRegistry = PinotMetricUtils.getPinotMetricsRegistry
private val brokerMetrics = new BrokerMetrics(metricsRegistry)
private val queryRouter = new QueryRouter(brokerId, brokerMetrics)
// TODO add support for TLS-secured server
def fetchData(): List[DataTable] = {
val routingTableForRequest = createRoutingTableForRequest()
val requestStartTime = System.nanoTime()
val pinotServerAsyncQueryResponse = pinotSplit.serverAndSegments.serverType match {
case TableType.REALTIME =>
val realtimeBrokerRequest =
sqlCompiler.compileToBrokerRequest(pinotSplit.generatedSQLs.realtimeSelectQuery)
submitRequestToPinotServer(null, null, realtimeBrokerRequest, routingTableForRequest)
case TableType.OFFLINE =>
val offlineBrokerRequest =
sqlCompiler.compileToBrokerRequest(pinotSplit.generatedSQLs.offlineSelectQuery)
submitRequestToPinotServer(offlineBrokerRequest, routingTableForRequest, null, null)
}
val pinotServerResponse = pinotServerAsyncQueryResponse.getResponse.values().asScala.toList
logInfo(s"Pinot server total response time in millis: ${System.nanoTime() - requestStartTime}")
closePinotServerConnection()
pinotServerResponse.foreach { response =>
logInfo(
s"Request stats; " +
s"responseSize: ${response.getResponseSize}, " +
s"responseDelayMs: ${response.getResponseDelayMs}, " +
s"deserializationTimeMs: ${response.getDeserializationTimeMs}, " +
s"submitDelayMs: ${response.getSubmitDelayMs}"
)
}
val dataTables = pinotServerResponse
.map(_.getDataTable)
.filter(_ != null)
if (dataTables.isEmpty) {
throw PinotException(s"${pinotSplit.serverAndSegments.toString} could not respond the query")
}
dataTables.filter(_.getNumberOfRows > 0)
}
private def createRoutingTableForRequest(): JMap[ServerInstance, JList[String]] = {
val nullZkId: String = null
val instanceConfig = new InstanceConfig(nullZkId)
instanceConfig.setHostName(pinotSplit.serverAndSegments.serverHost)
instanceConfig.setPort(pinotSplit.serverAndSegments.serverPort)
// TODO: support grpc and netty-sec
val serverInstance = new ServerInstance(instanceConfig)
Map(
serverInstance -> pinotSplit.serverAndSegments.segments.asJava
).asJava
}
private def submitRequestToPinotServer(
offlineBrokerRequest: BrokerRequest,
offlineRoutingTable: JMap[ServerInstance, JList[String]],
realtimeBrokerRequest: BrokerRequest,
realtimeRoutingTable: JMap[ServerInstance, JList[String]]): AsyncQueryResponse = {
logInfo(s"Request is sending to the ${pinotSplit.serverAndSegments.toString}")
queryRouter.submitQuery(
partitionId,
pinotSplit.generatedSQLs.rawTableName,
offlineBrokerRequest,
offlineRoutingTable,
realtimeBrokerRequest,
realtimeRoutingTable,
dataSourceOptions.pinotServerTimeoutMs
)
}
private def closePinotServerConnection(): Unit = {
queryRouter.shutDown()
logInfo("Pinot server connection closed")
}
}
object PinotServerDataFetcher {
def apply(
partitionId: Int,
pinotSplit: PinotSplit,
dataSourceOptions: PinotDataSourceReadOptions): PinotServerDataFetcher = {
new PinotServerDataFetcher(partitionId, pinotSplit, dataSourceOptions)
}
}
|
linkedin/pinot
|
pinot-connectors/pinot-spark-connector/src/main/scala/org/apache/pinot/connector/spark/connector/PinotServerDataFetcher.scala
|
Scala
|
apache-2.0
| 5,352 |
package models.join
import models.db._
/**
*
* @author ponkotuy
* Date: 14/10/22.
*/
case class SlotItemWithMaster(item: SlotItem, master: MasterSlotItem) {
def memberId = item.memberId
def slotitemId = master.id
def name = master.name
def category = master.category
def iconType = master.iconType
def level = item.level
def alv = item.alv
def colorClass: String = {
import tool.EquipIconType._
iconType.map {
case MainGunS | MainGunM | MainGunL | AntiShipBullet | Bomber => "danger"
case CaliberGun | AntiAirBullet | AntiAirGun | Fighter | ScoutSeaplane | Autogiro => "success"
case SubGun | Scout | Radar | EngineChamber | Searchlight | Flare => "warning"
case Torpedo | TorpedoBomber | Sonar | DepthBomb => "info"
case LandingCraft => "success"
case MaritimePartrolAircraft => "info"
case Armor => "purple"
case DamageControl | SimplifiedPackage | RepairFacility | Pilot | Command => ""
}
}.getOrElse("")
  /** Says "withLevel", but also tacks on the aircraft proficiency (alv) as a bonus */
def nameWithLevel = master.name + item.withLevel + alvStr.getOrElse("")
def alvStr: Option[String] = alv.map { alv => s"(${alv})" }
def itemSnapshot(shipSnapshotId: Long, position: Int, now: Long) = new ItemSnapshot(
id = 0L,
memberId = memberId,
shipSnapshotId = shipSnapshotId,
position = position,
slotitemId = slotitemId,
level = level,
alv = alv,
created = now
)
}
|
ttdoda/MyFleetGirls
|
server/app/models/join/SlotItemWithMaster.scala
|
Scala
|
mit
| 1,494 |
package aoc.day07
import io.IO
object Part1 extends App {
/*
--- Day 7: Some Assembly Required ---
This year, Santa brought little Bobby Tables a set of wires and bitwise
logic gates! Unfortunately, little Bobby is a little under the recommended
age range, and he needs help assembling the circuit.
Each wire has an identifier (some lowercase letters) and can carry a 16-bit
signal (a number from 0 to 65535). A signal is provided to each wire by a
gate, another wire, or some specific value. Each wire can only get a signal
from one source, but can provide its signal to multiple destinations. A gate
provides no signal until all of its inputs have a signal.
The included instructions booklet describe how to connect the parts together:
x AND y -> z means to connect wires x and y to an AND gate, and then connect
its output to wire z.
For example:
123 -> x means that the signal 123 is provided to wire x.
x AND y -> z means that the bitwise AND of wire x and wire y is provided
to wire z.
p LSHIFT 2 -> q means that the value from wire p is left-shifted by 2 and
then provided to wire q.
NOT e -> f means that the bitwise complement of the value from wire e is
provided to wire f.
Other possible gates include OR (bitwise OR) and RSHIFT (right-shift). If, for
some reason, you'd like to emulate the circuit instead, almost all programming
languages (for example, C, JavaScript, or Python) provide operators for these
gates.
For example, here is a simple circuit:
123 -> x
456 -> y
x AND y -> d
x OR y -> e
x LSHIFT 2 -> f
y RSHIFT 2 -> g
NOT x -> h
NOT y -> i
After it is run, these are the signals on the wires:
d: 72
e: 507
f: 492
g: 114
h: 65412
i: 65079
x: 123
y: 456
In little Bobby's kit's instructions booklet (provided as your puzzle input),
what signal is ultimately provided to wire a?
*/
object WireBox {
private var registerDependencies = Map[String, Expr]() // Register dependencies between variables
private var registerValue = Map[String, Int]() // Register computation of values of the variables
private var needRecomputation = true // Flag to note when a recomputation is needed (i.e., a variable is added or modified)
// Computation stack
def addToRegister(vName: String, e: Expr): Unit = {
registerDependencies += (vName -> e)
needRecomputation = true
}
def variables: Set[String] =
registerDependencies.keys.toSet
def apply(varName: String): Int =
eval(registerDependencies(varName))
def assignement(expr: Expr, variable: Variable): Unit =
addToRegister(variable.name, expr)
private var evaluationStack = List[String]()
private def stack(name: String): Unit =
evaluationStack = name :: evaluationStack
private def pop(): Unit =
evaluationStack = evaluationStack.tail
private def reset(): Unit =
evaluationStack = List[String]()
def eval(e: Expr): Int = {
def evalHelper(e: Expr): Int =
e match {
case Value(v) => v
case AND(x, y) =>
evalHelper(x) & evalHelper(y)
case OR(x, y) =>
evalHelper(x) | evalHelper(y)
case LSHIFT(x: Expr, shift: Int) =>
evalHelper(x) << shift
case RSHIFT(x, shift) =>
evalHelper(x) >> shift
case NOT(x: Expr) =>
~evalHelper(x)
case Variable(name) =>
if (registerValue.contains(name)) {
// Already evaluated
registerValue(name)
} else {
// Evaluate
if (evaluationStack.contains(name)) {
// Cyclic dependency detected
throw new Exception(s"Cyclic dependency detected with $name")
} else {
              val ref = registerDependencies(name) // throws if the wire name was never assigned
// Stacking of the ref name that is going to be computed
stack(name)
// Evaluation of its value
val value = evalHelper(ref)
registerValue += (name -> value)
// Remove the ref name from the stack (it is now evaluated)
pop()
value
}
}
}
// Clean the computation
if (needRecomputation) {
reset() // Reset stack
registerValue = Map[String, Int]()
needRecomputation = false
}
// Eval
evalHelper(e)
}
sealed abstract class Expr
case class Value(v: Int) extends Expr
case class Variable(name: String) extends Expr
case class AND(a: Expr, b: Expr) extends Expr
case class OR(a: Expr, b: Expr) extends Expr
case class LSHIFT(a: Expr, shift: Int) extends Expr
case class RSHIFT(a: Expr, shift: Int) extends Expr
case class NOT(x: Expr) extends Expr
override def toString =
s"WireBox(size=${registerDependencies.size})"
}
def stringToAssignementExecution(line: String): Unit = {
import WireBox._
val assignementR = """([a-zA-Z]+|[0-9]+) -> ([a-zA-Z]+)""".r
val binaryOpR = """([a-zA-Z]+|[0-9]+) (AND|OR) ([a-zA-Z]+|[0-9]+) -> ([a-zA-Z]+)""".r
val shiftR = """([a-zA-Z]+) (LSHIFT|RSHIFT) ([0-9]+) -> ([a-zA-Z]+)""".r
val notR = """NOT ([a-zA-Z]+) -> ([a-zA-Z]+)""".r
def stringToExpr(item: String): Expr =
if (item forall Character.isDigit) {
Value(item.toInt)
} else {
Variable(item)
}
line match {
case assignementR(expr, variable) =>
assignement(stringToExpr(expr), Variable(variable))
case binaryOpR(expr1, op, expr2, vAssign) =>
val operation = op match {
case "AND" => AND(stringToExpr(expr1), stringToExpr(expr2))
case "OR" => OR(stringToExpr(expr1), stringToExpr(expr2))
}
assignement(operation, Variable(vAssign))
case shiftR(v1, op, value, vAssign) =>
val operation = op match {
case "RSHIFT" => RSHIFT(Variable(v1), value.toInt)
case "LSHIFT" => LSHIFT(Variable(v1), value.toInt)
}
assignement(operation, Variable(vAssign))
case notR(v1, v2) =>
assignement(NOT(Variable(v1)), Variable(v2))
}
}
val input = IO.getLines()
for (op <- input) {
stringToAssignementExecution(op)
}
println(s"Signal provided to a: ${WireBox("a")}")
}
|
GuillaumeDD/AdventOfCode2015
|
src/main/scala/aoc/day07/Part1.scala
|
Scala
|
gpl-3.0
| 6,373 |
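// Quick sanity check of the sample circuit from the puzzle text above: the expected wire
// values can be reproduced with plain bitwise operators, masking to 16 bits as the puzzle
// specifies. This is a standalone check, separate from the WireBox solution.
object SampleCircuitCheck {
  def main(args: Array[String]): Unit = {
    val mask = 0xFFFF // wires carry 16-bit signals
    val x = 123
    val y = 456
    println(s"d: ${(x & y) & mask}")  // 72
    println(s"e: ${(x | y) & mask}")  // 507
    println(s"f: ${(x << 2) & mask}") // 492
    println(s"g: ${(y >> 2) & mask}") // 114
    println(s"h: ${~x & mask}")       // 65412
    println(s"i: ${~y & mask}")       // 65079
  }
}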
package screact
import scutil.core.implicits.*
import scutil.lang.*
// AKA Publisher
object Events {
// (in foldLeft never[T]) { _ orElse _ }
def multiOrElse[T](in:Seq[Events[T]]):Events[T] =
events {
(in mapFilter { _.message }).headOption
}
def multiOccurs[T](in:Seq[Events[T]]):Events[Seq[T]] =
events {
in mapFilter { _.message } optionBy { _.nonEmpty }
}
}
/** a Reactive without a (useful) current value, just emitting events */
trait Events[+T] extends Reactive[Unit,T] {
private[screact] val cur:Unit = ()
// convert to Signal
/** the initial value at start and the last event's value afterwards */
final def hold[U>:T](initial:U):Signal[U] = {
// modify state only after evaluation of source nodes
var value = initial
signal {
message foreach { msgval =>
value = msgval
}
value
}
}
/** like hold, but with access to the previous value */
final def scan[U](initial:U)(func:(U,T)=>U):Signal[U] = {
// modify state only after evaluation of source nodes
var value = initial
signal {
message foreach { msgval =>
value = func(value, msgval)
}
value
}
}
// stateful events
/** take state and message, produce new state and output */
final def stateful[S,U](initial:S)(func:(S,T)=>(S,U)):Events[U] = {
// modify state only after evaluation of source nodes
var state = initial
events {
message match {
case Some(msgval) =>
val (newState, out) = func(state, msgval)
state = newState
Some(out)
case None =>
None
}
}
}
// with a zero
final def filter(func:Predicate[T]):Events[T] =
events { message filter func }
final def filterNot(func:Predicate[T]):Events[T] =
events { message filter !func }
final def collect[U](func:PartialFunction[T,U]):Events[U] =
events { message collect func }
@deprecated("use mapFilter", "0.207.0")
final def filterMap[U](func:T=>Option[U]):Events[U] =
mapFilter(func)
final def mapFilter[U](func:T=>Option[U]):Events[U] =
this map func collect { case Some(value) => value }
@deprecated("use flattenOption", "0.207.0")
final def filterOption[U](implicit ev:T <:< Option[U]):Events[U] =
flattenOption
final def flattenOption[U](implicit ev:T <:< Option[U]):Events[U] =
mapFilter(ev)
// functor
final def map[U](func:T=>U):Events[U] =
events { message map func }
// applicative functor
final def ap[U,V](source:Events[U])(implicit ev:T <:< (U=>V)):Events[V] =
for {
func <- this map ev
arg <- source
}
yield func(arg)
/*
final def pa[U](func:Events[T=>U]):Events[U] =
for {
func <- func
arg <- this
}
yield func(arg)
*/
// monad
final def flatMap[U](func:T=>Events[U]):Events[U] =
(this map func hold never).flattenEvents
final def flatten[U](implicit ev:T <:< Events[U]):Events[U] =
this flatMap ev
// monad to Signal
final def flatMapSignal[U](func:T=>Signal[U]):Events[U] =
events {
this.message map { it => func(it).current }
}
final def flattenSignal[U](implicit ev:T <:< Signal[U]):Events[U] =
this flatMapSignal ev
// monoid with never
final def orElse[U>:T](that:Events[U]):Events[U] =
(this, that) match {
case (_,_:NeverEvents[?]) => this
case (_:NeverEvents[?],_) => that
case _ =>
events {
// NOTE needs to access both message methods or registration fails!
val thisMessage = this.message
val thatMessage = that.message
thisMessage orElse thatMessage
}
}
final def mergeWith[U>:T](that:Events[U])(func:(U,U)=>U):Events[U] =
events {
// NOTE needs to access both message methods or registration fails!
(this.message, that.message) match {
case (Some(thisMessage), Some(thatMessage)) => Some(func(thisMessage, thatMessage))
case (Some(thisMessage), None) => Some(thisMessage)
case (None, Some(thatMessage)) => Some(thatMessage)
case (None, None) => None
}
}
// combine with values
final def tag[U](that: =>U):Events[U] =
this map { _ => that }
final def tagUnit:Events[Unit] =
this map constant(())
final def when(func: =>Boolean):Events[T] =
this filter { _ => func }
final def trueUnit(implicit ev:T <:< Boolean):Events[Unit] = {
val _ = ev
this collect { case true => () }
}
final def falseUnit(implicit ev:T <:< Boolean):Events[Unit] = {
val _ = ev
this collect { case false => () }
}
// combine with signals
final def snapshot[U](that:Signal[U]):Events[(T,U)] =
snapshotWith(that) { (_,_) }
final def snapshotOnly[U](that:Signal[U]):Events[U] =
snapshotWith(that) { (_, it) => it }
final def snapshotWith[U,V](that:Signal[U])(func:(T,U)=>V):Events[V] =
events {
val when = this.message
val what = that.current
when map { it => func(it, what) }
}
final def gate(that:Signal[Boolean]):Events[T] =
events {
val when = this.message
val gate = that.current
when filter { _ => gate }
}
// delayable
/** emits in the next update cycle */
final def delay(using observing:Observing):Events[T] = {
val out = new SourceEvents[T]
observing.observe(this, out.emit)
out
}
// other
@deprecated("use product", "0.207.0")
final def tuple[U](that:Events[U]):Events[(T,U)] =
product(that)
/** emits an event if both inputs fire at the same instant */
final def product[U](that:Events[U]):Events[(T,U)] =
map2(that) { (_,_) }
/** emits an event if both inputs fire at the same instant */
final def map2[U,V](that:Events[U])(func:(T,U)=>V):Events[V] =
events {
(this.message, that.message) match {
case (Some(thisMessage),Some(thatMessage)) => Some(func(thisMessage, thatMessage))
case _ => None
}
}
@deprecated("use fproduct", "0.207.0")
final def tupleBy[U](func:T=>U):Events[(T,U)] =
fproduct(func)
final def fproduct[U](func:T=>U):Events[(T,U)] =
this map { it => (it,func(it)) }
final def untuple[U,V](implicit ev:T <:< (U,V)):(Events[U],Events[V]) =
(map(_._1), map(_._2))
final def sum[U](that:Events[U]):Events[Either[T,U]] =
(this map { Left(_) }) orElse (that map { Right(_) })
final def unsum[U,V](implicit ev:T <:< Either[U,V]):(Events[U],Events[V]) =
( events { message flatMap { (it:T) => ev(it).left.toOption } },
events { message flatMap { (it:T) => ev(it).toOption } }
)
final def partition(func:T=>Boolean):(Events[T],Events[T]) =
( events { message filter func },
events { message filter !func }
)
/** take the first count events, drop the rest */
final def take(count:Int):Events[T] = {
// modify state only after evaluation of source nodes
var todo = count
events {
			// when we're done, losing the connection is actually a good thing
if (todo != 0) {
val out = message
todo -= 1
out
}
else {
None
}
}
}
/** drop the first count events, take the rest */
final def drop(count:Int):Events[T] = {
// modify state only after evaluation of source nodes
var todo = count
events {
// need to access message every time to avoid loss of connection
val value = message
val _ = value
if (todo != 0) {
todo -= 1
None
}
else {
value
}
}
}
}
|
ritschwumm/screact
|
src/main/scala/screact/Events.scala
|
Scala
|
bsd-2-clause
| 7,122 |
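// A composition-only sketch against the Events combinators above (collect, hold, scan).
// Constructing and firing an Events source needs parts of the library not shown here
// (e.g. SourceEvents plus an Observing scope), so this just wires derived nodes together.
import screact._

object EventsSketch {
  // keep even ticks, remember the last one as a Signal, and fold a running count of all ticks
  def wire(ticks: Events[Int]): (Signal[Int], Signal[Int]) = {
    val evens: Events[Int] = ticks collect { case n if n % 2 == 0 => n }
    val last: Signal[Int]  = evens hold 0
    val count: Signal[Int] = ticks.scan(0)((n, _) => n + 1)
    (last, count)
  }
}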
package com.twitter.finatra.validation.constraints
import com.twitter.finatra.validation.ErrorCode
import com.twitter.util.validation.constraintvalidation.TwitterConstraintValidatorContext
import jakarta.validation.{ConstraintValidator, ConstraintValidatorContext}
import java.util.concurrent.TimeUnit
import java.util.concurrent.TimeUnit._
import org.joda.time.{DateTime, DateTimeZone}
private[validation] object TimeGranularityConstraintValidator {
private[validation] def singularize(timeUnit: TimeUnit): String = {
val timeUnitStr = timeUnit.toString.toLowerCase
timeUnitStr.substring(0, timeUnitStr.length - 1)
}
}
/**
* The validator for [[TimeGranularity]] annotation.
*
* Validates if a given value is of a given time granularity (e.g., days, hours, minutes, seconds).
* E.g. A granularity of Minute is valid for 10:05:00 and not valid for 10:05:13.
*/
@deprecated("Users should prefer to use standard constraints.", "2021-03-05")
private[validation] class TimeGranularityConstraintValidator
extends ConstraintValidator[TimeGranularity, DateTime] {
import TimeGranularityConstraintValidator._
@volatile private[this] var timeGranularity: TimeUnit = _
override def initialize(constraintAnnotation: TimeGranularity): Unit = {
this.timeGranularity = constraintAnnotation.value
}
override def isValid(
obj: DateTime,
constraintValidatorContext: ConstraintValidatorContext
): Boolean = {
val valid = isGranularity(obj, timeGranularity)
if (!valid) {
TwitterConstraintValidatorContext
.withDynamicPayload(ErrorCode.InvalidTimeGranularity(obj, timeGranularity))
.withMessageTemplate(
s"[${obj.toString}] is not ${singularize(timeGranularity)} granularity")
.addConstraintViolation(constraintValidatorContext)
}
valid
}
/* Private */
private[this] def isGranularity(value: DateTime, timeGranularity: TimeUnit): Boolean = {
val utcDateTime = value.toDateTime(DateTimeZone.UTC)
toNanos(utcDateTime.getMillis, MILLISECONDS) % toNanos(1, timeGranularity) == 0
}
private[this] def toNanos(value: Long, timeUnit: TimeUnit): Long =
NANOSECONDS.convert(value, timeUnit)
}
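// Hedged illustration (not part of the Finatra source above): the same granularity
// arithmetic as isGranularity, reduced to plain epoch millis so it can be run without
// the Jakarta validation machinery. The object and its sample values are made up for the sketch.
object TimeGranularityArithmeticExample {
  import java.util.concurrent.TimeUnit
  import java.util.concurrent.TimeUnit._

  // A timestamp matches a granularity when its nanosecond value is an exact
  // multiple of one unit of that granularity, exactly as in isGranularity above.
  def isGranularity(epochMillis: Long, granularity: TimeUnit): Boolean =
    NANOSECONDS.convert(epochMillis, MILLISECONDS) % NANOSECONDS.convert(1, granularity) == 0

  def main(args: Array[String]): Unit = {
    val tenOhFive = (10 * 60 + 5) * 60 * 1000L         // 10:05:00 UTC as epoch millis
    val tenOhFiveThirteen = tenOhFive + 13 * 1000L     // 10:05:13 UTC
    println(isGranularity(tenOhFive, MINUTES))         // true
    println(isGranularity(tenOhFiveThirteen, MINUTES)) // false
  }
}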
|
twitter/finatra
|
validation/src/main/scala/com/twitter/finatra/validation/constraints/TimeGranularityConstraintValidator.scala
|
Scala
|
apache-2.0
| 2,198 |
package mashup
import java.util.concurrent.CountDownLatch
import akka.actor.{Actor, ActorRef, Props}
import com.danielasfregola.twitter4s.TwitterRestClient
import com.danielasfregola.twitter4s.entities._
import com.danielasfregola.twitter4s.entities.enums.ResultType
import mashup.AppImplicitsAndConfig.AppConfig
import mashup.Protocol._
import scala.concurrent.Future
import scala.util.{Failure, Success}
class TwitterActor(projectName: String, tweetCount: Int = 10, driver: ActorRef, doneSignal: CountDownLatch, appConfig: AppConfig)
extends Actor
with akka.actor.ActorLogging {
log.info(s"Beginning search of twitter for the $tweetCount most recent tweets about project $projectName")
import AppImplicitsAndConfig._
// setup the tokens for Dan's twitter4s module
val consumerToken = appConfig("consumer").asInstanceOf[ConsumerToken]
val accessToken = appConfig("access").asInstanceOf[AccessToken]
// and the client that's gonna do the heavy lifting - we don't need to do the raw api calls - coolio
val client = new TwitterRestClient(consumerToken, accessToken)
// enter the dragon, I mean, loop...
override def receive: Receive = {
// ok, so we've been given signal to search twitter
case TwitterSearch(gitHubProjectToSearch) =>
val future: Future[RatedData[StatusSearch]] = client.searchTweet(query = gitHubProjectToSearch.name, count = tweetCount, result_type = ResultType.Recent)
future.onComplete {
case Success(RatedData(rate_limit, StatusSearch(listTweets, _))) => sendMsg(ProjectTweetsInfo(gitHubProjectToSearch, listTweets.map(_.text)))
case Failure(_) => sendMsg(BadTwitterResponse(s"Failed to get response from twitter for $projectName"))
}
// ok, so we've been told we can quit now
case Quit => log.info(s"Twitter actor for project ${projectName} sent signal to shutdown...");
driver ! ActorShuttingDown(projectName)
context.stop(this.self)
// ah FFS, something unknown, log it so!
case _ => log.info("Received unknown message from Twitter"); sendMsg(BadTwitterResponse(s"Something went wrong when searching twitter for $projectName"))
}
// inform the mother ship of the outcome as well as counting down the latch
def sendMsg(command: Any) : Unit = {
driver ! command
doneSignal.countDown()
}
}
object TwitterActor {
def props(projectName: String)(tweetCount: Int = 10)(driver: ActorRef)(doneSignal: CountDownLatch)(implicit appConfig: AppConfig): Props
= Props(new TwitterActor(projectName, tweetCount, driver, doneSignal, appConfig))
}
|
kevin-orr/interviewQs
|
github-twitter-mashup-app/scala-with-actors/src/main/scala/mashup/TwitterActor.scala
|
Scala
|
mit
| 2,650 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.data
import org.apache.accumulo.core.client.Connector
import org.apache.accumulo.core.client.impl.{MasterClient, Tables}
import org.apache.accumulo.core.client.mock.MockConnector
import org.apache.accumulo.core.data.{Mutation, Range, Value}
import org.apache.accumulo.core.security.ColumnVisibility
import org.apache.accumulo.core.security.thrift.TCredentials
import org.apache.accumulo.trace.instrument.Tracer
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.core.data.AccumuloBackedMetadata._
import org.locationtech.geomesa.core.util.{GeoMesaBatchWriterConfig, SelfClosingIterator}
import org.locationtech.geomesa.security.AuthorizationsProvider
import scala.collection.JavaConversions._
import scala.collection.mutable
/**
* GeoMesa Metadata/Catalog abstraction using key/value String pairs, stored
* on a per-feature-name basis
*/
trait GeoMesaMetadata {
def delete(featureName: String, numThreads: Int)
def insert(featureName: String, key: String, value: String)
def insert(featureName: String, kvPairs: Map[String, String])
def insert(featureName: String, key: String, value: String, vis: String)
def read(featureName: String, key: String): Option[String]
def readRequired(featureName: String, key: String): String
def readRequiredNoCache(featureName: String, key: String): Option[String]
def expireCache(featureName: String)
def getFeatureTypes: Array[String]
def getTableSize(tableName: String): Long
}
class AccumuloBackedMetadata(connector: Connector,
catalogTable: String,
writeVisibilities: String,
authorizationsProvider: AuthorizationsProvider) extends GeoMesaMetadata {
// warning: only access this map in a synchronized fashion
private val metaDataCache = new mutable.HashMap[(String, String), Option[String]]()
private val metadataBWConfig =
GeoMesaBatchWriterConfig().setMaxMemory(10000L).setMaxWriteThreads(1)
/**
* Handles creating a mutation for writing metadata
*
* @param featureName
* @return
*/
private def getMetadataMutation(featureName: String) = new Mutation(getMetadataRowKey(featureName))
/**
* Handles encoding metadata into a mutation.
*
* @param featureName
* @param mutation
* @param key
* @param value
*/
private def putMetadata(featureName: String,
mutation: Mutation,
key: String,
value: String) {
mutation.put(new Text(key), EMPTY_COLQ, new Value(value.getBytes))
// also pre-fetch into the cache
if (!value.isEmpty) {
metaDataCache.synchronized { metaDataCache.put((featureName, key), Some(value)) }
}
}
/**
* Handles writing mutations
*
* @param mutations
*/
private def writeMutations(mutations: Mutation*): Unit = {
val writer = connector.createBatchWriter(catalogTable, metadataBWConfig)
for (mutation <- mutations) {
writer.addMutation(mutation)
}
writer.flush()
writer.close()
}
/**
* Handles deleting metadata from the catalog by building a Range from METADATA_TAG and featureName
* and passing it to Accumulo's BatchDeleter
*
* @param featureName the name of the table to query and delete from
* @param numThreads the number of concurrent threads to spawn for querying
*/
override def delete(featureName: String, numThreads: Int): Unit = {
val range = new Range(getMetadataRowKey(featureName))
val deleter = connector.createBatchDeleter(catalogTable,
authorizationsProvider.getAuthorizations,
numThreads,
metadataBWConfig)
deleter.setRanges(List(range))
deleter.delete()
deleter.close()
}
/**
* Creates the row id for a metadata entry
*
* @param featureName
* @return
*/
private def getMetadataRowKey(featureName: String) = new Text(METADATA_TAG + "_" + featureName)
/**
* Reads metadata from cache or scans if not available
*
* @param featureName
* @param key
* @return
*/
override def read(featureName: String, key: String): Option[String] =
metaDataCache.synchronized {
metaDataCache.getOrElseUpdate((featureName, key), readRequiredNoCache(featureName, key))
}
override def readRequired(featureName: String, key: String): String =
read(featureName, key)
.getOrElse(throw new RuntimeException(s"Unable to find required metadata property for key $key"))
/**
* Gets metadata by scanning the table, without the local cache
*
* Read metadata using scheme: ~METADATA_featureName metadataFieldName: insertionTimestamp metadataValue
*
* @param featureName
* @param key
* @return
*/
override def readRequiredNoCache(featureName: String, key: String): Option[String] = {
val scanner = createCatalogScanner
scanner.setRange(new Range(getMetadataRowKey(featureName)))
scanner.fetchColumn(new Text(key), EMPTY_COLQ)
SelfClosingIterator(scanner).map(_.getValue.toString).toList.headOption
}
/**
* Create an Accumulo Scanner to the Catalog table to query Metadata for this store
*/
private def createCatalogScanner = connector.createScanner(catalogTable, authorizationsProvider.getAuthorizations)
override def expireCache(featureName: String) =
metaDataCache.synchronized {
metaDataCache.keys.filter { case (fn, _) => fn == featureName}.foreach(metaDataCache.remove)
}
override def insert(featureName: String, key: String, value: String) =
insert(featureName, Map(key -> value))
override def insert(featureName: String, kvPairs: Map[String, String]) = {
val mutation = getMetadataMutation(featureName)
kvPairs.foreach { case (k,v) =>
putMetadata(featureName, mutation, k, v)
}
writeMutations(mutation)
}
override def insert(featureName: String, key: String, value: String, vis: String) = {
val mutation = getMetadataMutation(featureName)
mutation.put(new Text(key), EMPTY_COLQ, new ColumnVisibility(vis), new Value(vis.getBytes))
writeMutations(mutation)
}
/**
* Scans metadata rows and pulls out the different feature types in the table
*
* @return
*/
override def getFeatureTypes: Array[String] = {
val scanner = createCatalogScanner
scanner.setRange(new Range(METADATA_TAG, METADATA_TAG_END))
// restrict to just schema cf so we only get 1 hit per feature
scanner.fetchColumnFamily(new Text(SCHEMA_KEY))
val resultItr = new Iterator[String] {
val src = scanner.iterator()
def hasNext = {
val next = src.hasNext
if (!next) {
scanner.close()
}
next
}
def next() = src.next().getKey.getRow.toString
}
resultItr.toArray.map(getFeatureNameFromMetadataRowKey)
}
/**
* Reads the feature name from a given metadata row key
*
* @param rowKey
* @return
*/
private def getFeatureNameFromMetadataRowKey(rowKey: String): String = {
val MetadataRowKeyRegex(featureName) = rowKey
featureName
}
// This lazily computed function helps getCount avoid scanning entire tables.
lazy val retrieveTableSize: (String) => Long =
if (connector.isInstanceOf[MockConnector]) {
(tableName: String) => -1
} else {
val masterClient = MasterClient.getConnection(connector.getInstance())
val tc = new TCredentials()
val mmi = masterClient.getMasterStats(Tracer.traceInfo(), tc)
(tableName: String) => {
val tableId = Tables.getTableId(connector.getInstance(), tableName)
val v = mmi.getTableMap.get(tableId)
v.getRecs
}
}
override def getTableSize(tableName: String): Long = {
retrieveTableSize(tableName)
}
}
object AccumuloBackedMetadata {
val MetadataRowKeyRegex = (METADATA_TAG + """_(.*)""").r
}
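/*
 * Hedged illustration (not part of the GeoMesa source above): a minimal in-memory
 * implementation of the GeoMesaMetadata trait, sketched only to make the trait's
 * contract concrete. It ignores the visibility labels, caching and threading
 * concerns that the Accumulo-backed implementation handles.
 */
object InMemoryGeoMesaMetadataExample {
  class InMemoryMetadata extends GeoMesaMetadata {
    private val store = scala.collection.mutable.Map.empty[(String, String), String]
    override def delete(featureName: String, numThreads: Int): Unit =
      store.keys.toList.filter(_._1 == featureName).foreach(store.remove)
    override def insert(featureName: String, key: String, value: String): Unit =
      store((featureName, key)) = value
    override def insert(featureName: String, kvPairs: Map[String, String]): Unit =
      kvPairs.foreach { case (k, v) => insert(featureName, k, v) }
    override def insert(featureName: String, key: String, value: String, vis: String): Unit =
      insert(featureName, key, value) // visibility label ignored in this sketch
    override def read(featureName: String, key: String): Option[String] =
      store.get((featureName, key))
    override def readRequired(featureName: String, key: String): String =
      read(featureName, key).getOrElse(sys.error(s"Unable to find required metadata property for key $key"))
    override def readRequiredNoCache(featureName: String, key: String): Option[String] =
      read(featureName, key) // nothing is cached in this sketch
    override def expireCache(featureName: String): Unit = () // nothing cached, nothing to expire
    override def getFeatureTypes: Array[String] = store.keys.toList.map(_._1).distinct.toArray
    override def getTableSize(tableName: String): Long = store.size.toLong
  }
}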
|
jnh5y/geomesa
|
geomesa-core/src/main/scala/org/locationtech/geomesa/core/data/GeoMesaMetadata.scala
|
Scala
|
apache-2.0
| 8,687 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2021 GatlingCql developers
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package io.github.gatling.cql.request
import com.datastax.oss.driver.api.core.CqlSession
import com.typesafe.scalalogging.StrictLogging
// just a wrapper around CqlProtocol
object CqlProtocolBuilder {
/**
* Defines an instance of [[com.datastax.oss.driver.api.core.CqlSession]] which will be used by the CQL protocol
* */
def session(session: CqlSession) = CqlProtocolBuilder(session)
}
case class CqlProtocolBuilder(session: CqlSession) extends StrictLogging {
def build = new CqlProtocol(session)
}
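// Hedged usage sketch (not part of the original file): how the builder above is
// typically wired into a Gatling simulation. The Gatling DSL calls (scenario,
// atOnceUsers, setUp) come from the surrounding project's dependencies and are
// shown only as an illustration, not verified against a specific version.
/*
val session = CqlSession.builder().build()
val cqlProtocol = CqlProtocolBuilder.session(session)
setUp(scenario("cql").inject(atOnceUsers(1)).protocols(cqlProtocol.build))
*/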
|
gatling-cql/GatlingCql
|
src/main/scala/io/github/gatling/cql/request/CqlProtocolBuilder.scala
|
Scala
|
mit
| 1,668 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.state
import java.io.{File, FileInputStream, InputStream}
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.Files
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.JavaConverters._
import com.fasterxml.jackson.annotation.JsonInclude.Include
import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
import com.fasterxml.jackson.module.scala.{DefaultScalaModule, ScalaObjectMapper}
import org.apache.commons.io.{FilenameUtils, IOUtils}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, PathFilter}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.streaming.CheckpointFileManager
import org.apache.spark.util.Utils
/**
* Class responsible for syncing RocksDB checkpoint files from local disk to DFS.
* For each version, the checkpoint is saved in a specific directory structure that allows successive
* versions to reuse the SST data files and archived log files. This makes each commit
* incremental: only new SST files and archived log files generated by RocksDB will be uploaded.
* The directory structures on local disk and in DFS are as follows.
*
* Local checkpoint dir structure
* ------------------------------
* RocksDB generates a number of files in the local checkpoint directory. The most important among
* them are the SST files; they are the actual log-structured data files. The rest of the files contain
* the metadata necessary for RocksDB to read the SST files and start from the checkpoint.
* Note that the SST files are hard links to files in RocksDB's working directory, and therefore
* successive checkpoints can share some of the SST files. So these SST files have to be copied to
* DFS into a shared directory such that different committed versions can reuse them.
*
* We consider both SST files and archived log files as immutable files which can be shared between
* different checkpoints.
*
* localCheckpointDir
* |
* +-- OPTIONS-000005
* +-- MANIFEST-000008
* +-- CURRENT
* +-- 00007.sst
* +-- 00011.sst
* +-- archive
* | +-- 00008.log
* | +-- 00013.log
* ...
*
*
* DFS directory structure after saving to DFS as version 10
* -----------------------------------------------------------
* The SST and archived log files are given unique file names and copied to the shared subdirectory.
* Every version maintains a mapping of local immutable file name to the unique file name in DFS.
* This mapping is saved in a JSON file (named `metadata`), which is zipped along with other
* checkpoint files into a single file `[version].zip`.
*
* dfsRootDir
* |
* +-- SSTs
* | +-- 00007-[uuid1].sst
* | +-- 00011-[uuid2].sst
* +-- logs
* | +-- 00008-[uuid3].log
* | +-- 00013-[uuid4].log
* +-- 10.zip
* | +-- metadata <--- contains mapping between 00007.sst and [uuid1].sst,
* and the mapping between 00008.log and [uuid3].log
* | +-- OPTIONS-000005
* | +-- MANIFEST-000008
* | +-- CURRENT
* | ...
* |
* +-- 9.zip
* +-- 8.zip
* ...
*
* Note the following.
* - Each [version].zip is a complete description of all the data and metadata needed to recover
*   a RocksDB instance at the corresponding version. The SST files and log files are not included
*   in the zip files; they can be shared across different versions. This is unlike the
*   [version].delta files of HDFSBackedStateStore, where previous delta files need to be read
*   to recover a version.
* - This is safe wrt speculatively executed tasks running concurrently in different executors
* as each task would upload a different copy of the generated immutable files and
* atomically update the [version].zip.
* - Immutable files are identified uniquely based on their file name and file size.
* - Immutable files can be reused only across adjacent checkpoints/versions.
* - This class is thread-safe. Specifically, it is safe to concurrently delete old files from a
* different thread than the task thread saving files.
*
* @param dfsRootDir Directory where the [version].zip files will be stored
* @param localTempDir Local directory for temporary work
* @param hadoopConf Hadoop configuration for talking to DFS
* @param loggingId Id that will be prepended in logs for isolating concurrent RocksDBs
*/
class RocksDBFileManager(
dfsRootDir: String,
localTempDir: File,
hadoopConf: Configuration,
loggingId: String = "")
extends Logging {
import RocksDBImmutableFile._
private val versionToRocksDBFiles = new ConcurrentHashMap[Long, Seq[RocksDBImmutableFile]]
private lazy val fm = CheckpointFileManager.create(new Path(dfsRootDir), hadoopConf)
private val fs = new Path(dfsRootDir).getFileSystem(hadoopConf)
private val onlyZipFiles = new PathFilter {
override def accept(path: Path): Boolean = path.toString.endsWith(".zip")
}
/** Save all the files in given local checkpoint directory as a committed version in DFS */
def saveCheckpointToDfs(checkpointDir: File, version: Long, numKeys: Long): Unit = {
logFilesInDir(checkpointDir, s"Saving checkpoint files for version $version")
val (localImmutableFiles, localOtherFiles) = listRocksDBFiles(checkpointDir)
val rocksDBFiles = saveImmutableFilesToDfs(version, localImmutableFiles)
val metadata = RocksDBCheckpointMetadata(rocksDBFiles, numKeys)
val metadataFile = localMetadataFile(checkpointDir)
metadata.writeToFile(metadataFile)
logInfo(s"Written metadata for version $version:\\n${metadata.prettyJson}")
if (version <= 1 && numKeys == 0) {
// If we're writing the initial version and there's no data, we have to explicitly initialize
// the root directory. Normally saveImmutableFilesToDfs will do this initialization, but
// when there's no data that method won't write any files, and zipToDfsFile uses the
// CheckpointFileManager.createAtomic API which doesn't auto-initialize parent directories.
val path = new Path(dfsRootDir)
if (!fm.exists(path)) fm.mkdirs(path)
}
zipToDfsFile(localOtherFiles :+ metadataFile, dfsBatchZipFile(version))
logInfo(s"Saved checkpoint file for version $version")
}
/**
* Load all necessary files for a specific checkpoint version from DFS to the given local directory.
* If version is 0, then it will delete all files in the directory. For other versions, it
* ensures that only the exact files generated during checkpointing will be present in the
* local directory.
*/
def loadCheckpointFromDfs(version: Long, localDir: File): RocksDBCheckpointMetadata = {
logInfo(s"Loading checkpoint files for version $version")
val metadata = if (version == 0) {
if (localDir.exists) Utils.deleteRecursively(localDir)
localDir.mkdirs()
RocksDBCheckpointMetadata(Seq.empty, 0)
} else {
// Delete all non-immutable files in local dir, and unzip new ones from DFS commit file
listRocksDBFiles(localDir)._2.foreach(_.delete())
Utils.unzipFilesFromFile(fs, dfsBatchZipFile(version), localDir)
// Copy the necessary immutable files
val metadataFile = localMetadataFile(localDir)
val metadata = RocksDBCheckpointMetadata.readFromFile(metadataFile)
logInfo(s"Read metadata for version $version:\\n${metadata.prettyJson}")
loadImmutableFilesFromDfs(metadata.immutableFiles, localDir)
versionToRocksDBFiles.put(version, metadata.immutableFiles)
metadataFile.delete()
metadata
}
logFilesInDir(localDir, s"Loaded checkpoint files for version $version")
metadata
}
/** Get the latest version available in the DFS directory. If no data present, it returns 0. */
def getLatestVersion(): Long = {
val path = new Path(dfsRootDir)
if (fm.exists(path)) {
fm.list(path, onlyZipFiles)
.map(_.getPath.getName.stripSuffix(".zip"))
.map(_.toLong)
.foldLeft(0L)(math.max)
} else {
0
}
}
/** Save immutable files to DFS directory */
private def saveImmutableFilesToDfs(
version: Long,
localFiles: Seq[File]): Seq[RocksDBImmutableFile] = {
// Get the immutable files used in previous versions, as some of those uploaded files can be
// reused for this version
logInfo(s"Saving RocksDB files to DFS for $version")
val prevFilesToSizes = versionToRocksDBFiles.values.asScala.flatten.map { f =>
f.localFileName -> f
}.toMap
var bytesCopied = 0L
var filesCopied = 0L
var filesReused = 0L
val immutableFiles = localFiles.map { localFile =>
prevFilesToSizes
.get(localFile.getName)
.filter(_.isSameFile(localFile))
.map { reusable =>
filesReused += 1
reusable
}.getOrElse {
val localFileName = localFile.getName
val dfsFileName = newDFSFileName(localFileName)
val dfsFile = dfsFilePath(dfsFileName)
// Note: The implementation of copyFromLocalFile() closes the output stream when there is
// any exception while copying. So this may generate partial files on DFS. But that is
// okay because until the main [version].zip file is written, those partial files are
// not going to be used at all. Eventually these files should get cleared.
fs.copyFromLocalFile(
new Path(localFile.getAbsoluteFile.toURI), dfsFile)
val localFileSize = localFile.length()
logInfo(s"Copied $localFile to $dfsFile - $localFileSize bytes")
filesCopied += 1
bytesCopied += localFileSize
RocksDBImmutableFile(localFile.getName, dfsFileName, localFileSize)
}
}
logInfo(s"Copied $filesCopied files ($bytesCopied bytes) from local to" +
s" DFS for version $version. $filesReused files reused without copying.")
versionToRocksDBFiles.put(version, immutableFiles)
immutableFiles
}
/**
* Copy files from the DFS directory to a local directory. It figures out which existing
* local files can be kept: unnecessary SST files are deleted, while required files that are
* missing locally are copied from DFS.
*/
private def loadImmutableFilesFromDfs(
immutableFiles: Seq[RocksDBImmutableFile], localDir: File): Unit = {
val requiredFileNameToFileDetails = immutableFiles.map(f => f.localFileName -> f).toMap
// Delete unnecessary local immutable files
listRocksDBFiles(localDir)._1
.foreach { existingFile =>
val isSameFile =
requiredFileNameToFileDetails.get(existingFile.getName).exists(_.isSameFile(existingFile))
if (!isSameFile) {
existingFile.delete()
logInfo(s"Deleted local file $existingFile")
}
}
var filesCopied = 0L
var bytesCopied = 0L
var filesReused = 0L
immutableFiles.foreach { file =>
val localFileName = file.localFileName
val localFile = localFilePath(localDir, localFileName)
if (!localFile.exists) {
val dfsFile = dfsFilePath(file.dfsFileName)
// Note: The implementation of copyToLocalFile() closes the output stream when there is
// any exception while copying. So this may generate partial files on DFS. But that is
// okay because until the main [version].zip file is written, those partial files are
// not going to be used at all. Eventually these files should get cleared.
fs.copyToLocalFile(dfsFile, new Path(localFile.getAbsoluteFile.toURI))
val localFileSize = localFile.length()
val expectedSize = file.sizeBytes
if (localFileSize != expectedSize) {
throw new IllegalStateException(
s"Copied $dfsFile to $localFile," +
s" expected $expectedSize bytes, found $localFileSize bytes ")
}
filesCopied += 1
bytesCopied += localFileSize
logInfo(s"Copied $dfsFile to $localFile - $localFileSize bytes")
} else {
filesReused += 1
}
}
logInfo(s"Copied $filesCopied files ($bytesCopied bytes) from DFS to local with " +
s"$filesReused files reused.")
}
/**
* Compress files to a single zip file in DFS. Only the file names are embedded in the zip.
* If any error occurs while writing, the zip file is not committed.
*/
private def zipToDfsFile(files: Seq[File], dfsZipFile: Path): Unit = {
lazy val filesStr = s"$dfsZipFile\\n\\t${files.mkString("\\n\\t")}"
var in: InputStream = null
val out = fm.createAtomic(dfsZipFile, overwriteIfPossible = true)
var totalBytes = 0L
val zout = new ZipOutputStream(out)
try {
files.foreach { file =>
zout.putNextEntry(new ZipEntry(file.getName))
in = new FileInputStream(file)
val bytes = IOUtils.copy(in, zout)
in.close()
zout.closeEntry()
totalBytes += bytes
}
zout.close() // so that any error in closing also cancels the output stream
logInfo(s"Zipped $totalBytes bytes (before compression) to $filesStr")
} catch {
case e: Exception =>
// Cancel the actual output stream first, so that zout.close() does not write the file
out.cancel()
logError(s"Error zipping to $filesStr", e)
throw e
} finally {
// Close everything no matter what happened
IOUtils.closeQuietly(in)
IOUtils.closeQuietly(zout)
}
}
/** Log the files present in a directory. This is useful for debugging. */
private def logFilesInDir(dir: File, msg: String): Unit = {
lazy val files = Option(Utils.recursiveList(dir)).getOrElse(Array.empty).map { f =>
s"${f.getAbsolutePath} - ${f.length()} bytes"
}
logInfo(s"$msg - ${files.length} files\\n\\t${files.mkString("\\n\\t")}")
}
private def newDFSFileName(localFileName: String): String = {
val baseName = FilenameUtils.getBaseName(localFileName)
val extension = FilenameUtils.getExtension(localFileName)
s"$baseName-${UUID.randomUUID}.$extension"
}
private def dfsBatchZipFile(version: Long): Path = new Path(s"$dfsRootDir/$version.zip")
private def localMetadataFile(parentDir: File): File = new File(parentDir, "metadata")
private def dfsFilePath(fileName: String): Path = {
if (isSstFile(fileName)) {
new Path(new Path(dfsRootDir, SST_FILES_DFS_SUBDIR), fileName)
} else if (isLogFile(fileName)) {
new Path(new Path(dfsRootDir, LOG_FILES_DFS_SUBDIR), fileName)
} else {
new Path(dfsRootDir, fileName)
}
}
private def localFilePath(localDir: File, fileName: String): File = {
if (isLogFile(fileName)) {
new File(new File(localDir, LOG_FILES_LOCAL_SUBDIR), fileName)
} else {
new File(localDir, fileName)
}
}
/**
* List all the RocksDB files that need to be synced or recovered.
*/
private def listRocksDBFiles(localDir: File): (Seq[File], Seq[File]) = {
val topLevelFiles = localDir.listFiles.filter(!_.isDirectory)
val archivedLogFiles =
Option(new File(localDir, LOG_FILES_LOCAL_SUBDIR).listFiles())
.getOrElse(Array[File]())
// To ignore .log.crc files
.filter(file => isLogFile(file.getName))
val (topLevelSstFiles, topLevelOtherFiles) = topLevelFiles.partition(f => isSstFile(f.getName))
(topLevelSstFiles ++ archivedLogFiles, topLevelOtherFiles)
}
}
/**
* Classes to represent metadata of checkpoints saved to DFS. Since this is converted to JSON, any
* changes to this MUST be backward-compatible.
*/
case class RocksDBCheckpointMetadata(
sstFiles: Seq[RocksDBSstFile],
logFiles: Seq[RocksDBLogFile],
numKeys: Long) {
import RocksDBCheckpointMetadata._
def json: String = {
// We turn this field into a null to avoid writing an empty logFiles field in the JSON.
val nullified = if (logFiles.isEmpty) this.copy(logFiles = null) else this
mapper.writeValueAsString(nullified)
}
def prettyJson: String = Serialization.writePretty(this)(RocksDBCheckpointMetadata.format)
def writeToFile(metadataFile: File): Unit = {
val writer = Files.newBufferedWriter(metadataFile.toPath, UTF_8)
try {
writer.write(s"v$VERSION\\n")
writer.write(this.json)
} finally {
writer.close()
}
}
def immutableFiles: Seq[RocksDBImmutableFile] = sstFiles ++ logFiles
}
/** Helper class for [[RocksDBCheckpointMetadata]] */
object RocksDBCheckpointMetadata {
val VERSION = 1
implicit val format = Serialization.formats(NoTypeHints)
/** Used to convert between classes and JSON. */
lazy val mapper = {
val _mapper = new ObjectMapper with ScalaObjectMapper
_mapper.setSerializationInclusion(Include.NON_ABSENT)
_mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
_mapper.registerModule(DefaultScalaModule)
_mapper
}
def readFromFile(metadataFile: File): RocksDBCheckpointMetadata = {
val reader = Files.newBufferedReader(metadataFile.toPath, UTF_8)
try {
val versionLine = reader.readLine()
if (versionLine != s"v$VERSION") {
throw new IllegalStateException(
s"Cannot read RocksDB checkpoint metadata of version $versionLine")
}
Serialization.read[RocksDBCheckpointMetadata](reader)
} finally {
reader.close()
}
}
def apply(rocksDBFiles: Seq[RocksDBImmutableFile], numKeys: Long): RocksDBCheckpointMetadata = {
val sstFiles = rocksDBFiles.collect { case file: RocksDBSstFile => file }
val logFiles = rocksDBFiles.collect { case file: RocksDBLogFile => file }
RocksDBCheckpointMetadata(sstFiles, logFiles, numKeys)
}
}
/**
* A RocksDBImmutableFile maintains a mapping between a local RocksDB file name and the name of
* its copy on DFS. Since these files are immutable, their DFS copies can be reused.
*/
sealed trait RocksDBImmutableFile {
def localFileName: String
def dfsFileName: String
def sizeBytes: Long
/**
* Whether another local file is the same as the file described by this class.
* A file is considered the same only when both the name and the size match.
*/
def isSameFile(otherFile: File): Boolean = {
otherFile.getName == localFileName && otherFile.length() == sizeBytes
}
}
/**
* Class to represent a RocksDB SST file. Since this is converted to JSON,
* any changes to these MUST be backward-compatible.
*/
private[sql] case class RocksDBSstFile(
localFileName: String,
dfsSstFileName: String,
sizeBytes: Long) extends RocksDBImmutableFile {
override def dfsFileName: String = dfsSstFileName
}
/**
* Class to represent a RocksDB Log file. Since this is converted to JSON,
* any changes to these MUST be backward-compatible.
*/
private[sql] case class RocksDBLogFile(
localFileName: String,
dfsLogFileName: String,
sizeBytes: Long) extends RocksDBImmutableFile {
override def dfsFileName: String = dfsLogFileName
}
object RocksDBImmutableFile {
val SST_FILES_DFS_SUBDIR = "SSTs"
val LOG_FILES_DFS_SUBDIR = "logs"
val LOG_FILES_LOCAL_SUBDIR = "archive"
def apply(localFileName: String, dfsFileName: String, sizeBytes: Long): RocksDBImmutableFile = {
if (isSstFile(localFileName)) {
RocksDBSstFile(localFileName, dfsFileName, sizeBytes)
} else if (isLogFile(localFileName)) {
RocksDBLogFile(localFileName, dfsFileName, sizeBytes)
} else {
null
}
}
def isSstFile(fileName: String): Boolean = fileName.endsWith(".sst")
def isLogFile(fileName: String): Boolean = fileName.endsWith(".log")
private def isArchivedLogFile(file: File): Boolean =
isLogFile(file.getName) && file.getParentFile.getName == LOG_FILES_LOCAL_SUBDIR
def isImmutableFile(file: File): Boolean = isSstFile(file.getName) || isArchivedLogFile(file)
}
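// Hedged illustration (not part of the Spark source above): builds the per-version
// metadata described in the RocksDBFileManager scaladoc by hand, to make the
// local-name -> DFS-name mapping concrete. File names and sizes are invented.
object RocksDBCheckpointMetadataExample {
  def main(args: Array[String]): Unit = {
    val immutableFiles = Seq(
      RocksDBImmutableFile("00007.sst", "00007-uuid1.sst", sizeBytes = 1024L),
      RocksDBImmutableFile("00008.log", "00008-uuid3.log", sizeBytes = 512L))
    // This is the content that ends up as the `metadata` entry inside [version].zip.
    val metadata = RocksDBCheckpointMetadata(immutableFiles, numKeys = 10L)
    println(metadata.prettyJson)
  }
}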
|
wangmiao1981/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/state/RocksDBFileManager.scala
|
Scala
|
apache-2.0
| 21,099 |
/**
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.apache.spark.streaming.kafka.util
import kafka.consumer.ConsumerConfig
import kafka.consumer.ConsumerIterator
import kafka.consumer.ConsumerTimeoutException
import kafka.consumer.KafkaStream
import kafka.message.MessageAndMetadata
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import java.util.HashMap
import java.util.List
import java.util.Map
import java.util.Properties
import scala.collection.JavaConversions._
/**
* A Kafka Consumer implementation. It uses the current thread to fetch the
* next message from the queue instead of a multi-threaded implementation,
* so each fetch is a synchronous, blocking call.
* To avoid waiting indefinitely, a consumer timeout (consumer.timeout.ms = 1000,
* i.e. one second) bounds how long a fetch waits before concluding that no message is available.
*/
object KafkaConsumer {
private def createConsumerConfig(zkUrl: String, groupId: String): ConsumerConfig = {
val props: Properties = new Properties
props.put("zookeeper.connect", zkUrl)
props.put("group.id", groupId)
props.put("zookeeper.session.timeout.ms", "400")
props.put("zookeeper.sync.time.ms", "200")
props.put("auto.commit.interval.ms", "1000")
props.put("auto.offset.reset", "smallest")
props.put("consumer.timeout.ms", "1000")
new ConsumerConfig(props)
}
}
class KafkaConsumer (val consumer: kafka.consumer.ConsumerConnector = kafka.consumer.Consumer.create(
KafkaConsumer.createConsumerConfig(TestUtil.getInstance
.getZkUrl, "group_1"))) {
private[util] var consumerMap: Map[String, scala.List[KafkaStream[Array[Byte],
Array[Byte]]]] = null
private final val logger: Logger = LoggerFactory.getLogger(classOf[KafkaConsumer])
def initTopicList(topics: List[String]) {
val topicCountMap: Map[String, Int] = new HashMap[String, Int]
for (topic <- topics) {
topicCountMap.put(topic, new Integer(1))
}
consumerMap = consumer.createMessageStreams(topicCountMap)
}
def getNextMessage(topic: String): MessageAndMetadata[_, _] = {
val streams: scala.List[KafkaStream[Array[Byte], Array[Byte]]] = consumerMap.get(topic)
val stream: KafkaStream[Array[Byte], Array[Byte]] = streams.get(0)
val it: ConsumerIterator[Array[Byte], Array[Byte]] = stream.iterator()
try {
if (it.hasNext()) {
it.next()
}
else {
null
}
}
catch {
case e: ConsumerTimeoutException => {
logger.error("0 messages available to fetch for the topic " + topic)
null
}
}
}
def shutdown(): Unit = {
consumer.shutdown()
}
}
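// Hedged usage sketch (not part of the original test utility): the intended call
// pattern for the consumer above: register the topic list once, then poll
// synchronously. It assumes the surrounding TestUtil machinery (ZooKeeper/Kafka)
// is already running; the topic name is a placeholder.
/*
val consumer = new KafkaConsumer()
consumer.initTopicList(java.util.Collections.singletonList("test-topic"))
val msg = consumer.getNextMessage("test-topic") // null once the 1s consumer timeout fires
consumer.shutdown()
*/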
|
shirleyyoung0812/spark-streaming-kafka-output
|
src/test/scala/org/cloudera/spark/streaming/kafka/util/KafkaConsumer.scala
|
Scala
|
apache-2.0
| 3,390 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io.File
import org.scalactic.Equality
import org.apache.spark.sql.{DataFrame, QueryTest, Row}
import org.apache.spark.sql.catalyst.SchemaPruningTest
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.execution.FileSourceScanExec
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StructType
abstract class SchemaPruningSuite
extends QueryTest
with FileBasedDataSourceTest
with SchemaPruningTest
with SharedSparkSession {
case class FullName(first: String, middle: String, last: String)
case class Company(name: String, address: String)
case class Employer(id: Int, company: Company)
case class Contact(
id: Int,
name: FullName,
address: String,
pets: Int,
friends: Array[FullName] = Array.empty,
relatives: Map[String, FullName] = Map.empty,
employer: Employer = null,
relations: Map[FullName, String] = Map.empty)
val janeDoe = FullName("Jane", "X.", "Doe")
val johnDoe = FullName("John", "Y.", "Doe")
val susanSmith = FullName("Susan", "Z.", "Smith")
val employer = Employer(0, Company("abc", "123 Business Street"))
val employerWithNullCompany = Employer(1, null)
val contacts =
Contact(0, janeDoe, "123 Main Street", 1, friends = Array(susanSmith),
relatives = Map("brother" -> johnDoe), employer = employer,
relations = Map(johnDoe -> "brother")) ::
Contact(1, johnDoe, "321 Wall Street", 3, relatives = Map("sister" -> janeDoe),
employer = employerWithNullCompany, relations = Map(janeDoe -> "sister")) :: Nil
case class Name(first: String, last: String)
case class BriefContact(id: Int, name: Name, address: String)
private val briefContacts =
BriefContact(2, Name("Janet", "Jones"), "567 Maple Drive") ::
BriefContact(3, Name("Jim", "Jones"), "6242 Ash Street") :: Nil
case class ContactWithDataPartitionColumn(
id: Int,
name: FullName,
address: String,
pets: Int,
friends: Array[FullName] = Array(),
relatives: Map[String, FullName] = Map(),
employer: Employer = null,
relations: Map[FullName, String] = Map(),
p: Int)
case class BriefContactWithDataPartitionColumn(id: Int, name: Name, address: String, p: Int)
val contactsWithDataPartitionColumn =
contacts.map {case Contact(id, name, address, pets, friends, relatives, employer, relations) =>
ContactWithDataPartitionColumn(id, name, address, pets, friends, relatives, employer,
relations, 1) }
val briefContactsWithDataPartitionColumn =
briefContacts.map { case BriefContact(id, name, address) =>
BriefContactWithDataPartitionColumn(id, name, address, 2) }
testSchemaPruning("select a single complex field") {
val query = sql("select name.middle from contacts")
checkScan(query, "struct<name:struct<middle:string>>")
checkAnswer(query.orderBy("id"), Row("X.") :: Row("Y.") :: Row(null) :: Row(null) :: Nil)
}
testSchemaPruning("select a single complex field and its parent struct") {
val query = sql("select name.middle, name from contacts")
checkScan(query, "struct<name:struct<first:string,middle:string,last:string>>")
checkAnswer(query.orderBy("id"),
Row("X.", Row("Jane", "X.", "Doe")) ::
Row("Y.", Row("John", "Y.", "Doe")) ::
Row(null, Row("Janet", null, "Jones")) ::
Row(null, Row("Jim", null, "Jones")) ::
Nil)
}
testSchemaPruning("select a single complex field array and its parent struct array") {
val query = sql("select friends.middle, friends from contacts where p=1")
checkScan(query,
"struct<friends:array<struct<first:string,middle:string,last:string>>>")
checkAnswer(query.orderBy("id"),
Row(Array("Z."), Array(Row("Susan", "Z.", "Smith"))) ::
Row(Array.empty[String], Array.empty[Row]) ::
Nil)
}
testSchemaPruning("select a single complex field from a map entry and its parent map entry") {
val query =
sql("select relatives[\\"brother\\"].middle, relatives[\\"brother\\"] from contacts where p=1")
checkScan(query,
"struct<relatives:map<string,struct<first:string,middle:string,last:string>>>")
checkAnswer(query.orderBy("id"),
Row("Y.", Row("John", "Y.", "Doe")) ::
Row(null, null) ::
Nil)
}
testSchemaPruning("select a single complex field and the partition column") {
val query = sql("select name.middle, p from contacts")
checkScan(query, "struct<name:struct<middle:string>>")
checkAnswer(query.orderBy("id"),
Row("X.", 1) :: Row("Y.", 1) :: Row(null, 2) :: Row(null, 2) :: Nil)
}
testSchemaPruning("partial schema intersection - select missing subfield") {
val query = sql("select name.middle, address from contacts where p=2")
checkScan(query, "struct<name:struct<middle:string>,address:string>")
checkAnswer(query.orderBy("id"),
Row(null, "567 Maple Drive") ::
Row(null, "6242 Ash Street") :: Nil)
}
testSchemaPruning("no unnecessary schema pruning") {
val query =
sql("select id, name.last, name.middle, name.first, relatives[''].last, " +
"relatives[''].middle, relatives[''].first, friends[0].last, friends[0].middle, " +
"friends[0].first, pets, address from contacts where p=2")
// We've selected every field in the schema. Therefore, no schema pruning should be performed.
// We check this by asserting that the scanned schema of the query is identical to the schema
// of the contacts relation, even though the fields are selected in different orders.
checkScan(query,
"struct<id:int,name:struct<first:string,middle:string,last:string>,address:string,pets:int," +
"friends:array<struct<first:string,middle:string,last:string>>," +
"relatives:map<string,struct<first:string,middle:string,last:string>>>")
checkAnswer(query.orderBy("id"),
Row(2, "Jones", null, "Janet", null, null, null, null, null, null, null, "567 Maple Drive") ::
Row(3, "Jones", null, "Jim", null, null, null, null, null, null, null, "6242 Ash Street") ::
Nil)
}
testSchemaPruning("empty schema intersection") {
val query = sql("select name.middle from contacts where p=2")
checkScan(query, "struct<name:struct<middle:string>>")
checkAnswer(query.orderBy("id"),
Row(null) :: Row(null) :: Nil)
}
testSchemaPruning("select a single complex field and in where clause") {
val query1 = sql("select name.first from contacts where name.first = 'Jane'")
checkScan(query1, "struct<name:struct<first:string>>")
checkAnswer(query1, Row("Jane") :: Nil)
val query2 = sql("select name.first, name.last from contacts where name.first = 'Jane'")
checkScan(query2, "struct<name:struct<first:string,last:string>>")
checkAnswer(query2, Row("Jane", "Doe") :: Nil)
val query3 = sql("select name.first from contacts " +
"where employer.company.name = 'abc' and p = 1")
checkScan(query3, "struct<name:struct<first:string>," +
"employer:struct<company:struct<name:string>>>")
checkAnswer(query3, Row("Jane") :: Nil)
val query4 = sql("select name.first, employer.company.name from contacts " +
"where employer.company is not null and p = 1")
checkScan(query4, "struct<name:struct<first:string>," +
"employer:struct<company:struct<name:string>>>")
checkAnswer(query4, Row("Jane", "abc") :: Nil)
}
testSchemaPruning("select nullable complex field and having is not null predicate") {
val query = sql("select employer.company from contacts " +
"where employer is not null and p = 1")
checkScan(query, "struct<employer:struct<company:struct<name:string,address:string>>>")
checkAnswer(query, Row(Row("abc", "123 Business Street")) :: Row(null) :: Nil)
}
testSchemaPruning("select a single complex field and is null expression in project") {
val query = sql("select name.first, address is not null from contacts")
checkScan(query, "struct<name:struct<first:string>,address:string>")
checkAnswer(query.orderBy("id"),
Row("Jane", true) :: Row("John", true) :: Row("Janet", true) :: Row("Jim", true) :: Nil)
}
testSchemaPruning("select a single complex field array and in clause") {
val query = sql("select friends.middle from contacts where friends.first[0] = 'Susan'")
checkScan(query,
"struct<friends:array<struct<first:string,middle:string>>>")
checkAnswer(query.orderBy("id"),
Row(Array("Z.")) :: Nil)
}
testSchemaPruning("select a single complex field from a map entry and in clause") {
val query =
sql("select relatives[\\"brother\\"].middle from contacts " +
"where relatives[\\"brother\\"].first = 'John'")
checkScan(query,
"struct<relatives:map<string,struct<first:string,middle:string>>>")
checkAnswer(query.orderBy("id"),
Row("Y.") :: Nil)
}
testSchemaPruning("select one complex field and having is null predicate on another " +
"complex field") {
val query = sql("select * from contacts")
.where("name.middle is not null")
.select(
"id",
"name.first",
"name.middle",
"name.last"
)
.where("last = 'Jones'")
.select(count("id")).toDF()
checkScan(query,
"struct<id:int,name:struct<middle:string,last:string>>")
checkAnswer(query, Row(0) :: Nil)
}
testSchemaPruning("select one deep nested complex field and having is null predicate on " +
"another deep nested complex field") {
val query = sql("select * from contacts")
.where("employer.company.address is not null")
.selectExpr(
"id",
"name.first",
"name.middle",
"name.last",
"employer.id as employer_id"
)
.where("employer_id = 0")
.select(count("id")).toDF()
checkScan(query,
"struct<id:int,employer:struct<id:int,company:struct<address:string>>>")
checkAnswer(query, Row(1) :: Nil)
}
testSchemaPruning("select nested field from a complex map key using map_keys") {
val query = sql("select map_keys(relations).middle[0], p from contacts")
checkScan(query, "struct<relations:map<struct<middle:string>,string>>")
checkAnswer(query, Row("Y.", 1) :: Row("X.", 1) :: Row(null, 2) :: Row(null, 2) :: Nil)
}
testSchemaPruning("select nested field from a complex map value using map_values") {
val query = sql("select map_values(relatives).middle[0], p from contacts")
checkScan(query, "struct<relatives:map<string,struct<middle:string>>>")
checkAnswer(query, Row("Y.", 1) :: Row("X.", 1) :: Row(null, 2) :: Row(null, 2) :: Nil)
}
protected def testSchemaPruning(testName: String)(testThunk: => Unit) {
test(s"Spark vectorized reader - without partition data column - $testName") {
withSQLConf(vectorizedReaderEnabledKey -> "true") {
withContacts(testThunk)
}
}
test(s"Spark vectorized reader - with partition data column - $testName") {
withSQLConf(vectorizedReaderEnabledKey -> "true") {
withContactsWithDataPartitionColumn(testThunk)
}
}
test(s"Non-vectorized reader - without partition data column - $testName") {
withSQLConf(vectorizedReaderEnabledKey -> "false") {
withContacts(testThunk)
}
}
test(s"Non-vectorized reader - with partition data column - $testName") {
withSQLConf(vectorizedReaderEnabledKey-> "false") {
withContactsWithDataPartitionColumn(testThunk)
}
}
}
private def withContacts(testThunk: => Unit) {
withTempPath { dir =>
val path = dir.getCanonicalPath
makeDataSourceFile(contacts, new File(path + "/contacts/p=1"))
makeDataSourceFile(briefContacts, new File(path + "/contacts/p=2"))
// Provide a user-specified schema, since the schema inferred from different data sources
// might differ.
val schema = "`id` INT,`name` STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>, " +
"`address` STRING,`pets` INT,`friends` ARRAY<STRUCT<`first`: STRING, `middle`: STRING, " +
"`last`: STRING>>,`relatives` MAP<STRING, STRUCT<`first`: STRING, `middle`: STRING, " +
"`last`: STRING>>,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, " +
"`address`: STRING>>,`relations` MAP<STRUCT<`first`: STRING, `middle`: STRING, " +
"`last`: STRING>,STRING>,`p` INT"
spark.read.format(dataSourceName).schema(schema).load(path + "/contacts")
.createOrReplaceTempView("contacts")
testThunk
}
}
private def withContactsWithDataPartitionColumn(testThunk: => Unit) {
withTempPath { dir =>
val path = dir.getCanonicalPath
makeDataSourceFile(contactsWithDataPartitionColumn, new File(path + "/contacts/p=1"))
makeDataSourceFile(briefContactsWithDataPartitionColumn, new File(path + "/contacts/p=2"))
// Provide a user-specified schema, since the schema inferred from different data sources
// might differ.
val schema = "`id` INT,`name` STRUCT<`first`: STRING, `middle`: STRING, `last`: STRING>, " +
"`address` STRING,`pets` INT,`friends` ARRAY<STRUCT<`first`: STRING, `middle`: STRING, " +
"`last`: STRING>>,`relatives` MAP<STRING, STRUCT<`first`: STRING, `middle`: STRING, " +
"`last`: STRING>>,`employer` STRUCT<`id`: INT, `company`: STRUCT<`name`: STRING, " +
"`address`: STRING>>,`relations` MAP<STRUCT<`first`: STRING, `middle`: STRING, " +
"`last`: STRING>,STRING>,`p` INT"
spark.read.format(dataSourceName).schema(schema).load(path + "/contacts")
.createOrReplaceTempView("contacts")
testThunk
}
}
case class MixedCaseColumn(a: String, B: Int)
case class MixedCase(id: Int, CoL1: String, coL2: MixedCaseColumn)
private val mixedCaseData =
MixedCase(0, "r0c1", MixedCaseColumn("abc", 1)) ::
MixedCase(1, "r1c1", MixedCaseColumn("123", 2)) ::
Nil
testExactCaseQueryPruning("select with exact column names") {
val query = sql("select CoL1, coL2.B from mixedcase")
checkScan(query, "struct<CoL1:string,coL2:struct<B:int>>")
checkAnswer(query.orderBy("id"),
Row("r0c1", 1) ::
Row("r1c1", 2) ::
Nil)
}
testMixedCaseQueryPruning("select with lowercase column names") {
val query = sql("select col1, col2.b from mixedcase")
checkScan(query, "struct<CoL1:string,coL2:struct<B:int>>")
checkAnswer(query.orderBy("id"),
Row("r0c1", 1) ::
Row("r1c1", 2) ::
Nil)
}
testMixedCaseQueryPruning("select with different-case column names") {
val query = sql("select cOL1, cOl2.b from mixedcase")
checkScan(query, "struct<CoL1:string,coL2:struct<B:int>>")
checkAnswer(query.orderBy("id"),
Row("r0c1", 1) ::
Row("r1c1", 2) ::
Nil)
}
testMixedCaseQueryPruning("filter with different-case column names") {
val query = sql("select id from mixedcase where Col2.b = 2")
checkScan(query, "struct<id:int,coL2:struct<B:int>>")
checkAnswer(query.orderBy("id"), Row(1) :: Nil)
}
// Tests schema pruning for a query whose column and field names are exactly the same as the table
// schema's column and field names. N.B. this implies that `testThunk` should pass using either a
// case-sensitive or case-insensitive query parser
private def testExactCaseQueryPruning(testName: String)(testThunk: => Unit) {
test(s"Case-sensitive parser - mixed-case schema - $testName") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
withMixedCaseData(testThunk)
}
}
testMixedCaseQueryPruning(testName)(testThunk)
}
// Tests schema pruning for a query whose column and field names may differ in case from the table
// schema's column and field names
private def testMixedCaseQueryPruning(testName: String)(testThunk: => Unit) {
test(s"Case-insensitive parser - mixed-case schema - $testName") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withMixedCaseData(testThunk)
}
}
}
// Tests given test function with Spark vectorized reader and non-vectorized reader.
private def withMixedCaseData(testThunk: => Unit) {
withDataSourceTable(mixedCaseData, "mixedcase") {
testThunk
}
}
protected val schemaEquality = new Equality[StructType] {
override def areEqual(a: StructType, b: Any): Boolean =
b match {
case otherType: StructType => a.sameType(otherType)
case _ => false
}
}
protected def checkScan(df: DataFrame, expectedSchemaCatalogStrings: String*): Unit = {
checkScanSchemata(df, expectedSchemaCatalogStrings: _*)
// We check here that we can execute the query without throwing an exception. The results
// themselves are irrelevant, and should be checked elsewhere as needed
df.collect()
}
protected def checkScanSchemata(df: DataFrame, expectedSchemaCatalogStrings: String*): Unit = {
val fileSourceScanSchemata =
df.queryExecution.executedPlan.collect {
case scan: FileSourceScanExec => scan.requiredSchema
}
assert(fileSourceScanSchemata.size === expectedSchemaCatalogStrings.size,
s"Found ${fileSourceScanSchemata.size} file sources in dataframe, " +
s"but expected $expectedSchemaCatalogStrings")
fileSourceScanSchemata.zip(expectedSchemaCatalogStrings).foreach {
case (scanSchema, expectedScanSchemaCatalogString) =>
val expectedScanSchema = CatalystSqlParser.parseDataType(expectedScanSchemaCatalogString)
implicit val equality = schemaEquality
assert(scanSchema === expectedScanSchema)
}
}
}
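// Hedged illustration (not part of the Spark test suite above): what the pruning
// verified by checkScan looks like from the user side. The session, path and the
// exact plan text are placeholders for the sketch.
/*
val df = spark.read.parquet("/tmp/contacts")
df.select("name.middle").explain()
// The FileScan node is expected to report a pruned read schema such as
//   ReadSchema: struct<name:struct<middle:string>>
// rather than the full contact schema.
*/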
|
pgandhi999/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala
|
Scala
|
apache-2.0
| 18,538 |
package pgep.GeneticOperators.Reproducers
class TwoPointCrossover(selection: GenotypeSelector) extends Reproducer(2, 1, selection) {
def this() = this(null)
protected[Reproducers] override def apply(selected: List[Genotype]) = {
val (src1Idx, src2Idx) = if (random.nextInt(2) == 0) (0, 1) else (1, 0)
val src1 = selected(src1Idx)
val src2 = selected(src2Idx)
val geneLen = src1.gp.geneLen
val nrGenes = src1.gp.nrGenes
val sp1 = random.nextInt(geneLen * nrGenes - 2) + 1 // don't split at the very beginning or end
val sp2 = random.nextInt(geneLen * nrGenes - 2) + 1
val splitPos1 = sp1 min sp2
val splitPos2 = sp1 max sp2
val symbolPos1 = splitPos1 % geneLen
val genePos1 = splitPos1 / geneLen
val symbolPos2 = splitPos2 % geneLen
val genePos2 = splitPos2 / geneLen
val child = Genotype(src1.gp)
Genotype.copyLinearStructure(src1, child, 0, 0, genePos1 + 1, symbolPos1 + 1)
Genotype.copyLinearStructure(src2, child, genePos1, symbolPos1, genePos2 + 1, symbolPos2 + 1)
Genotype.copyLinearStructure(src1, child, genePos2, symbolPos2, nrGenes, geneLen)
List(child)
}
}
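// Hedged illustration (not part of the PGEP source above): the same two-point
// crossover idea applied to plain vectors, so the cut-point arithmetic can be
// checked in isolation from the Genotype machinery. Names are invented for the sketch.
object TwoPointCrossoverExample {
  // child = parent1[0, cut1) ++ parent2[cut1, cut2) ++ parent1[cut2, end)
  def crossover[A](parent1: Vector[A], parent2: Vector[A], cut1: Int, cut2: Int): Vector[A] = {
    require(parent1.length == parent2.length && 0 < cut1 && cut1 <= cut2 && cut2 < parent1.length)
    parent1.slice(0, cut1) ++ parent2.slice(cut1, cut2) ++ parent1.slice(cut2, parent1.length)
  }

  def main(args: Array[String]): Unit = {
    val p1 = Vector("a1", "a2", "a3", "a4", "a5", "a6")
    val p2 = Vector("b1", "b2", "b3", "b4", "b5", "b6")
    println(crossover(p1, p2, 2, 4)) // Vector(a1, a2, b3, b4, a5, a6)
  }
}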
|
khernyo/PGEP
|
src/pgep/GeneticOperators/Reproducers/TwoPointCrossover.scala
|
Scala
|
gpl-3.0
| 1,169 |
/*
Copyright 2016-17, Hasso-Plattner-Institut fuer Softwaresystemtechnik GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package de.hpi.ingestion.deduplication.models
/**
* A data tuple holding the precision, recall and fscore values for a given threshold
* @param threshold
* @param precision
* @param recall
* @param fscore
*/
case class PrecisionRecallDataTuple(
threshold: Double,
precision: Double,
recall: Double,
fscore: Double
)
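// Hedged illustration (not part of the ingestion source above): how such a tuple is
// typically filled in, using the standard F1 definition
// fscore = 2 * precision * recall / (precision + recall).
object PrecisionRecallExample {
  def f1(precision: Double, recall: Double): Double =
    if (precision + recall == 0.0) 0.0 else 2 * precision * recall / (precision + recall)

  def main(args: Array[String]): Unit = {
    val tuple = PrecisionRecallDataTuple(threshold = 0.8, precision = 0.75, recall = 0.6, fscore = f1(0.75, 0.6))
    println(tuple) // fscore = 2 * 0.45 / 1.35 ≈ 0.667
  }
}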
|
bpn1/ingestion
|
src/main/scala/de/hpi/ingestion/deduplication/models/PrecisionRecallDataTuple.scala
|
Scala
|
apache-2.0
| 948 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import scala.collection.mutable
import scala.collection.mutable.{HashMap, LinkedHashMap}
import com.google.common.collect.Interners
import org.apache.spark.JobExecutionStatus
import org.apache.spark.executor._
import org.apache.spark.scheduler.{AccumulableInfo, TaskInfo}
import org.apache.spark.util.AccumulatorContext
import org.apache.spark.util.collection.OpenHashSet
private[spark] object UIData {
class ExecutorSummary {
var taskTime : Long = 0
var failedTasks : Int = 0
var succeededTasks : Int = 0
var reasonToNumKilled : Map[String, Int] = Map.empty
var inputBytes : Long = 0
var inputRecords : Long = 0
var outputBytes : Long = 0
var outputRecords : Long = 0
var shuffleRead : Long = 0
var shuffleReadRecords : Long = 0
var shuffleWrite : Long = 0
var shuffleWriteRecords : Long = 0
var memoryBytesSpilled : Long = 0
var diskBytesSpilled : Long = 0
var isBlacklisted : Int = 0
}
class JobUIData(
var jobId: Int = -1,
var submissionTime: Option[Long] = None,
var completionTime: Option[Long] = None,
var stageIds: Seq[Int] = Seq.empty,
var jobGroup: Option[String] = None,
var status: JobExecutionStatus = JobExecutionStatus.UNKNOWN,
/* Tasks */
// `numTasks` is a potential underestimate of the true number of tasks that this job will run.
// This may be an underestimate because the job start event references all of the result
// stages' transitive stage dependencies, but some of these stages might be skipped if their
// output is available from earlier runs.
// See https://github.com/apache/spark/pull/3009 for a more extensive discussion.
var numTasks: Int = 0,
var numActiveTasks: Int = 0,
var numCompletedTasks: Int = 0,
var completedIndices: OpenHashSet[(Int, Int)] = new OpenHashSet[(Int, Int)](),
var numSkippedTasks: Int = 0,
var numFailedTasks: Int = 0,
var reasonToNumKilled: Map[String, Int] = Map.empty,
/* Stages */
var numActiveStages: Int = 0,
// This needs to be a set instead of a simple count to prevent double-counting of rerun stages:
var completedStageIndices: mutable.HashSet[Int] = new mutable.HashSet[Int](),
var numSkippedStages: Int = 0,
var numFailedStages: Int = 0
)
class StageUIData {
var numActiveTasks: Int = _
var numCompleteTasks: Int = _
var completedIndices = new OpenHashSet[Int]()
var numFailedTasks: Int = _
var reasonToNumKilled: Map[String, Int] = Map.empty
var executorRunTime: Long = _
var executorCpuTime: Long = _
var inputBytes: Long = _
var inputRecords: Long = _
var outputBytes: Long = _
var outputRecords: Long = _
var shuffleReadTotalBytes: Long = _
var shuffleReadRecords : Long = _
var shuffleWriteBytes: Long = _
var shuffleWriteRecords: Long = _
var memoryBytesSpilled: Long = _
var diskBytesSpilled: Long = _
var isBlacklisted: Int = _
var schedulingPool: String = ""
var description: Option[String] = None
var accumulables = new HashMap[Long, AccumulableInfo]
var taskData = new LinkedHashMap[Long, TaskUIData]
var executorSummary = new HashMap[String, ExecutorSummary]
def hasInput: Boolean = inputBytes > 0
def hasOutput: Boolean = outputBytes > 0
def hasShuffleRead: Boolean = shuffleReadTotalBytes > 0
def hasShuffleWrite: Boolean = shuffleWriteBytes > 0
def hasBytesSpilled: Boolean = memoryBytesSpilled > 0 && diskBytesSpilled > 0
}
/**
* These are kept mutable and reused throughout a task's lifetime to avoid excessive reallocation.
*/
class TaskUIData private(private var _taskInfo: TaskInfo) {
private[this] var _metrics: Option[TaskMetricsUIData] = Some(TaskMetricsUIData.EMPTY)
var errorMessage: Option[String] = None
def taskInfo: TaskInfo = _taskInfo
def metrics: Option[TaskMetricsUIData] = _metrics
def updateTaskInfo(taskInfo: TaskInfo): Unit = {
_taskInfo = TaskUIData.dropInternalAndSQLAccumulables(taskInfo)
}
def updateTaskMetrics(metrics: Option[TaskMetrics]): Unit = {
_metrics = metrics.map(TaskMetricsUIData.fromTaskMetrics)
}
def taskDuration: Option[Long] = {
if (taskInfo.status == "RUNNING") {
Some(_taskInfo.timeRunning(System.currentTimeMillis))
} else {
_metrics.map(_.executorRunTime)
}
}
}
object TaskUIData {
private val stringInterner = Interners.newWeakInterner[String]()
/** String interning to reduce the memory usage. */
private def weakIntern(s: String): String = {
stringInterner.intern(s)
}
def apply(taskInfo: TaskInfo): TaskUIData = {
new TaskUIData(dropInternalAndSQLAccumulables(taskInfo))
}
/**
* We don't need to store internal or SQL accumulables as their values will be shown in other
* places, so drop them to reduce the memory usage.
*/
private[spark] def dropInternalAndSQLAccumulables(taskInfo: TaskInfo): TaskInfo = {
val newTaskInfo = new TaskInfo(
taskId = taskInfo.taskId,
index = taskInfo.index,
attemptNumber = taskInfo.attemptNumber,
launchTime = taskInfo.launchTime,
executorId = weakIntern(taskInfo.executorId),
host = weakIntern(taskInfo.host),
taskLocality = taskInfo.taskLocality,
speculative = taskInfo.speculative
)
newTaskInfo.gettingResultTime = taskInfo.gettingResultTime
newTaskInfo.setAccumulables(taskInfo.accumulables.filter {
accum => !accum.internal && accum.metadata != Some(AccumulatorContext.SQL_ACCUM_IDENTIFIER)
})
newTaskInfo.finishTime = taskInfo.finishTime
newTaskInfo.failed = taskInfo.failed
newTaskInfo.killed = taskInfo.killed
newTaskInfo
}
}
case class TaskMetricsUIData(
executorDeserializeTime: Long,
executorDeserializeCpuTime: Long,
executorRunTime: Long,
executorCpuTime: Long,
resultSize: Long,
jvmGCTime: Long,
resultSerializationTime: Long,
memoryBytesSpilled: Long,
diskBytesSpilled: Long,
peakExecutionMemory: Long,
inputMetrics: InputMetricsUIData,
outputMetrics: OutputMetricsUIData,
shuffleReadMetrics: ShuffleReadMetricsUIData,
shuffleWriteMetrics: ShuffleWriteMetricsUIData)
object TaskMetricsUIData {
def fromTaskMetrics(m: TaskMetrics): TaskMetricsUIData = {
TaskMetricsUIData(
executorDeserializeTime = m.executorDeserializeTime,
executorDeserializeCpuTime = m.executorDeserializeCpuTime,
executorRunTime = m.executorRunTime,
executorCpuTime = m.executorCpuTime,
resultSize = m.resultSize,
jvmGCTime = m.jvmGCTime,
resultSerializationTime = m.resultSerializationTime,
memoryBytesSpilled = m.memoryBytesSpilled,
diskBytesSpilled = m.diskBytesSpilled,
peakExecutionMemory = m.peakExecutionMemory,
inputMetrics = InputMetricsUIData(m.inputMetrics),
outputMetrics = OutputMetricsUIData(m.outputMetrics),
shuffleReadMetrics = ShuffleReadMetricsUIData(m.shuffleReadMetrics),
shuffleWriteMetrics = ShuffleWriteMetricsUIData(m.shuffleWriteMetrics))
}
val EMPTY: TaskMetricsUIData = fromTaskMetrics(TaskMetrics.empty)
}
case class InputMetricsUIData(bytesRead: Long, recordsRead: Long)
object InputMetricsUIData {
def apply(metrics: InputMetrics): InputMetricsUIData = {
if (metrics.bytesRead == 0 && metrics.recordsRead == 0) {
EMPTY
} else {
new InputMetricsUIData(
bytesRead = metrics.bytesRead,
recordsRead = metrics.recordsRead)
}
}
private val EMPTY = InputMetricsUIData(0, 0)
}
case class OutputMetricsUIData(bytesWritten: Long, recordsWritten: Long)
object OutputMetricsUIData {
def apply(metrics: OutputMetrics): OutputMetricsUIData = {
if (metrics.bytesWritten == 0 && metrics.recordsWritten == 0) {
EMPTY
} else {
new OutputMetricsUIData(
bytesWritten = metrics.bytesWritten,
recordsWritten = metrics.recordsWritten)
}
}
private val EMPTY = OutputMetricsUIData(0, 0)
}
case class ShuffleReadMetricsUIData(
remoteBlocksFetched: Long,
localBlocksFetched: Long,
remoteBytesRead: Long,
localBytesRead: Long,
fetchWaitTime: Long,
recordsRead: Long,
totalBytesRead: Long,
totalBlocksFetched: Long)
object ShuffleReadMetricsUIData {
def apply(metrics: ShuffleReadMetrics): ShuffleReadMetricsUIData = {
if (
metrics.remoteBlocksFetched == 0 &&
metrics.localBlocksFetched == 0 &&
metrics.remoteBytesRead == 0 &&
metrics.localBytesRead == 0 &&
metrics.fetchWaitTime == 0 &&
metrics.recordsRead == 0 &&
metrics.totalBytesRead == 0 &&
metrics.totalBlocksFetched == 0) {
EMPTY
} else {
new ShuffleReadMetricsUIData(
remoteBlocksFetched = metrics.remoteBlocksFetched,
localBlocksFetched = metrics.localBlocksFetched,
remoteBytesRead = metrics.remoteBytesRead,
localBytesRead = metrics.localBytesRead,
fetchWaitTime = metrics.fetchWaitTime,
recordsRead = metrics.recordsRead,
totalBytesRead = metrics.totalBytesRead,
totalBlocksFetched = metrics.totalBlocksFetched
)
}
}
private val EMPTY = ShuffleReadMetricsUIData(0, 0, 0, 0, 0, 0, 0, 0)
}
case class ShuffleWriteMetricsUIData(
bytesWritten: Long,
recordsWritten: Long,
writeTime: Long)
object ShuffleWriteMetricsUIData {
def apply(metrics: ShuffleWriteMetrics): ShuffleWriteMetricsUIData = {
if (metrics.bytesWritten == 0 && metrics.recordsWritten == 0 && metrics.writeTime == 0) {
EMPTY
} else {
new ShuffleWriteMetricsUIData(
bytesWritten = metrics.bytesWritten,
recordsWritten = metrics.recordsWritten,
writeTime = metrics.writeTime
)
}
}
private val EMPTY = ShuffleWriteMetricsUIData(0, 0, 0)
}
}
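// Illustrative sketch, not part of Spark: why UIData interns executorId/host strings.
// Interning through Guava's weak interner (as TaskUIData.weakIntern does above) makes
// equal strings share one canonical instance instead of keeping a copy per task.
private[spark] object WeakInternSketch {
  private val interner = Interners.newWeakInterner[String]()
  def demo(): Unit = {
    val first = interner.intern(new String("executor-1"))
    val second = interner.intern(new String("executor-1"))
    println(first eq second) // true: both references point to the same interned instance
  }
}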
|
kissmetrics/spark
|
core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala
|
Scala
|
apache-2.0
| 11,007 |
/*^
===========================================================================
Helios - Core
===========================================================================
Copyright (C) 2013-2016 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.helios.apps
import java.util.regex.Pattern
object Version {
private val VersionPattern = Pattern.compile(raw"(\d+)\.(\d+)(?:\.(\d+)(?:\.(\d+))?)?(-SNAPSHOT)?")
/**
* Parses a version string
*
* @param versionString The version string. Its format must be:
* <b>major.minor(.build(.release))(-snapshot)</b>
* @return The parsed version
*/
def parse(versionString: String): Version = {
val matcher = VersionPattern.matcher(versionString)
require(matcher.matches(), s"Illegal version string: '${versionString}'")
val major = matcher.group(1).toInt
val minor = matcher.group(2).toInt
val build = Option(matcher.group(3)).getOrElse("0").toInt
val release = Option(matcher.group(4)).getOrElse("0").toInt
val snapshot = matcher.group(5) != null
Version(
major,
minor,
build,
release,
snapshot
)
}
}
/**
* A version class, following MoonDeploy's convention (plus an optional -SNAPSHOT suffix for
* pre-release versions)
*/
case class Version(
major: Int,
minor: Int,
build: Int,
release: Int,
snapshot: Boolean
) {
override val toString: String = {
val buildString =
if (build > 0)
s".${build}"
else
""
val releaseString =
if (release > 0)
s".${release}"
else
""
val snapshotString =
if (snapshot)
"-SNAPSHOT"
else
""
s"${major}.${minor}${buildString}${releaseString}${snapshotString}"
}
}
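// Usage sketch, not part of the original file: parsing and printing a version string.
object VersionParseExample extends App {
  val version = Version.parse("1.2.3-SNAPSHOT")
  assert(version == Version(major = 1, minor = 2, build = 3, release = 0, snapshot = true))
  println(version) // prints "1.2.3-SNAPSHOT" again, via the toString above
}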
|
giancosta86/Helios-core
|
src/main/scala/info/gianlucacosta/helios/apps/Version.scala
|
Scala
|
apache-2.0
| 2,589 |
package org.jetbrains.plugins.scala
package codeInspection.collections
import com.intellij.openapi.util.TextRange
import com.intellij.psi.{PsiElement, SmartPointerManager, SmartPsiElementPointer}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import scala.language.implicitConversions
/**
* Nikolay.Tropin
* 5/21/13
*/
case class Simplification(exprToReplace: SmartPsiElementPointer[ScExpression], replacementText: String, hint: String, rangeInParent: TextRange)
class SimplificationBuilder private[collections] (val exprToReplace: ScExpression) {
private var rangeInParent: TextRange = {
val exprToHighlightFrom: ScExpression = exprToReplace match {
case MethodRepr(_, Some(base), _, _) => base
case _ => exprToReplace
}
rightRangeInParent(exprToHighlightFrom, exprToReplace)
}
private var replacementText: String = ""
private var hint: String = ""
def highlightFrom(expr: ScExpression): SimplificationBuilder = {
this.rangeInParent = rightRangeInParent(expr, exprToReplace)
this
}
def highlightAll: SimplificationBuilder = highlightElem(exprToReplace)
def highlightRef: SimplificationBuilder = highlightElem(refNameId(exprToReplace).getOrElse(exprToReplace))
def highlightElem(elem: PsiElement): SimplificationBuilder = {
this.rangeInParent = elem.getTextRange.shiftRight( - exprToReplace.getTextOffset)
this
}
def highlightRange(start: Int, end: Int): SimplificationBuilder = {
this.rangeInParent = new TextRange(start, end).shiftRight( - exprToReplace.getTextOffset)
this
}
def withText(s: String): SimplificationBuilder = {
this.replacementText = s
this
}
def withHint(s: String): SimplificationBuilder = {
this.hint = s
this
}
def toSimplification: Simplification = {
val smartPointer = SmartPointerManager.getInstance(exprToReplace.getProject).createSmartPsiElementPointer(exprToReplace)
Simplification(smartPointer, replacementText, hint, rangeInParent)
}
}
object SimplificationBuilder {
implicit def toSimplification(s: SimplificationBuilder): Simplification = s.toSimplification
}
abstract class SimplificationType {
def hint: String
def description: String = hint
def getSimplification(expr: ScExpression): Option[Simplification] = None
def getSimplifications(expr: ScExpression): Seq[Simplification] = Seq.empty
def replace(expr: ScExpression): SimplificationBuilder = {
new SimplificationBuilder(expr).withHint(hint)
}
}
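// Illustrative sketch, not part of the original file: a hypothetical inspection built
// on the API above. Real inspections pattern-match on the PSI tree before deciding on
// a replacement; here we simply echo the expression's own text to show the builder flow.
object ExampleSimplificationType extends SimplificationType {
  override def hint: String = "Example replacement"
  override def getSimplification(expr: ScExpression): Option[Simplification] =
    Some(replace(expr).withText(expr.getText).highlightAll.toSimplification)
}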
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/codeInspection/collections/Simplifications.scala
|
Scala
|
apache-2.0
| 2,490 |
/*
* Copyright 2013-2016 Tsukasa Kitachi
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package configs.testutil.instance
import java.{lang => jl}
import scalaprops.Gen
import scalaprops.ScalapropsScalaz._
import scalaz.{Order, Ordering, std}
import scalaz.syntax.functor._
object anyVal {
implicit lazy val unitOrder: Order[Unit] = std.anyVal.unitInstance
implicit lazy val byteOrder: Order[Byte] = std.anyVal.byteInstance
implicit lazy val shortOrder: Order[Short] = std.anyVal.shortInstance
implicit lazy val intOrder: Order[Int] = std.anyVal.intInstance
implicit lazy val longOrder: Order[Long] = std.anyVal.longInstance
implicit lazy val doubleOrder: Order[Double] = Order.order { (a, b) =>
jl.Double.compare(a, b) match {
case -1 if a != b => Ordering.LT
case 1 if a != b => Ordering.GT
case _ => Ordering.EQ
}
}
implicit lazy val floatOrder: Order[Float] = Order.order { (a, b) =>
jl.Float.compare(a, b) match {
case -1 if a != b => Ordering.LT
case 1 if a != b => Ordering.GT
case _ => Ordering.EQ
}
}
implicit lazy val charOrder: Order[Char] = std.anyVal.char
implicit lazy val booleanOrder: Order[Boolean] = std.anyVal.booleanInstance
implicit lazy val javaByteOrder: Order[jl.Byte] = Order.orderBy(_.byteValue())
implicit lazy val javaShortOrder: Order[jl.Short] = Order.orderBy(_.shortValue())
implicit lazy val javaIntegerOrder: Order[jl.Integer] = Order.orderBy(_.intValue())
implicit lazy val javaLongOrder: Order[jl.Long] = Order.orderBy(_.longValue())
implicit lazy val javaDoubleOrder: Order[jl.Double] = Order.orderBy(_.doubleValue())
implicit lazy val javaFloatOrder: Order[jl.Float] = Order.orderBy(_.floatValue())
implicit lazy val javaCharacterOrder: Order[jl.Character] = Order.orderBy(_.charValue())
implicit lazy val javaBooleanOrder: Order[jl.Boolean] = Order.orderBy(_.booleanValue())
implicit lazy val javaNumberGen: Gen[jl.Number] =
Gen.oneOf(
Gen[jl.Byte].widen[jl.Number],
Gen[jl.Short].widen[jl.Number],
Gen[jl.Integer].widen[jl.Number],
Gen[jl.Long].widen[jl.Number],
Gen[jl.Float].widen[jl.Number],
Gen[jl.Double].widen[jl.Number]
)
lazy val infiniteDoubleGen: Gen[Double] =
Gen.elements(Double.PositiveInfinity, Double.NegativeInfinity)
implicit lazy val charGen: Gen[Char] = {
import jl.{Character => C}
Gen.choose(C.MIN_VALUE, C.MAX_VALUE).map(C.toChars(_)(0))
}
implicit lazy val javaCharacterGen: Gen[jl.Character] =
charGen.map(Char.box)
}
|
kxbmap/configs
|
core/src/test/scala/configs/testutil/instance/anyVal.scala
|
Scala
|
apache-2.0
| 3,068 |
/*
* Part of GDL book_api.
* Copyright (C) 2017 Global Digital Library
*
* See LICENSE
*/
package io.digitallibrary.bookapi.service
import java.util.UUID
import com.typesafe.scalalogging.LazyLogging
import io.digitallibrary.bookapi.BookApiProperties
import io.digitallibrary.bookapi.controller.NewFeaturedContent
import io.digitallibrary.bookapi.model._
import io.digitallibrary.bookapi.model.api.internal.{ChapterId, NewChapter, NewTranslation, TranslationId}
import io.digitallibrary.bookapi.model.api.{CrowdinException, FeaturedContentId, NotFoundException, TranslateRequest, ValidationMessage}
import io.digitallibrary.bookapi.model.domain._
import io.digitallibrary.bookapi.repository._
import io.digitallibrary.bookapi.service.search.IndexService
import io.digitallibrary.language.model.LanguageTag
import io.digitallibrary.license.model.License
import io.digitallibrary.network.AuthUser
import scala.util.{Failure, Success, Try}
trait WriteService {
this: TransactionHandler
with ConverterService
with ValidationService
with ReadService
with IndexService
with BookRepository
with CategoryRepository
with ChapterRepository
with ContributorRepository
with TranslationRepository
with EducationalAlignmentRepository
with PersonRepository
with PublisherRepository
with FeaturedContentRepository
with InTranslationRepository
=>
val writeService: WriteService
class WriteService extends LazyLogging {
def updateTranslation(translationToUpdate: Translation): Try[Translation] = {
for {
updatedTranslation <- Try(getTranslationRepository.updateTranslation(translationToUpdate))
indexedTranslation <- indexService.updateOrRemoveDocument(updatedTranslation)
} yield indexedTranslation
}
def addPerson(personName: String): Person = {
personRepository.withName(personName) match {
case Some(person) => person
case None => personRepository.add(Person(id = None, revision = None, name = personName, gdlId = None))
}
}
def addTranslatorToTranslation(translationId: Long, person: Person): Contributor = {
contributorRepository.add(Contributor(None, None, person.id.get, translationId, ContributorType.Translator, person))
}
def removeContributor(contributor: Contributor): Unit = {
contributorRepository.remove(contributor)
}
def newFeaturedContent(newFeaturedContent: NewFeaturedContent): Try[FeaturedContentId] = {
newFeaturedContent.category match {
case Some(categoryName) =>
categoryRepository.withName(categoryName) match {
case Some(category) => newFeaturedContentWithCategory(newFeaturedContent, Some(category))
case None => Failure(new api.ValidationException(errors = Seq(ValidationMessage("category", s"No category with name '$categoryName' found"))))
}
case None => newFeaturedContentWithCategory(newFeaturedContent, None)
}
}
def newFeaturedContentWithCategory(newFeaturedContent: NewFeaturedContent, existingCategory: Option[Category]): Try[FeaturedContentId] = {
for {
valid <- validationService.validateFeaturedContent(converterService.toFeaturedContent(newFeaturedContent, existingCategory))
persisted <- Try(featuredContentRepository.addContent(valid))
} yield FeaturedContentId(persisted.id.get)
}
def updateFeaturedContent(content: api.FeaturedContent): Try[FeaturedContentId] = {
content.category match {
case Some(category) if categoryRepository.withId(category.id).isEmpty =>
Failure(new api.ValidationException(errors = Seq(ValidationMessage("category", s"No category with id=${category.id} found"))))
case _ =>
for {
valid <- validationService.validateUpdatedFeaturedContent(content)
persistedId <- featuredContentRepository.updateContent(valid)
} yield persistedId
}
}
def deleteFeaturedContent(id: Long): Try[Unit] = {
featuredContentRepository.deleteContent(id)
}
def updateChapter(chapter: domain.Chapter) = chapterRepository.updateChapter(chapter)
def updateChapter(chapterid: Long, replacementChapter: NewChapter): Option[api.internal.ChapterId] = {
chapterRepository.withId(chapterid).map(existing => {
val updated = chapterRepository.updateChapter(existing.copy(
title = replacementChapter.title,
content = replacementChapter.content,
chapterType = ChapterType.valueOfOrDefault(replacementChapter.chapterType)))
api.internal.ChapterId(updated.id.get)
})
}
def newChapter(translationId: Long, newChapter: api.internal.NewChapter): Try[api.internal.ChapterId] = {
for {
valid <- validationService.validateChapter(converterService.toDomainChapter(newChapter, translationId))
persisted <- Try(chapterRepository.add(valid))
} yield api.internal.ChapterId(persisted.id.get)
}
def updateBook(bookId: Long, bookReplacement: api.internal.NewBook): Try[api.internal.BookId] = {
bookRepository.withId(bookId) match {
case None => Failure(new NotFoundException(s"Book with id $bookId was not found"))
case Some(existingBook) => {
val optPublisher = publisherRepository.withName(bookReplacement.publisher) match {
case Some(x) => Some(x)
case None => Some(Publisher(None, None, bookReplacement.publisher))
}
for {
validLicense <- Try(License(bookReplacement.license))
validPublisher <- validationService.validatePublisher(optPublisher)
persistedBook <- inTransaction { implicit session =>
val persistedPublisher = validPublisher.id match {
case None => Try(publisherRepository.add(validPublisher))
case Some(_) => Success(validPublisher)
}
persistedPublisher.flatMap(p => {
bookRepository.updateBook(existingBook.copy(
publisherId = p.id.get,
license = validLicense,
publisher = p,
source = bookReplacement.source))
})
}
} yield api.internal.BookId(persistedBook.id.get)
}
}
}
def newBook(newBook: api.internal.NewBook): Try[api.internal.BookId] = {
val optPublisher = publisherRepository.withName(newBook.publisher) match {
case Some(x) => Some(x)
case None => Some(Publisher(None, None, newBook.publisher))
}
for {
validLicense <- Try(License(newBook.license))
validPublisher <- validationService.validatePublisher(optPublisher)
persistedBook <- inTransaction { implicit session =>
val persistedPublisher = validPublisher.id match {
case None => Try(publisherRepository.add(validPublisher))
case Some(_) => Success(validPublisher)
}
persistedPublisher.flatMap(p => {
val toAdd = Book(
id = None,
revision = None,
publisherId = p.id.get,
publisher = p,
license = validLicense,
source = newBook.source)
Try(bookRepository.add(toAdd))
})
}
} yield api.internal.BookId(persistedBook.id.get)
}
def newTranslationForBook(originalBookId: Long, language: LanguageTag, translateRequest: domain.TranslateRequest, translationStatus: TranslationStatus.Value = TranslationStatus.IN_PROGRESS): Try[domain.Translation] = {
unFlaggedTranslationsRepository.forBookIdAndLanguage(originalBookId, language) match {
case None => Failure(new NotFoundException())
case Some(translation) => {
val newTranslation = translation.copy(
id = None,
revision = None,
externalId = None,
uuid = UUID.randomUUID().toString,
language = LanguageTag(translateRequest.toLanguage),
translatedFrom = Some(LanguageTag(translateRequest.fromLanguage)),
publishingStatus = PublishingStatus.UNLISTED,
translationStatus = Some(translationStatus))
Try {
inTransaction { implicit session =>
val persistedTranslation = unFlaggedTranslationsRepository.add(newTranslation)
val persistedContributors = translation.contributors.filter(_.`type` != ContributorType.Translator)
.map(ctb => contributorRepository.add(ctb.copy(id = None, revision = None, translationId = persistedTranslation.id.get)))
val persistedChapters = translation.chapters.map(chapterToCopy => {
val newChapter = chapterToCopy.copy(
id = None,
revision = None,
translationId = persistedTranslation.id.get)
chapterRepository.add(newChapter)
})
persistedTranslation.copy(chapters = persistedChapters, contributors = persistedContributors)
}
}
}
}
}
def deleteTranslation(translation: domain.Translation): Unit = inTransaction { implicit session =>
translation.chapters.foreach(chapterRepository.deleteChapter)
translation.contributors.foreach(contributorRepository.remove)
unFlaggedTranslationsRepository.deleteTranslation(translation)
}
def newTranslationForBook(bookId: Long, newTranslation: api.internal.NewTranslation): Try[api.internal.TranslationId] = {
validationService.validateNewTranslation(newTranslation).map(validNewTranslation => {
val domainTranslation = converterService.toDomainTranslation(validNewTranslation, bookId)
val categories = validNewTranslation.categories.map(cat => {
categoryRepository.withName(cat.name) match {
case Some(category) => category
case None => Category(None, None, cat.name)
}
})
val contributerToPerson = validNewTranslation.contributors.map(ctb => {
personRepository.withName(ctb.person.name) match {
case Some(person) => (ctb, person)
case None => (ctb, Person(None, None, ctb.person.name, None))
}
})
val translation = inTransaction { implicit session =>
val persistedCategories = categories.map {
case x if x.id.isEmpty => categoryRepository.add(x)
case y => y
}
val optPersistedEA = domainTranslation.educationalAlignment.flatMap(ea => {
educationalAlignmentRepository.add(ea).id
})
val translation = unFlaggedTranslationsRepository.add(
domainTranslation.copy(
categoryIds = persistedCategories.map(_.id.get),
eaId = optPersistedEA)
)
val persistedContributorsToPersons = contributerToPerson.map {
case (ctb, persisted) if persisted.id.isDefined => (ctb, persisted)
case (ctb, unpersisted) => (ctb, personRepository.add(unpersisted))
}
val persistedContributors = persistedContributorsToPersons.map { case (ctb, person) => {
contributorRepository.add(
Contributor(
None,
None,
person.id.get,
translation.id.get,
ContributorType.valueOf(ctb.`type`).get,
person))
}
}
indexService.indexDocument(translation)
translation
}
val persistedChapters: Seq[Try[ChapterId]] = validNewTranslation.chapters.map(chapter => {
newChapter(translation.id.get, chapter)
})
api.internal.TranslationId(translation.id.get)
})
}
def updateTranslationForBook(bookId: Long, translationId: Long, translationReplacement: NewTranslation): Option[Try[api.internal.TranslationId]] = {
unFlaggedTranslationsRepository.withId(translationId).map(existing => {
validationService.validateNewTranslation(translationReplacement).map(validTranslationReplacement => {
val replacement = converterService.toDomainTranslation(validTranslationReplacement, bookId)
val categories = validTranslationReplacement.categories.map(cat => {
categoryRepository.withName(cat.name) match {
case Some(category) => category
case None => Category(None, None, cat.name)
}
})
val contributerToPerson = validTranslationReplacement.contributors.map(ctb => {
personRepository.withName(ctb.person.name) match {
case Some(person) => (ctb, person)
case None => (ctb, Person(None, None, ctb.person.name, None))
}
})
inTransaction { implicit session =>
val persistedCategories = categories.map {
case x if x.id.isEmpty => categoryRepository.add(x)
case y => y
}
val optPersistedEA = (existing.educationalAlignment, replacement.educationalAlignment) match {
case (Some(existingEa), Some(replacementEa)) => educationalAlignmentRepository.updateEducationalAlignment(existingEa.copy(
alignmentType = replacementEa.alignmentType,
educationalFramework = replacementEa.educationalFramework,
targetDescription = replacementEa.targetDescription,
targetName = replacementEa.targetName,
targetUrl = replacementEa.targetUrl)).id
case (Some(existingEa), None) =>
educationalAlignmentRepository.remove(existingEa.id)
None
case (None, Some(ea)) => educationalAlignmentRepository.add(ea).id
case (None, None) => None
}
val translation = unFlaggedTranslationsRepository.updateTranslation(
existing.copy(
categoryIds = persistedCategories.map(_.id.get),
eaId = optPersistedEA,
title = replacement.title,
about = replacement.about,
numPages = replacement.numPages,
language = replacement.language,
datePublished = replacement.datePublished,
dateCreated = replacement.dateCreated,
coverphoto = replacement.coverphoto,
tags = replacement.tags,
isBasedOnUrl = replacement.isBasedOnUrl,
educationalUse = replacement.educationalUse,
educationalRole = replacement.educationalRole,
timeRequired = replacement.timeRequired,
typicalAgeRange = replacement.typicalAgeRange,
readingLevel = replacement.readingLevel,
interactivityType = replacement.interactivityType,
learningResourceType = replacement.learningResourceType,
accessibilityApi = replacement.accessibilityApi,
accessibilityControl = replacement.accessibilityControl,
accessibilityFeature = replacement.accessibilityFeature,
accessibilityHazard = replacement.accessibilityHazard,
bookFormat = replacement.bookFormat,
pageOrientation = replacement.pageOrientation,
additionalInformation = replacement.additionalInformation
)
)
val persistedContributorsToPersons = contributerToPerson.map {
case (ctb, persisted) if persisted.id.isDefined => (ctb, persisted)
case (ctb, unpersisted) => (ctb, personRepository.add(unpersisted))
}
val persistedContributors = persistedContributorsToPersons.map { case (ctb, person) => {
contributorRepository.add(
Contributor(
None,
None,
person.id.get,
translation.id.get,
ContributorType.valueOf(ctb.`type`).get,
person))
}
}
existing.contributors.foreach(contributorRepository.remove)
validTranslationReplacement.chapters.map(chapter => {
chapterRepository.forTranslationWithSeqNo(translation.id.get, chapter.seqNo) match {
case Some(existingChapter) => updateChapter(existingChapter.id.get, chapter)
case None => newChapter(translation.id.get, chapter)
}
})
// Remove any surplus chapters if the update contains fewer chapters than the existing version
chapterRepository.deleteChaptersExceptGivenSeqNumbers(translation.id.get, validTranslationReplacement.chapters.map(_.seqNo))
indexService.indexDocument(translation)
api.internal.TranslationId(translation.id.get)
}
})
})
}
def addInTransportMark(book: api.Book): Try[Unit] = {
unFlaggedTranslationsRepository.forBookIdAndLanguage(book.id, LanguageTag(book.language.code)) match {
case None => Failure(new NotFoundException())
case Some(inTransport) if inTransport.inTransport => Failure(CrowdinException("Book is currently being transported to Translation system"))
case Some(translation) if !translation.inTransport => Try(unFlaggedTranslationsRepository.updateTranslation(translation.copy(inTransport = true)))
}
}
def removeInTransportMark(book: api.Book): Try[Unit] = {
unFlaggedTranslationsRepository
.forBookIdAndLanguage(book.id, LanguageTag(book.language.code))
.filter(_.inTransport)
.map(tr => Try(unFlaggedTranslationsRepository.updateTranslation(tr.copy(inTransport = false)))) match {
      case None => Success(())
      case Some(Success(_)) => Success(())
case Some(Failure(err)) => Failure(err)
}
}
private def getTranslationRepository: TranslationRepository = {
if(AuthUser.hasRole(BookApiProperties.RoleWithWriteAccess)) {
allTranslationsRepository
} else {
unFlaggedTranslationsRepository
}
}
}
}
|
GlobalDigitalLibraryio/book-api
|
src/main/scala/io/digitallibrary/bookapi/service/WriteService.scala
|
Scala
|
apache-2.0
| 18,247 |
package sml.instructions
import sml.Machine
/**
* Adds the values in two registers and stores the
* result in the specified register.
*/
case class AddInstruction(label: String, opcode: String, result: Int, op1: Int, op2: Int) extends MathInstruction {
/**
* @see Instruction#execute(m: Machine)
*/
override def execute(m: Machine): Unit =
m.regs(result) = m.regs(op1) + m.regs(op2)
/**
* @see Instruction#toString()
*/
override def toString: String =
super.toString + s" $op1 + $op2 to $result \n"
}
object AddInstruction {
def apply(label: String, result: Int, op1: Int, op2: Int): MathInstruction =
new AddInstruction(label, "add", result, op1, op2)
}
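// Usage sketch, not part of the original file. `Machine` is defined elsewhere in this
// project, so only construction and the intended effect are shown; we assume that
// MathInstruction exposes Instruction#execute, as the override above suggests.
object AddInstructionExample {
  def demo(m: Machine): Unit = {
    val add = AddInstruction("f0", result = 1, op1 = 2, op2 = 3)
    add.execute(m) // stores m.regs(2) + m.regs(3) into m.regs(1)
  }
}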
|
BBK-PiJ-2015-67/sdp-portfolio
|
coursework/cw-one/src/main/scala/sml/instructions/AddInstruction.scala
|
Scala
|
unlicense
| 691 |
package org.skycastle.client.wrappers
import org.scalaprops.Bean
import com.jme3.math.Vector3f
/**
* Wraps a 3D vector, allowing it to be read from configuration files.
*/
class Vec3 extends Bean {
val x = p('x, 0f)
val y = p('y, 0f)
val z = p('z, 0f)
// setBeanName('Vec3)
def this(x: Float, y: Float, z: Float) {
this()
this.x := x
this.y := y
this.z := z
}
def this(vec3: Vector3f) {
this()
this.x := vec3.x
this.y := vec3.y
this.z := vec3.z
}
def toVector3f: Vector3f = new Vector3f(x(), y(), z())
}
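// Usage sketch, not part of the original file: round-tripping between the bean wrapper
// and jME3's Vector3f using only the constructors defined above.
object Vec3Example {
  def demo(): Unit = {
    val configured = new Vec3(1f, 2f, 3f)
    val jmeVector: Vector3f = configured.toVector3f
    val wrappedAgain = new Vec3(jmeVector)
    println(s"${wrappedAgain.x()}, ${wrappedAgain.y()}, ${wrappedAgain.z()}") // 1.0, 2.0, 3.0
  }
}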
|
zzorn/skycastle
|
src/main/scala/org/skycastle/client/wrappers/Vec3.scala
|
Scala
|
gpl-2.0
| 561 |
package com.olivergg.starttabs.scalaservice
import com.olivergg.starttabs.dto.Friend
object FriendsService {
// Some fake testing data
private var friends: Array[Friend] = Array(
Friend(0, "Ben Sparrow", "Enjoys drawing things", "https://pbs.twimg.com/profile_images/514549811765211136/9SgAuHeY.png"),
Friend(1, "Max Lynx", "Odd obsession with everything", "https://avatars3.githubusercontent.com/u/11214?v=3&s=460"),
Friend(2, "Andrew Jostlen", "Wears a sweet leather Jacket. I\'m a bit jealous", "https://pbs.twimg.com/profile_images/609810148769427456/dhzhuaNA.jpg"),
Friend(3, "Adam Bradleyson", "I think he needs to buy a boat", "https://pbs.twimg.com/profile_images/479090794058379264/84TKj_qa.jpeg"),
Friend(4, "Perry Governor", "Just the nicest guy", "https://pbs.twimg.com/profile_images/467390551830970368/80rkMI5v.jpeg")
)
def all(): Array[Friend] = {
println("calling all in FriendService")
friends
}
def get(id: Int): Friend = {
println(s"calling get in FriendService for id = $id")
friends(id)
}
}
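// Usage sketch, not part of the original file: reading the fake test data defined above.
object FriendsServiceExample {
  def demo(): Unit = {
    val everyone = FriendsService.all()
    val first = FriendsService.get(0)
    println(s"${everyone.length} friends; id 0 -> $first")
  }
}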
|
olivergg/scalajs-ionic-starttabs
|
app-js/src/main/scala/com/olivergg/starttabs/scalaservice/FriendsService.scala
|
Scala
|
gpl-2.0
| 1,068 |
package com.twitter.ostrich.admin
/**
* This allows us to turn on and off Finagle's tracing.
*
* See: https://github.com/twitter/finagle
*/
class FinagleTracing(klass: Class[_]) {
private val enableM = klass.getDeclaredMethod("enable")
private val disableM = klass.getDeclaredMethod("disable")
def enable() { enableM.invoke(null) }
def disable() { disableM.invoke(null) }
}
object FinagleTracing {
val instance: Option[FinagleTracing] = {
val loader = ClassLoader.getSystemClassLoader()
try {
Some(new FinagleTracing(loader.loadClass("com.twitter.finagle.tracing.Trace")))
} catch {
case _: ClassNotFoundException =>
None
}
}
}
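// Usage sketch, not part of the original file: tracing can only be toggled when Finagle
// is on the classpath, so callers go through the Option returned by `instance`.
object FinagleTracingExample {
  def disableTracingIfPresent(): Unit =
    FinagleTracing.instance match {
      case Some(tracing) => tracing.disable()
      case None => () // Finagle not on the classpath; nothing to do
    }
}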
|
hydro2k/ostrich
|
src/main/scala/com/twitter/ostrich/admin/FinagleTracing.scala
|
Scala
|
apache-2.0
| 684 |
/*
* Copyright 2008-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package jpademo {
package snippet {
import scala.xml.{NodeSeq,Text}
import net.liftweb.http.{RequestVar,S,SHtml}
import net.liftweb.util.Helpers
import net.liftweb.common.Loggable
import S._
import Helpers._
import net.liftweb.jpademo.model._
import Model._
import javax.persistence.{EntityExistsException,PersistenceException}
class AuthorOps extends Loggable {
def list (xhtml : NodeSeq) : NodeSeq = {
val authors = Model.createNamedQuery[Author]("findAllAuthors").getResultList()
authors.flatMap(author =>
bind("author", xhtml,
"name" -> Text(author.name),
"count" -> SHtml.link("/books/search.html", {() =>
BookOps.resultVar(Model.createNamedQuery[Book]("findBooksByAuthor", "id" ->author.id).getResultList().toList)
}, Text(author.books.size().toString)),
"edit" -> SHtml.link("add.html", () => authorVar(author), Text(?("Edit")))))
}
// Set up a requestVar to track the author object for edits and adds
object authorVar extends RequestVar(new Author())
def author = authorVar.is
def add (xhtml : NodeSeq) : NodeSeq = {
def doAdd () = {
if (author.name.length == 0) {
error("emptyAuthor", "The author's name cannot be blank")
} else {
try {
Model.mergeAndFlush(author)
redirectTo("list.html")
} catch {
case ee : EntityExistsException => error("That author already exists.")
case pe : PersistenceException => error("Error adding author"); logger.error("Author add failed", pe)
}
}
}
// Hold a val here so that the "id" closure holds it when we re-enter this method
val currentId = author.id
bind("author", xhtml,
"id" -> SHtml.hidden(() => author.id = currentId),
"name" -> SHtml.text(author.name, author.name = _),
"submit" -> SHtml.submit(?("Save"), doAdd))
}
}
}
}
}
|
wsaccaco/lift
|
examples/JPADemo/JPADemo-web/src/main/scala/net/liftweb/jpademo/snippet/Author.scala
|
Scala
|
apache-2.0
| 2,436 |
package pl.writeony.son2.scallop
import pl.writeonly.scalaops.specs.WhiteAssertSpec
class FancyThingsSpec extends WhiteAssertSpec {
"A Conf" should {
"xxx" in {
// Conf.props("key1") should equal (Some("value1"))
// Conf.firstListName() should equal ("first")
// Conf.secondListName() should equal ("second")
// Conf.firstList() should equal (List(1,2,3))
// Conf.secondList() should equal (List[Double](4,5,6))
}
}
}
|
writeonly/son2
|
scallions-clis/scallions-llop/src/test/scala/pl/writeony/son2/scallop/FancyThingsSpec.scala
|
Scala
|
apache-2.0
| 489 |
package net.mkowalski.sparkfim.util
import org.apache.log4j.Logger
trait Logging {
lazy val LOG = Logger.getLogger(this.getClass.getName)
}
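// Usage sketch, not part of the original file: mixing in Logging gives any class a
// lazily created log4j Logger named after the concrete class.
class ExampleJob extends Logging {
  def run(): Unit = LOG.info("starting example job")
}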
|
mjkowalski/spark-fim
|
src/main/scala/net/mkowalski/sparkfim/util/Logging.scala
|
Scala
|
mit
| 146 |
package com.shorrockin.narrator.utils
import se.scalablesolutions.akka.actor.Actor
import java.util.UUID
trait UniqueId {
this:Actor =>
id = UUID.randomUUID.toString
}
|
shorrockin/narrator
|
src/main/scala/utils/UniqueId.scala
|
Scala
|
apache-2.0
| 181 |
package com.coveo.blitz.server
trait GameException extends Exception {
def message: String
override def getMessage: String = s"Blitz - $message"
}
case class GeneratorException(message: String) extends GameException
case class NotFoundException(message: String) extends GameException
case class UserNotFoundException(message: String) extends GameException
case class RuleViolationException(message: String) extends GameException
case class TimeoutException(message: String) extends GameException
case class GameFinishedException(reason: Status.Finish) extends GameException {
def message = reason.toString
}
case class MapParseException(message: String) extends GameException
case class UtterFailException(message: String) extends GameException
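// Usage sketch, not part of the original file: every GameException prefixes its
// message with "Blitz - " via the overridden getMessage above.
object GameExceptionExample extends App {
  val error: GameException = NotFoundException("game 42 does not exist")
  println(error.getMessage) // Blitz - game 42 does not exist
}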
|
coveord/Blitz2016-Server
|
server/app/exception.scala
|
Scala
|
mit
| 760 |
class Ticket1909 (x: Int) {
def this() = this({
def bar() = 5
bar()
})
}
object Test extends App {
new Ticket1909()
}
|
lampepfl/dotty
|
tests/run/t1909b.scala
|
Scala
|
apache-2.0
| 132 |
/* Copyright 2009-2018 EPFL, Lausanne */
package inox
package solvers
package unrolling
import utils._
import scala.collection.mutable.{Map => MutableMap}
trait TemplateGenerator { self: Templates =>
import context._
import program._
import program.trees._
import program.symbols._
protected type TemplateClauses = (
Map[Variable, Encoded],
Map[Variable, Encoded],
Map[Variable, Set[Variable]],
Map[Variable, Seq[Expr]],
Seq[Expr],
Types,
Equalities,
Seq[LambdaTemplate],
Seq[QuantificationTemplate]
)
protected def emptyClauses: TemplateClauses =
(Map.empty, Map.empty, Map.empty, Map.empty, Seq.empty, Map.empty, Map.empty, Seq.empty, Seq.empty)
protected implicit class ClausesWrapper(clauses: TemplateClauses) {
def ++(that: TemplateClauses): TemplateClauses = {
val (thisConds, thisExprs, thisTree, thisGuarded, thisEqs, thisTps, thisEqualities, thisLambdas, thisQuants) = clauses
val (thatConds, thatExprs, thatTree, thatGuarded, thatEqs, thatTps, thatEqualities, thatLambdas, thatQuants) = that
(thisConds ++ thatConds, thisExprs ++ thatExprs, thisTree merge thatTree,
thisGuarded merge thatGuarded, thisEqs ++ thatEqs, thisTps merge thatTps,
thisEqualities merge thatEqualities, thisLambdas ++ thatLambdas, thisQuants ++ thatQuants)
}
def +(pair: (Variable, Expr)): TemplateClauses = {
val (thisConds, thisExprs, thisTree, thisGuarded, thisEqs, thisTps, thisEqualities, thisLambdas, thisQuants) = clauses
(thisConds, thisExprs, thisTree, thisGuarded merge pair, thisEqs, thisTps, thisEqualities, thisLambdas, thisQuants)
}
def proj: (
Map[Variable, Encoded],
Map[Variable, Encoded],
Map[Variable, Set[Variable]],
Types,
Equalities,
Seq[LambdaTemplate],
Seq[QuantificationTemplate]
) = {
val (thisConds, thisExprs, thisTree, _, _, thisTypes, thisEqualities, thisLambdas, thisQuants) = clauses
(thisConds, thisExprs, thisTree, thisTypes, thisEqualities, thisLambdas, thisQuants)
}
}
private def isSimple(expr: Expr): Boolean = {
exprOps.isSimple(expr) && !exprOps.exists {
case Equals(e1, e2) => unrollEquality(e1.getType)
case _ => false
} (expr)
}
def mkClauses(pathVar: Variable, expr: Expr, substMap: Map[Variable, Encoded], polarity: Option[Boolean] = None): TemplateClauses = {
val (p, tmplClauses) = mkExprClauses(pathVar, expr, substMap, polarity)
tmplClauses + (pathVar -> p)
}
def mkClauses(pathVar: Variable, tpe: Type, expr: Expr, substMap: Map[Variable, Encoded])
(implicit generator: TypingGenerator): TemplateClauses = {
val (p, tmplClauses) = mkTypeClauses(pathVar, tpe, expr, substMap)
tmplClauses + (pathVar -> p)
}
def mergeCalls(pathVar: Variable, condVar: Variable, substMap: Map[Variable, Encoded],
thenClauses: TemplateClauses, elseClauses: TemplateClauses): TemplateClauses = {
val builder = new Builder(pathVar, substMap)
builder ++= thenClauses
builder ++= elseClauses
// Clear all guardedExprs in builder since we're going to transform them by merging calls.
// The transformed guardedExprs will be added to builder at the end of the function.
builder.guardedExprs = Map.empty
def collectCalls(expr: Expr): Set[FunctionInvocation] =
exprOps.collect { case fi: FunctionInvocation => Set(fi) case _ => Set.empty[FunctionInvocation] }(expr)
def countCalls(expr: Expr): Int =
exprOps.count { case fi: FunctionInvocation => 1 case _ => 0}(expr)
def replaceCall(call: FunctionInvocation, newExpr: Expr)(e: Expr): Expr =
exprOps.replace(Map(call -> newExpr), e)
def getCalls(guardedExprs: Map[Variable, Seq[Expr]]): Map[TypedFunDef, Seq[(FunctionInvocation, Set[Variable])]] =
(for { (b, es) <- guardedExprs.toSeq; e <- es; fi <- collectCalls(e) } yield (b -> fi))
.groupBy(_._2)
.view.mapValues(_.map(_._1).toSet)
.toSeq
.groupBy(_._1.tfd)
.view.mapValues(_.toList.distinct.sortBy(p => countCalls(p._1))) // place inner calls first
.toMap
var thenGuarded = thenClauses._4
var elseGuarded = elseClauses._4
val thenCalls = getCalls(thenGuarded)
val elseCalls = getCalls(elseGuarded)
// We sort common function calls in order to merge nested calls first.
var toMerge: Seq[((FunctionInvocation, Set[Variable]), (FunctionInvocation, Set[Variable]))] =
(thenCalls.keySet & elseCalls.keySet)
.flatMap(tfd => thenCalls(tfd) zip elseCalls(tfd))
.toSeq
.sortBy(p => countCalls(p._1._1) + countCalls(p._2._1))
while (toMerge.nonEmpty) {
val ((thenCall, thenBlockers), (elseCall, elseBlockers)) = toMerge.head
toMerge = toMerge.tail
val newExpr: Variable = Variable.fresh("call", thenCall.tfd.getType, true)
builder.storeExpr(newExpr)
val replaceThen = replaceCall(thenCall, newExpr) _
val replaceElse = replaceCall(elseCall, newExpr) _
thenGuarded = thenGuarded.view.mapValues(_.map(replaceThen)).toMap
elseGuarded = elseGuarded.view.mapValues(_.map(replaceElse)).toMap
toMerge = toMerge.map(p => (
(replaceThen(p._1._1).asInstanceOf[FunctionInvocation], p._1._2),
(replaceElse(p._2._1).asInstanceOf[FunctionInvocation], p._2._2)
))
val newBlocker: Variable = Variable.fresh("bm", BooleanType(), true)
builder.storeConds(thenBlockers ++ elseBlockers, newBlocker)
builder.iff(orJoin((thenBlockers ++ elseBlockers).toSeq), newBlocker)
val newArgs = (thenCall.args zip elseCall.args).map { case (thenArg, elseArg) =>
val (newArg, argClauses) = mkExprClauses(newBlocker, ifExpr(condVar, thenArg, elseArg), builder.localSubst)
builder ++= argClauses
newArg
}
val newCall = thenCall.tfd.applied(newArgs)
builder.storeGuarded(newBlocker, Equals(newExpr, newCall))
}
for ((b, es) <- thenGuarded; e <- es) builder.storeGuarded(b, e)
for ((b, es) <- elseGuarded; e <- es) builder.storeGuarded(b, e)
builder.result
}
protected def mkExprStructure(
pathVar: Variable,
expr: Expr,
substMap: Map[Variable, Encoded],
onlySimple: Boolean = false
): (Expr, TemplateStructure, Map[Variable, Encoded]) = {
val (struct, depsByScope) = normalizeStructure(expr)
val deps = depsByScope.map { case (v, e, _) => v -> e }.toMap
val (depSubst, depContents) =
depsByScope.foldLeft(substMap, TemplateContents.empty(pathVar -> substMap(pathVar), Seq())) {
case ((depSubst, contents), (v, expr, conditions)) =>
if (isSimple(expr)) {
// Note that we can ignore conditions in this case as the underlying
// solver is able to find satisfying assignments for all simple terms
val encoder = mkEncoder(depSubst) _
val ePointers = Template.lambdaPointers(encoder)(expr)
(depSubst + (v -> encoder(expr)), contents.copy(pointers = contents.pointers ++ ePointers))
} else if (!isSimple(expr) && conditions.isEmpty) {
// We optimize for the case where conditions is empty as the quantifier
// instantiation procedure relies on path condition variables staying the
// same whenever possible.
val (e, cls) = mkExprClauses(pathVar, expr, depSubst)
// setup the full encoding substMap
val (conds, exprs, tree, types, equals, lmbds, quants) = cls.proj
val clauseSubst: Map[Variable, Encoded] = depSubst ++ conds ++ exprs ++
lmbds.map(_.ids) ++ quants.flatMap(_.mapping) ++ equals.flatMap(_._2.map(_.symbols))
val (eCalls, eApps, eMatchers, ePointers) = Template.extractCalls(e, clauseSubst)
val (clsClauses, clsCalls, clsApps, clsMatchers, clsPointers, _) =
Template.encode(pathVar -> substMap(pathVar), Seq.empty, cls, clauseSubst)
(depSubst + (v -> mkEncoder(clauseSubst)(e)), contents merge (
conds, exprs, tree, clsClauses, types,
clsCalls merge Map(substMap(pathVar) -> eCalls),
clsApps merge Map(substMap(pathVar) -> eApps),
clsMatchers merge Map(substMap(pathVar) -> eMatchers),
equals, lmbds, quants, clsPointers ++ ePointers
))
} else {
val condVar = Variable.fresh("p", BooleanType())
val exprVar = Variable.fresh("r", v.getType)
val localSubst = depSubst + (condVar -> encodeSymbol(condVar)) + (exprVar -> encodeSymbol(exprVar))
val cls = mkClauses(pathVar, Equals(condVar, andJoin(conditions)), localSubst) ++
mkClauses(condVar, Equals(exprVar, expr), localSubst)
// setup the full encoding substMap
val (conds, exprs, tree, types, equals, lmbds, quants) = cls.proj
val clauseSubst: Map[Variable, Encoded] = localSubst ++ conds ++ exprs ++
lmbds.map(_.ids) ++ quants.flatMap(_.mapping) ++ equals.flatMap(_._2.map(_.symbols))
val (clsClauses, clsCalls, clsApps, clsMatchers, clsPointers, _) =
Template.encode(pathVar -> substMap(pathVar), Seq.empty, cls, clauseSubst)
(depSubst + (v -> localSubst(exprVar)), contents merge (
conds + (condVar -> localSubst(condVar)),
exprs + (exprVar -> localSubst(exprVar)),
tree merge Map(pathVar -> Set(condVar)),
clsClauses, types, clsCalls, clsApps, clsMatchers, equals, lmbds, quants, clsPointers
))
}
}
val sortedDeps = exprOps.variablesOf(struct).map(v => v -> deps(v)).toSeq.sortBy(_._1.id)
val dependencies = sortedDeps.map(p => depSubst(p._1))
val structure = new TemplateStructure(struct, dependencies, depContents)
val freshSubst = exprOps.variablesOf(struct).map(v => v -> v.freshen).toMap
val freshDeps = depSubst.map { case (v, e) => freshSubst.getOrElse(v, v) -> e }
(exprOps.replaceFromSymbols(freshSubst, struct), structure, freshDeps)
}
final private class Builder(pathVar: Variable, substMap: Map[Variable, Encoded]) {
var condVars = Map[Variable, Encoded]()
var condTree = Map[Variable, Set[Variable]](pathVar -> Set.empty)
def storeCond(pathVar: Variable, id: Variable): Unit = {
condVars += id -> encodeSymbol(id)
condTree += pathVar -> (condTree.getOrElse(pathVar, Set.empty) + id)
}
def storeConds(pathVars: Set[Variable], id: Variable): Unit = {
condVars += id -> encodeSymbol(id)
for (pathVar <- pathVars) {
condTree += pathVar -> (condTree.getOrElse(pathVar, Set.empty) + id)
}
}
@inline def encodedCond(id: Variable): Encoded = substMap.getOrElse(id, condVars(id))
var exprVars = Map[Variable, Encoded]()
@inline def storeExpr(id: Variable): Unit = exprVars += id -> encodeSymbol(id)
// Represents clauses of the form:
// id => expr && ... && expr
var guardedExprs = Map[Variable, Seq[Expr]]()
def storeGuarded(guardVar: Variable, expr: Expr): Unit = {
assert(expr.getType == BooleanType(), expr.asString + " is not of type Boolean. " + explainTyping(expr))
val prev = guardedExprs.getOrElse(guardVar, Nil)
guardedExprs += guardVar -> (expr +: prev)
}
var types = Map[Encoded, Set[Typing]]()
def storeType(pathVar: Variable, tpe: Type, arg: Expr)(implicit generator: TypingGenerator): Expr = {
val b = encodedCond(pathVar)
val encoder = mkEncoder(localSubst) _
val closures = typeOps.variablesOf(tpe).toSeq.sortBy(_.id).map(encoder).map(Left(_))
val (result, typing) = generator match {
case FreeGenerator | ContractGenerator =>
val typeCall: Variable = Variable.fresh("tp", BooleanType(), true)
storeExpr(typeCall)
(typeCall, Typing(tpe, encoder(arg), Constraint(exprVars(typeCall), closures, generator == FreeGenerator)))
case CaptureGenerator(container, containerType) =>
// @nv: note that we only store the non-dependent type here as we don't need
// to consider dependent types when looking at captures
(BooleanLiteral(true), Typing(tpe.getType, encoder(arg), Capture(container, containerType)))
}
types += b -> (types.getOrElse(b, Set.empty) + typing)
result
}
// Represents equations (simple formulas)
var equations = Seq[Expr]()
@inline def iff(e1: Expr, e2: Expr): Unit = equations :+= Equals(e1, e2)
var lambdas = Seq[LambdaTemplate]()
@inline def registerLambda(lambda: LambdaTemplate): Unit = lambdas :+= lambda
var quantifications = Seq[QuantificationTemplate]()
@inline def registerQuantification(quantification: QuantificationTemplate): Unit =
quantifications :+= quantification
var equalities = Map[Encoded, Set[Equality]]()
@inline def storeEquality(guardVar: Variable, e1: Expr, e2: Expr): Unit = {
val b = encodedCond(guardVar)
val prev: Set[Equality] = equalities.getOrElse(b, Set.empty)
val encoder: Expr => Encoded = mkEncoder(localSubst)
equalities += b -> (prev + Equality(e1.getType, encoder(e1), encoder(e2)))
}
@inline def localSubst: Map[Variable, Encoded] =
substMap ++ condVars ++ exprVars ++ lambdas.map(_.ids)
def result: TemplateClauses =
(condVars, exprVars, condTree, guardedExprs, equations, types, equalities, lambdas, quantifications)
def ++=(that: TemplateClauses): this.type = {
val (conds, exprs, tree, guarded, eqs, tpes, equls, lmbds, quants) = that
condVars ++= conds
exprVars ++= exprs
condTree = condTree merge tree
guardedExprs = guardedExprs merge guarded
equations ++= eqs
types = types merge tpes
equalities ++= equls
lambdas ++= lmbds
quantifications ++= quants
this
}
}
protected def mkExprClauses(
pathVar: Variable,
expr: Expr,
substMap: Map[Variable, Encoded],
polarity: Option[Boolean] = None
): (Expr, TemplateClauses) = {
val builder = new Builder(pathVar, substMap)
import builder._
def rec(pathVar: Variable, expr: Expr, pol: Option[Boolean]): Expr = expr match {
case a @ Assume(cond, body) =>
val e = rec(pathVar, cond, Some(true))
storeGuarded(pathVar, e)
rec(pathVar, body, pol)
case c @ Choose(res, pred) =>
val newExpr = res.toVariable.freshen
storeExpr(newExpr)
val (tpeExpr, tmplClauses) = mkTypeClauses(pathVar, res.tpe, newExpr, localSubst)(FreeGenerator)
storeGuarded(pathVar, tpeExpr)
builder ++= tmplClauses
val p = rec(pathVar, exprOps.replace(Map(res.toVariable -> newExpr), pred), Some(true))
storeGuarded(pathVar, p)
newExpr
case l @ Let(i, e: Lambda, b) =>
val re = rec(pathVar, e, None) // guaranteed variable!
val rb = rec(pathVar, exprOps.replace(Map(i.toVariable -> re), b), pol)
rb
case l @ Let(i, e, b) =>
val newExpr: Variable = Variable.fresh("lt", i.getType, true)
storeExpr(newExpr)
val re = rec(pathVar, e, None)
storeGuarded(pathVar, Equals(newExpr, re))
val rb = rec(pathVar, exprOps.replace(Map(i.toVariable -> newExpr), b), pol)
rb
case n @ Not(e) =>
Not(rec(pathVar, e, pol.map(!_)))
case i @ Implies(lhs, rhs) =>
if (!isSimple(i)) {
rec(pathVar, Or(Not(lhs), rhs), pol)
} else {
implies(rec(pathVar, lhs, None), rec(pathVar, rhs, None))
}
case a @ And(parts) =>
val partitions = SeqUtils.groupWhile(parts)(isSimple)
partitions.map(andJoin) match {
case Seq(e) => e
case seq =>
val newExpr: Variable = Variable.fresh("e", BooleanType(), true)
storeExpr(newExpr)
def recAnd(pathVar: Variable, partitions: Seq[Expr]): Unit = partitions match {
case x :: Nil =>
storeGuarded(pathVar, Equals(newExpr, rec(pathVar, x, pol)))
case x :: xs =>
val newRes: Variable = Variable.fresh("res", BooleanType(), true)
storeExpr(newRes)
val xrec = rec(pathVar, x, pol)
storeGuarded(pathVar, Equals(newRes, xrec))
val newBool: Variable = Variable.fresh("b", BooleanType(), true)
storeCond(pathVar, newBool)
storeGuarded(pathVar, implies(not(newRes), not(newExpr)))
iff(and(pathVar, newRes), newBool)
recAnd(newBool, xs)
case Nil => scala.sys.error("Should never happen!")
}
recAnd(pathVar, seq)
newExpr
}
case o @ Or(parts) =>
val partitions = SeqUtils.groupWhile(parts)(isSimple)
partitions.map(orJoin) match {
case Seq(e) => e
case seq =>
val newExpr: Variable = Variable.fresh("e", BooleanType(), true)
storeExpr(newExpr)
def recOr(pathVar: Variable, partitions: Seq[Expr]): Unit = partitions match {
case x :: Nil =>
storeGuarded(pathVar, Equals(newExpr, rec(pathVar, x, pol)))
case x :: xs =>
val newRes: Variable = Variable.fresh("res", BooleanType(), true)
storeExpr(newRes)
val xrec = rec(pathVar, x, None)
storeGuarded(pathVar, Equals(newRes, xrec))
val newBool: Variable = Variable.fresh("b", BooleanType(), true)
storeCond(pathVar, newBool)
storeGuarded(pathVar, implies(newRes, newExpr))
iff(and(pathVar, not(newRes)), newBool)
recOr(newBool, xs)
case Nil => scala.sys.error("Should never happen!")
}
recOr(pathVar, seq)
newExpr
}
case i @ IfExpr(cond, thenn, elze) => {
if (isSimple(i)) {
i
} else {
val newBool1 : Variable = Variable.fresh("b", BooleanType(), true)
val newBool2 : Variable = Variable.fresh("b", BooleanType(), true)
val newExpr : Variable = Variable.fresh("e", i.getType, true)
val condVar : Variable = Variable.fresh("c", BooleanType(), true)
storeCond(pathVar, newBool1)
storeCond(pathVar, newBool2)
storeExpr(newExpr)
storeExpr(condVar)
val crec = rec(pathVar, cond, None)
storeGuarded(pathVar, Equals(condVar, crec))
iff(and(pathVar, condVar), newBool1)
iff(and(pathVar, not(condVar)), newBool2)
val (trec, tClauses) = mkExprClauses(newBool1, thenn, localSubst, pol)
val (erec, eClauses) = mkExprClauses(newBool2, elze, localSubst, pol)
builder ++= mergeCalls(pathVar, condVar, localSubst,
tClauses + (newBool1 -> Equals(newExpr, trec)),
eClauses + (newBool2 -> Equals(newExpr, erec)))
newExpr
}
}
case l: Lambda =>
val template = LambdaTemplate(pathVar -> encodedCond(pathVar), l, localSubst)
builder.registerLambda(template)
template.ids._1
case f: Forall =>
val (assumptions, without: Forall) = liftAssumptions(f)
for (a <- assumptions) {
rec(pathVar, a, Some(true))
}
val TopLevelAnds(conjuncts) = without.body
val conjunctQs = conjuncts.map { conj =>
val vars = exprOps.variablesOf(conj)
val conjArgs = without.params.filter(vd => vars(vd.toVariable) || hasInstance(vd.tpe) != Some(true))
if (conjArgs.isEmpty) {
rec(pathVar, conj, pol)
} else {
val forall = Forall(conjArgs, conj)
pol match {
case Some(p) =>
val (res, template) = QuantificationTemplate(pathVar -> encodedCond(pathVar), p, forall, localSubst)
registerQuantification(template)
res
case None =>
val (res, negTemplate) = QuantificationTemplate(pathVar -> encodedCond(pathVar), false, forall, localSubst)
val inst = Variable.fresh("neg-inst", BooleanType(), true)
storeExpr(inst)
iff(inst, res)
val (_, posTemplate) = QuantificationTemplate(inst -> exprVars(inst), true, forall, localSubst, defer = true)
registerQuantification(negTemplate)
registerQuantification(posTemplate)
res
}
}
}
andJoin(conjunctQs)
case Equals(e1, e2) if unrollEquality(e1.getType) =>
val (v, _) = equalitySymbol(e1.getType)
val re1 = rec(pathVar, e1, pol)
val re2 = rec(pathVar, e2, pol)
storeEquality(pathVar, re1, re2)
Application(v, Seq(re1, re2))
case Operator(as, r) => r(as.map(a => rec(pathVar, a, None)))
}
val p = rec(pathVar, expr, polarity)
(p, builder.result)
}
/** Generates the clauses and other bookkeeping relevant to a type unfolding template. */
protected def mkTypeClauses(
pathVar: Variable,
tpe: Type,
expr: Expr,
substMap: Map[Variable, Encoded]
)(implicit generator: TypingGenerator): (Expr, TemplateClauses) = {
val builder = new Builder(pathVar, substMap)
import builder._
case class RecursionState(
recurseAdt: Boolean, // visit adt children/fields
recurseMap: Boolean, // unroll map definition
recurseSet: Boolean, // unroll set definition
recurseBag: Boolean // unroll bag definition
)
def rec(pathVar: Variable, tpe: Type, expr: Expr, state: RecursionState): Expr = tpe match {
case tpe if !(generator unroll tpe) => BooleanLiteral(true) // nothing to do here!
case (_: FunctionType | _: PiType) => storeType(pathVar, tpe, expr)
case tp: TypeParameter if generator == FreeGenerator => typesManager.storeTypeParameter(tp)
case RefinementType(vd, pred) =>
val newExpr: Variable = Variable.fresh("lt", vd.getType, true)
storeExpr(newExpr)
storeGuarded(pathVar, Equals(newExpr, expr))
val (p, predClauses) = mkExprClauses(pathVar,
exprOps.replaceFromSymbols(Map(vd -> newExpr), pred), localSubst)
builder ++= predClauses
and(rec(pathVar, vd.tpe, expr, state), p)
case SigmaType(params, to) =>
val (newExprs, recParams) = params.zipWithIndex.map { case (vd, i) =>
val newExpr: Variable = Variable.fresh("lt", vd.getType, true)
storeExpr(newExpr)
storeGuarded(pathVar, Equals(newExpr, TupleSelect(expr, i + 1)))
(newExpr, rec(pathVar, vd.tpe, newExpr, state))
}.unzip
val recTo = rec(pathVar,
typeOps.replaceFromSymbols((params zip newExprs).toMap, to),
TupleSelect(expr, params.size + 1), state)
andJoin(recParams :+ recTo)
case adt: ADTType =>
val sort = adt.getSort
and(
sort.invariant
.filter(_ => generator == FreeGenerator)
.map(_.applied(Seq(expr)))
.getOrElse(BooleanLiteral(true)),
if (sort.definition.isInductive && !state.recurseAdt) {
storeType(pathVar, tpe, expr)
} else {
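          // First-level unrolling of the sort: for every constructor whose fields themselves
          // require unrolling, introduce a blocker guarded by the constructor test and equate
          // newExpr with the conjunction of the recursive field checks under that blocker.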
val newExpr = Variable.fresh("e", BooleanType(), true)
storeExpr(newExpr)
val stored = for (tcons <- sort.constructors) yield {
if (tcons.fields.exists(vd => generator unroll vd.tpe)) {
val newBool: Variable = Variable.fresh("b", BooleanType(), true)
storeCond(pathVar, newBool)
val recProp = andJoin(for (vd <- tcons.fields) yield {
rec(newBool, vd.tpe, ADTSelector(expr, vd.id), state.copy(recurseAdt = false))
})
iff(and(pathVar, isCons(expr, tcons.id)), newBool)
storeGuarded(newBool, Equals(newExpr, recProp))
true
} else {
false
}
}
if (stored.foldLeft(false)(_ || _)) {
newExpr
} else {
BooleanLiteral(true)
}
}
)
case TupleType(tpes) =>
andJoin(for ((tpe, idx) <- tpes.zipWithIndex) yield {
rec(pathVar, tpe, TupleSelect(expr, idx + 1), state)
})
case MapType(from, to) =>
if (!state.recurseMap) {
storeType(pathVar, tpe, expr)
} else {
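            // Case split on emptiness: condVar <=> (expr is the empty map with default
            // defaultExpr). If non-empty, expr is viewed as some rest map updated at
            // (keyExpr -> valueExpr), and the predicate recurses on the rest (without further
            // map unrolling), the key and the value; the default value is always checked.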
val newBool1 : Variable = Variable.fresh("mapIsEmpty", BooleanType(), true)
val newBool2 : Variable = Variable.fresh("mapNonEmpty", BooleanType(), true)
storeCond(pathVar, newBool1)
storeCond(pathVar, newBool2)
val condVar : Variable = Variable.fresh("condMapEmpty", BooleanType(), true)
val newExpr = Variable.fresh("mapRes", BooleanType(), true)
val keyExpr: Variable = Variable.fresh("mapKey", from, true)
val valueExpr: Variable = Variable.fresh("mapValue", to, true)
val defaultExpr: Variable = Variable.fresh("mapDefault", to, true)
val restExpr: Variable = Variable.fresh("mapRest", tpe, true)
storeExpr(condVar)
storeExpr(newExpr)
storeExpr(keyExpr)
storeExpr(valueExpr)
storeExpr(defaultExpr)
storeExpr(restExpr)
storeGuarded(pathVar, Equals(condVar, Equals(expr, FiniteMap(Seq(), defaultExpr, from, to))))
iff(and(pathVar, condVar), newBool1)
iff(and(pathVar, not(condVar)), newBool2)
storeGuarded(newBool1, newExpr)
storeGuarded(newBool2, Equals(expr, MapUpdated(restExpr, keyExpr, valueExpr)))
storeGuarded(newBool2, Equals(newExpr, and(
rec(newBool2, tpe, restExpr, state.copy(recurseMap = false)),
rec(newBool2, from, keyExpr, state),
rec(newBool2, to, valueExpr, state)
)))
and(rec(pathVar, to, defaultExpr, state), newExpr)
}
case SetType(base) =>
if (!state.recurseSet) {
storeType(pathVar, tpe, expr)
} else {
val newBool1 : Variable = Variable.fresh("setIsEmpty", BooleanType(), true)
val newBool2 : Variable = Variable.fresh("setNonEmpty", BooleanType(), true)
storeCond(pathVar, newBool1)
storeCond(pathVar, newBool2)
val condVar : Variable = Variable.fresh("condSetEmpty", BooleanType(), true)
val newExpr = Variable.fresh("setRes", BooleanType(), true)
val elemExpr: Variable = Variable.fresh("setElem", base, true)
val restExpr: Variable = Variable.fresh("setRest", tpe, true)
storeExpr(condVar)
storeExpr(newExpr)
storeExpr(elemExpr)
storeExpr(restExpr)
storeGuarded(pathVar, Equals(condVar, Equals(expr, FiniteSet(Seq(), base))))
iff(and(pathVar, condVar), newBool1)
iff(and(pathVar, not(condVar)), newBool2)
storeGuarded(newBool1, newExpr)
storeGuarded(newBool2, Equals(expr, SetUnion(FiniteSet(Seq(elemExpr), base), restExpr)))
storeGuarded(newBool2, Equals(newExpr, and(
rec(newBool2, tpe, restExpr, state.copy(recurseSet = false)),
rec(newBool2, base, elemExpr, state)
)))
newExpr
}
case BagType(base) =>
if (!state.recurseBag) {
storeType(pathVar, tpe, expr)
} else {
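            // Same emptiness case split as for maps and sets, with the extra constraint that
            // the multiplicity of the extracted element is strictly positive.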
val newBool1 : Variable = Variable.fresh("bagIsEmpty", BooleanType(), true)
val newBool2 : Variable = Variable.fresh("bagNonEmpty", BooleanType(), true)
storeCond(pathVar, newBool1)
storeCond(pathVar, newBool2)
val condVar : Variable = Variable.fresh("condBagEmpty", BooleanType(), true)
val newExpr = Variable.fresh("bagRes", BooleanType(), true)
val elemExpr: Variable = Variable.fresh("bagElem", base, true)
val multExpr: Variable = Variable.fresh("bagMult", IntegerType(), true)
val restExpr: Variable = Variable.fresh("bagRest", tpe, true)
storeExpr(condVar)
storeExpr(newExpr)
storeExpr(elemExpr)
storeExpr(multExpr)
storeExpr(restExpr)
storeGuarded(pathVar, Equals(condVar, Equals(expr, FiniteBag(Seq(), base))))
iff(and(pathVar, condVar), newBool1)
iff(and(pathVar, not(condVar)), newBool2)
storeGuarded(newBool1, newExpr)
storeGuarded(newBool2, Equals(expr, BagUnion(FiniteBag(Seq((elemExpr, multExpr)), base), restExpr)))
storeGuarded(newBool2, GreaterThan(multExpr, IntegerLiteral(0)))
storeGuarded(newBool2, Equals(newExpr, and(
rec(newBool2, tpe, restExpr, state.copy(recurseBag = false)),
rec(newBool2, base, elemExpr, state),
rec(newBool2, IntegerType(), multExpr, state)
)))
newExpr
}
case _ => throw new InternalSolverError(s"Unexpected unrollable: ${tpe.asString}")
}
val p = rec(pathVar, tpe, expr, RecursionState(true, true, true, true))
(p, builder.result)
}
}
| epfl-lara/inox | src/main/scala/inox/solvers/unrolling/TemplateGenerator.scala | Scala | apache-2.0 | 29,380 |
package org.jetbrains.plugins.scala
package gotoclass
import com.intellij.navigation.{ChooseByNameContributor, NavigationItem}
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiClass
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.stubs.StubIndex
import org.jetbrains.plugins.scala.lang.psi.stubs.index.ScalaIndexKeys
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import scala.collection.JavaConverters._
/**
* Nikolay.Tropin
* 12/19/13
*/
class ScalaGoToClassContributor extends ChooseByNameContributor {
def getNames(project: Project, includeNonProjectItems: Boolean): Array[String] = {
val classNames = StubIndex.getInstance().getAllKeys(ScalaIndexKeys.NOT_VISIBLE_IN_JAVA_SHORT_NAME_KEY, project)
val packageObjectNames = StubIndex.getInstance().getAllKeys(ScalaIndexKeys.PACKAGE_OBJECT_SHORT_NAME_KEY, project)
(classNames.asScala ++ packageObjectNames.asScala).toArray
}
def getItemsByName(name: String, pattern: String, project: Project, includeNonProjectItems: Boolean): Array[NavigationItem] = {
val scope = if (includeNonProjectItems) GlobalSearchScope.allScope(project) else GlobalSearchScope.projectScope(project)
val cleanName = ScalaNamesUtil.cleanFqn(name)
val classes = StubIndex.getElements(ScalaIndexKeys.NOT_VISIBLE_IN_JAVA_SHORT_NAME_KEY, cleanName, project, scope, classOf[PsiClass])
val packageObjects = StubIndex.getElements(ScalaIndexKeys.PACKAGE_OBJECT_SHORT_NAME_KEY, cleanName, project, scope, classOf[PsiClass])
(classes.asScala ++ packageObjects.asScala).toArray
}
}
| triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/gotoclass/ScalaGoToClassContributor.scala | Scala | apache-2.0 | 1,615 |
package sds.util
class MultiArgsStringBuilder {
val build: StringBuilder = new StringBuilder();
def this(init: String) {
this()
build.append(init)
}
def append(elements: Any*): Unit = elements.foreach(build.append)
override def toString(): String = build.toString()
}
| g1144146/sds_for_scala | src/main/scala/sds/util/MultiArgsStringBuilder.scala | Scala | apache-2.0 | 310 |
package scala.offheap
package internal
package macros
import scala.reflect.macros.blackbox
class Layout(val c: blackbox.Context) extends Common {
import c.universe.{weakTypeOf => wt, _}
import c.internal._, decorators._
def inLayout(tpe: Type)(f: => Tree) = {
tpe.typeSymbol.updateAttachment(Clazz.InLayout())
    val res = f
    tpe.typeSymbol.removeAttachment[Clazz.InLayout]
    res // return the tree built while the InLayout marker was attached
}
def field[C: WeakTypeTag, T: WeakTypeTag](after: Tree, annots: Tree) = inLayout(wt[C]) {
val tpe = wt[T]
val isEmbed = annots.collect { case q"new $c" if c.symbol == EmbedClass => c }.nonEmpty
val alignment =
if (isEmbed) {
assertEmbeddable(tpe)
assertNotInLayout(tpe.typeSymbol, "illegal recursive embedding")
alignmentOfEmbed(tpe)
} else alignmentOf(tpe)
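    // The new field starts where the previous one ends (offset 0 for the first field),
    // and the result is padded to the alignment computed above.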
val baseoffset = after match {
case q"" => 0
case _ =>
val q"${prev: Field}" = ExtractField.unapply(c.typecheck(after).symbol).get.head
prev.offset + prev.size
}
q"${padded(baseoffset, alignment)}"
}
def markComplete[C: WeakTypeTag] = {
wt[C].typeSymbol.updateAttachment(Clazz.LayoutComplete())
q"()"
}
}
| adamwy/scala-offheap | macros/src/main/scala/offheap/internal/macros/Layout.scala | Scala | bsd-3-clause | 1,171 |
/*
* Copyright 2013 - 2015, Daniel Krzywicki <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package pl.edu.agh.scalamas.solutions
/**
* Created by mateusz on 14.11.16.
*/
trait SolutionsComponent {
type SolutionsType
/**
* A handle to current best solutions
*/
def solutions: Solutions[SolutionsType]
def solutionsFormatter: (SolutionsType) => String
}
| ros3n/IntOb | core/src/main/scala/pl/edu/agh/scalamas/solutions/SolutionsComponent.scala | Scala | mit | 1,438 |
package $organization$.$name;format="lower,word"$
import org.scalatest._
import org.nd4j.linalg.factory.Nd4j
class AppSpec extends FlatSpec with Matchers {
"This project" should "have some tests" in {
(2 + 2) should be (4)
}
it should "Be able to create NDArrays on the CPU or GPU" in {
val a = Nd4j.ones(2, 2)
val b = Nd4j.ones(2, 2)
(a add b).getDouble(0L, 0L) should be (2.0)
}
}
| wmeddie/dl4j-scala.g8 | src/main/g8/src/test/scala/AppSpec.scala | Scala | apache-2.0 | 411 |
package ai.agnos.sparql.stream.client
import akka.http.scaladsl.model.MediaType.NotCompressible
import akka.http.scaladsl.model.{ContentType, HttpCharsets, MediaType}
import org.eclipse.rdf4j.model.{ModelFactory, ValueFactory}
import org.eclipse.rdf4j.model.impl.{LinkedHashModelFactory, SimpleValueFactory}
/* */
/* CONSTANTS */
/* --------- */
/**
* Sparql Client Constants
*
* The ones referring to parameters and paths are all standardised and should work
* for any triple store.
* @see [[https://www.w3.org/TR/sparql11-protocol/]]
*
* Tested with Jena Fuseki 2.4.0 and Stardog 3.4.X through 4.2.2
*/
object SparqlClientConstants {
val QUERY_URI_PART = "/query"
val QUERY_PARAM_NAME = "query"
val REASONING_PARAM_NAME = "reasoning"
val UPDATE_URI_PART = "/update"
val UPDATE_PARAM_NAME = "update"
val GRAPH_PARAM_NAME = "graph"
val DEFAULT_PARAM_NAME = "default"
val FORM_MIME_TYPE = "x-www-form-urlencoded"
val SPARQL_RESULTS_MIME_TYPE = "sparql-results+json"
val TEXT_BOOLEAN_MIME_TYPE = "boolean"
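  // Illustrative use of these constants (not part of this file): per the SPARQL 1.1 protocol,
  // a query can be sent as an HTTP POST to <endpoint>/query with a form-encoded body
  // "query=<sparql text>" and an Accept header of application/sparql-results+json, while
  // updates are POSTed to <endpoint>/update with an "update" form parameter.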
/**
* Media type for Form upload
*/
val `application/x-www-form-urlencoded`: ContentType.NonBinary =
MediaType.applicationWithFixedCharset(
FORM_MIME_TYPE,
HttpCharsets.`UTF-8`
).toContentType
/**
* Media type for Sparql JSON protocol
*/
val `application/sparql-results+json`: ContentType.NonBinary =
MediaType.applicationWithFixedCharset(
SPARQL_RESULTS_MIME_TYPE,
HttpCharsets.`UTF-8`
).toContentType
/**
* Media type for text/boolean
*/
val `text/boolean`: ContentType.NonBinary = {
MediaType.text(TEXT_BOOLEAN_MIME_TYPE).toContentType(HttpCharsets.`UTF-8`)
}
/**
* Content Type for application/ld+json
*/
val `application/ld+json`: ContentType.NonBinary = {
MediaType.applicationWithFixedCharset("ld+json", HttpCharsets.`UTF-8`).toContentType
}
/**
* Content Types for text/x-nquads and application/n-quads
*/
val `text/x-nquads`: ContentType.NonBinary = {
MediaType.text("x-nquads").toContentType(HttpCharsets.`UTF-8`)
}
val `application/n-quads`: ContentType.NonBinary = {
MediaType.applicationWithFixedCharset("n-quads", HttpCharsets.`UTF-8`).toContentType
}
/**
* Content Type for application/n-triples
*/
val `application/n-triples`: ContentType.NonBinary = {
MediaType.applicationWithFixedCharset("n-triples", HttpCharsets.`UTF-8`).toContentType
}
/**
* Content Type for application/n-triples
*/
val `application/octet-stream`: ContentType.Binary = {
MediaType.applicationBinary("octet-stream", NotCompressible).toContentType
}
/**
* Content Type for text/turtle
*/
val `text/turtle`: ContentType.NonBinary = {
MediaType.text("turtle").toContentType(HttpCharsets.`UTF-8`)
}
/**
* Checks the number of available CPU cores from the JVM runtime. Used parallelise async stream operations.
*/
lazy val numberOfCpuCores: Int = sys.runtime.availableProcessors()
/**
* RDF4J Value Factory
*/
lazy val valueFactory: ValueFactory = SimpleValueFactory.getInstance()
lazy val modelFactory: ModelFactory = new LinkedHashModelFactory()
}
| modelfabric/reactive-sparql | src/main/scala/ai/agnos/sparql/stream/client/SparqlClientConstants.scala | Scala | mit | 3,210 |
package com.databricks.spark.avro
import java.io.File
import java.nio.ByteBuffer
import java.sql.Timestamp
import scala.collection.JavaConversions._
import org.apache.avro.Schema
import org.apache.avro.Schema.{Type, Field}
import org.apache.avro.file.DataFileWriter
import org.apache.avro.generic.{GenericData, GenericRecord, GenericDatumWriter}
import org.apache.commons.io.FileUtils
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.TestSQLContext
import org.apache.spark.sql.test.TestSQLContext._
import org.apache.spark.sql.types._
import org.scalatest.FunSuite
class AvroSuite extends FunSuite {
val episodesFile = "src/test/resources/episodes.avro"
val testFile = "src/test/resources/test.avro"
test("request no fields") {
val df = TestSQLContext.read.avro(episodesFile)
df.registerTempTable("avro_table")
assert(TestSQLContext.sql("select count(*) from avro_table").collect().head === Row(8))
}
test("convert formats") {
TestUtils.withTempDir { dir =>
val df = TestSQLContext.read.avro(episodesFile)
df.write.parquet(dir.getCanonicalPath)
assert(TestSQLContext.read.parquet(dir.getCanonicalPath).count() === df.count)
}
}
test("rearrange internal schema") {
TestUtils.withTempDir { dir =>
val df = TestSQLContext.read.avro(episodesFile)
df.select("doctor", "title").write.avro(dir.getCanonicalPath)
}
}
test("test NULL avro type") {
TestUtils.withTempDir { dir =>
val fields = Seq(new Field("null", Schema.create(Type.NULL), "doc", null))
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
avroRec.put("null", null)
dataFileWriter.append(avroRec)
dataFileWriter.flush()
dataFileWriter.close()
intercept[SchemaConversionException] {
TestSQLContext.read.avro(s"$dir.avro")
}
}
}
test("Incorrect Union Type") {
TestUtils.withTempDir { dir =>
val BadUnionType = Schema.createUnion(List(Schema.create(Type.INT),Schema.create(Type.STRING)))
val fixedSchema = Schema.createFixed("fixed_name", "doc", "namespace", 20)
val fixedUnionType = Schema.createUnion(List(fixedSchema,Schema.create(Type.NULL)))
val fields = Seq(new Field("field1", BadUnionType, "doc", null),
new Field("fixed", fixedUnionType, "doc", null),
new Field("bytes", Schema.create(Type.BYTES), "doc", null))
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
avroRec.put("field1", "Hope that was not load bearing")
avroRec.put("bytes", ByteBuffer.wrap(Array[Byte]()))
dataFileWriter.append(avroRec)
dataFileWriter.flush()
dataFileWriter.close()
intercept[SchemaConversionException] {
TestSQLContext.read.avro(s"$dir.avro")
}
}
}
test("Lots of nulls") {
TestUtils.withTempDir { dir =>
val schema = StructType(Seq(
StructField("binary", BinaryType, true),
StructField("timestamp", TimestampType, true),
StructField("array", ArrayType(ShortType), true),
StructField("map", MapType(StringType, StringType), true),
StructField("struct", StructType(Seq(StructField("int", IntegerType, true))))))
val rdd = sparkContext.parallelize(Seq[Row](
Row(null, new Timestamp(1), Array[Short](1,2,3), null, null),
Row(null, null, null, null, null),
Row(null, null, null, null, null),
Row(null, null, null, null, null)))
val df = TestSQLContext.createDataFrame(rdd, schema)
df.write.avro(dir.toString)
assert(TestSQLContext.read.avro(dir.toString).count == rdd.count)
}
}
test("Struct field type") {
TestUtils.withTempDir { dir =>
val schema = StructType(Seq(
StructField("float", FloatType, true),
StructField("short", ShortType, true),
StructField("byte", ByteType, true),
StructField("boolean", BooleanType, true)
))
val rdd = sparkContext.parallelize(Seq(
Row(1f, 1.toShort, 1.toByte, true),
Row(2f, 2.toShort, 2.toByte, true),
Row(3f, 3.toShort, 3.toByte, true)
))
val df = TestSQLContext.createDataFrame(rdd, schema)
df.write.avro(dir.toString)
assert(TestSQLContext.read.avro(dir.toString).count == rdd.count)
}
}
test("Array data types") {
TestUtils.withTempDir { dir =>
val testSchema = StructType(Seq(
StructField("byte_array", ArrayType(ByteType), true),
StructField("short_array", ArrayType(ShortType), true),
StructField("float_array", ArrayType(FloatType), true),
StructField("bool_array", ArrayType(BooleanType), true),
StructField("long_array", ArrayType(LongType), true),
StructField("double_array", ArrayType(DoubleType), true),
StructField("decimal_array", ArrayType(DecimalType(5, 5)), true),
StructField("bin_array", ArrayType(BinaryType), true),
StructField("timestamp_array", ArrayType(TimestampType), true),
StructField("array_array", ArrayType(ArrayType(StringType), true), true),
StructField("struct_array", ArrayType(StructType(Seq(StructField("name", StringType, true)))))))
val arrayOfByte = new Array[Byte](4)
for (i <- arrayOfByte.indices) {
arrayOfByte(i) = i.toByte
}
val rdd = sparkContext.parallelize(Seq(
Row(arrayOfByte, Array[Short](1,2,3,4), Array[Float](1f, 2f, 3f, 4f),
Array[Boolean](true, false, true, false), Array[Long](1L, 2L), Array[Double](1.0, 2.0),
Array[BigDecimal](BigDecimal.valueOf(3)), Array[Array[Byte]](arrayOfByte, arrayOfByte),
Array[Timestamp](new Timestamp(0)),
Array[Array[String]](Array[String]("CSH, tearing down the walls that divide us", "-jd")),
Array[Row](Row("Bobby G. can't swim")))))
val df = TestSQLContext.createDataFrame(rdd, testSchema)
df.write.avro(dir.toString)
assert(TestSQLContext.read.avro(dir.toString).count == rdd.count)
}
}
test("write with compression") {
TestUtils.withTempDir { dir =>
val AVRO_COMPRESSION_CODEC = "spark.sql.avro.compression.codec"
val AVRO_DEFLATE_LEVEL = "spark.sql.avro.deflate.level"
val uncompressDir = s"$dir/uncompress"
val deflateDir = s"$dir/deflate"
val snappyDir = s"$dir/snappy"
val fakeDir = s"$dir/fake"
val df = TestSQLContext.read.avro(testFile)
TestSQLContext.setConf(AVRO_COMPRESSION_CODEC, "uncompressed")
df.write.avro(uncompressDir)
TestSQLContext.setConf(AVRO_COMPRESSION_CODEC, "deflate")
TestSQLContext.setConf(AVRO_DEFLATE_LEVEL, "9")
df.write.avro(deflateDir)
TestSQLContext.setConf(AVRO_COMPRESSION_CODEC, "snappy")
df.write.avro(snappyDir)
val uncompressSize = FileUtils.sizeOfDirectory(new File(uncompressDir))
val deflateSize = FileUtils.sizeOfDirectory(new File(deflateDir))
val snappySize = FileUtils.sizeOfDirectory(new File(snappyDir))
assert(uncompressSize > deflateSize)
assert(snappySize > deflateSize)
}
}
test("dsl test") {
val results = TestSQLContext.read.avro(episodesFile).select("title").collect()
assert(results.length === 8)
}
test("support of various data types") {
// This test uses data from test.avro. You can see the data and the schema of this file in
// test.json and test.avsc
val all = TestSQLContext.read.avro(testFile).collect()
assert(all.length == 3)
val str = TestSQLContext.read.avro(testFile).select("string").collect()
assert(str.map(_(0)).toSet.contains("Terran is IMBA!"))
val simple_map = TestSQLContext.read.avro(testFile).select("simple_map").collect()
assert(simple_map(0)(0).getClass.toString.contains("Map"))
assert(simple_map.map(_(0).asInstanceOf[Map[String, Some[Int]]].size).toSet == Set(2, 0))
val union0 = TestSQLContext.read.avro(testFile).select("union_string_null").collect()
assert(union0.map(_(0)).toSet == Set("abc", "123", null))
val union1 = TestSQLContext.read.avro(testFile).select("union_int_long_null").collect()
assert(union1.map(_(0)).toSet == Set(66, 1, null))
val union2 = TestSQLContext.read.avro(testFile).select("union_float_double").collect()
assert(union2.map(x => new java.lang.Double(x(0).toString)).exists(p => Math.abs(p - Math.PI) < 0.001))
val fixed = TestSQLContext.read.avro(testFile).select("fixed3").collect()
assert(fixed.map(_(0).asInstanceOf[Array[Byte]]).exists(p => p(1) == 3))
val enum = TestSQLContext.read.avro(testFile).select("enum").collect()
assert(enum.map(_(0)).toSet == Set("SPADES", "CLUBS", "DIAMONDS"))
val record = TestSQLContext.read.avro(testFile).select("record").collect()
assert(record(0)(0).getClass.toString.contains("Row"))
assert(record.map(_(0).asInstanceOf[Row](0)).contains("TEST_STR123"))
val array_of_boolean = TestSQLContext.read.avro(testFile).select("array_of_boolean").collect()
assert(array_of_boolean.map(_(0).asInstanceOf[Seq[Boolean]].size).toSet == Set(3, 1, 0))
val bytes = TestSQLContext.read.avro(testFile).select("bytes").collect()
assert(bytes.map(_(0).asInstanceOf[Array[Byte]].length).toSet == Set(3, 1, 0))
}
test("sql test") {
sql(
s"""
|CREATE TEMPORARY TABLE avroTable
|USING com.databricks.spark.avro
|OPTIONS (path "$episodesFile")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT * FROM avroTable").collect().length === 8)
}
test("conversion to avro and back") {
// Note that test.avro includes a variety of types, some of which are nullable. We expect to
// get the same values back.
TestUtils.withTempDir { dir =>
val avroDir = s"$dir/avro"
TestSQLContext.read.avro(testFile).write.avro(avroDir)
TestUtils.checkReloadMatchesSaved(testFile, avroDir)
}
}
test("conversion to avro and back with namespace") {
// Note that test.avro includes a variety of types, some of which are nullable. We expect to
// get the same values back.
TestUtils.withTempDir { tempDir =>
val name = "AvroTest"
val namespace = "com.databricks.spark.avro"
val parameters = Map("recordName" -> name, "recordNamespace" -> namespace)
val avroDir = tempDir + "/namedAvro"
TestSQLContext.read.avro(testFile).write.options(parameters).avro(avroDir)
TestUtils.checkReloadMatchesSaved(testFile, avroDir)
// Look at raw file and make sure has namespace info
val rawSaved = TestSQLContext.sparkContext.textFile(avroDir)
val schema = rawSaved.collect().mkString("")
assert(schema.contains(name))
assert(schema.contains(namespace))
}
}
test("converting some specific sparkSQL types to avro") {
TestUtils.withTempDir { tempDir =>
val testSchema = StructType(Seq(
StructField("Name", StringType, false),
StructField("Length", IntegerType, true),
StructField("Time", TimestampType, false),
StructField("Decimal", DecimalType(10, 10), true),
StructField("Binary", BinaryType, false)))
val arrayOfByte = new Array[Byte](4)
for (i <- arrayOfByte.indices) {
arrayOfByte(i) = i.toByte
}
val cityRDD = sparkContext.parallelize(Seq(
Row("San Francisco", 12, new Timestamp(666), null, arrayOfByte),
Row("Palo Alto", null, new Timestamp(777), null, arrayOfByte),
Row("Munich", 8, new Timestamp(42), Decimal(3.14), arrayOfByte)))
val cityDataFrame = TestSQLContext.createDataFrame(cityRDD, testSchema)
val avroDir = tempDir + "/avro"
cityDataFrame.write.avro(avroDir)
assert(TestSQLContext.read.avro(avroDir).collect().length == 3)
// TimesStamps are converted to longs
val times = TestSQLContext.read.avro(avroDir).select("Time").collect()
assert(times.map(_(0)).toSet == Set(666, 777, 42))
// DecimalType should be converted to string
val decimals = TestSQLContext.read.avro(avroDir).select("Decimal").collect()
assert(decimals.map(_(0)).contains("3.14"))
// There should be a null entry
val length = TestSQLContext.read.avro(avroDir).select("Length").collect()
assert(length.map(_(0)).contains(null))
val binary = TestSQLContext.read.avro(avroDir).select("Binary").collect()
for (i <- arrayOfByte.indices) {
assert(binary(1)(0).asInstanceOf[Array[Byte]](i) == arrayOfByte(i))
}
}
}
test("support of globbed paths") {
val e1 = TestSQLContext.read.avro("*/test/resources/episodes.avro").collect()
assert(e1.length == 8)
val e2 = TestSQLContext.read.avro("src/*/*/episodes.avro").collect()
assert(e2.length == 8)
}
test("reading from invalid path throws exception") {
// Directory given has no avro files
intercept[AvroRelationException] {
TestUtils.withTempDir(dir => TestSQLContext.read.avro(dir.getCanonicalPath))
}
intercept[AvroRelationException] {
TestSQLContext.read.avro("very/invalid/path/123.avro")
}
// In case of globbed path that can't be matched to anything, another exception is thrown (and
// exception message is helpful)
intercept[AvroRelationException] {
TestSQLContext.read.avro("*/*/*/*/*/*/*/something.avro")
}
intercept[NoAvroFilesException] {
TestUtils.withTempDir { dir =>
FileUtils.touch(new File(dir, "test"))
TestSQLContext.read.avro(dir.toString)
}
}
}
test("SQL test insert overwrite") {
TestUtils.withTempDir { tempDir =>
val tempEmptyDir = s"$tempDir/sqlOverwrite"
// Create a temp directory for table that will be overwritten
new File(tempEmptyDir).mkdirs()
sql(
s"""
|CREATE TEMPORARY TABLE episodes
|USING com.databricks.spark.avro
|OPTIONS (path "$episodesFile")
""".stripMargin.replaceAll("\\n", " "))
sql(s"""
|CREATE TEMPORARY TABLE episodesEmpty
|(name string, air_date string, doctor int)
|USING com.databricks.spark.avro
|OPTIONS (path "$tempEmptyDir")
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT * FROM episodes").collect().length === 8)
assert(sql("SELECT * FROM episodesEmpty").collect().isEmpty)
sql(
s"""
|INSERT OVERWRITE TABLE episodesEmpty
|SELECT * FROM episodes
""".stripMargin.replaceAll("\\n", " "))
assert(sql("SELECT * FROM episodesEmpty").collect().length == 8)
}
}
test("test save and load") {
// Test if load works as expected
TestUtils.withTempDir { tempDir =>
val df = TestSQLContext.read.avro(episodesFile)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.avro(tempSaveDir)
val newDf = TestSQLContext.read.avro(tempSaveDir)
assert(newDf.count == 8)
}
}
}
| JDrit/spark-avro | src/test/scala/com/databricks/spark/avro/AvroSuite.scala | Scala | apache-2.0 | 15,510 |
object Test {
def idiv(x: Int): Unit = x / 0
def ldiv(x: Long): Unit = x / 0
def irem(x: Int): Unit = x % 0
def lrem(x: Long): Unit = x % 0
def check(x: => Any) = try { x; sys.error("failed to throw divide by zero!") } catch { case _: ArithmeticException => }
def main(args: Array[String]) {
check(idiv(1))
check(ldiv(1L))
check(irem(1))
check(lrem(1L))
}
}
| felixmulder/scala | test/files/run/t8601.scala | Scala | bsd-3-clause | 390 |
package edison.cli
class OptionParserForTests extends EdisonOptionParser {
var errors: String = ""
override def reportError(msg: String): Unit = errors += msg + "\\n"
override def reportWarning(msg: String): Unit = failure("warnings are not expected in this test")
override def showTryHelp: Unit = ()
}
| pawel-wiejacha/edison | service/src/test/scala/edison/cli/OptionParserForTests.scala | Scala | mit | 312 |
package com.hooboy.etjo.game
import org.newdawn.slick._
import org.newdawn.slick.geom._
import com.hooboy.etjo.render._
class Player(private val etjo: ETJO, private val config: PlayerConfig, private val map: GameMap) extends WorldObject {
private var x = config.startx
private var y = config.starty
private var prevx = x
private var prevy = y
private var velocityx = 0F
private var velocityy = 0F
private val acceleration = 0.0001F
private val radius = 0.1F
override def draw(r: Renderer) {
r.drawCircle(config.color, x - radius, y - radius, 2 * radius, y + 0.999F * radius)
}
override def tick(container: GameContainer, delta: Int) {
if (config.isUpPressed(container.getInput)) {
velocityy -= acceleration * delta
}
if (config.isDownPressed(container.getInput)) {
velocityy += acceleration * delta
}
if (config.isRightPressed(container.getInput)) {
velocityx += acceleration * delta
}
if (config.isLeftPressed(container.getInput)) {
velocityx -= acceleration * delta
}
prevx = x
prevy = y
val fricction = map.getTileType(x.floor.toInt, y.floor.toInt).getFricction
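    // Integrate each axis separately: if the move collides, it is undone and the velocity on
    // that axis is reflected and scaled by 1/friction; both components are then damped by the
    // tile's friction factor.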
y += velocityy * delta
if (WorldObjectRegistry.isBlocked(this, etjo)) {
velocityy *= -(1F / fricction)
y = prevy
}
x += velocityx * delta
if (WorldObjectRegistry.isBlocked(this, etjo)) {
velocityx *= -(1F / fricction)
x = prevx
}
velocityx *= fricction
velocityy *= fricction
println(x)
println(y)
}
override def boundingBox = Option(new Ellipse(x, y, radius, radius))
override def collideWith(other: WorldObject) {}
}
| Z6fans/etjo | src/com/hooboy/etjo/game/Player.scala | Scala | mit | 1,656 |
// Copyright 2011 Kiel Hodges
package sample2
import replicant._
import experiment1._
class MockWidgetRepository extends MockGenericRepository[Widget] { self =>
override val testDouble: WidgetRepository = new TestDouble with WidgetRepository {
def findByPartNumber(partNumber: String): Widget = self.findByPartNumber(partNumber).response
}
val findByPartNumber = method("findByPartNumber", testDouble.findByPartNumber _)
}
| greenbar/replicant | scala/src/test/scala/sample2/MockWidgetRepository.scala | Scala | mit | 442 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.recommendation
import scala.util.Random
import org.jblas.DoubleMatrix
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.util.MLUtils
/**
* Generate RDD(s) containing data for Matrix Factorization.
*
* This method samples training entries according to the oversampling factor
* 'trainSampFact', which is a multiplicative factor of the number of
* degrees of freedom of the matrix: rank*(m+n-rank).
*
* It optionally samples entries for a testing matrix using
* 'testSampFact', the percentage of the number of training entries
* to use for testing.
*
* This method takes the following inputs:
* sparkMaster (String) The master URL.
* outputPath (String) Directory to save output.
* m (Int) Number of rows in data matrix.
* n (Int) Number of columns in data matrix.
* rank (Int) Underlying rank of data matrix.
* trainSampFact (Double) Oversampling factor.
* noise (Boolean) Whether to add gaussian noise to training data.
* sigma (Double) Standard deviation of added gaussian noise.
* test (Boolean) Whether to create testing RDD.
* testSampFact (Double) Percentage of training data to use as test data.
*/
object MFDataGenerator{
def main(args: Array[String]) {
if (args.length < 2) {
println("Usage: MFDataGenerator " +
"<master> <outputDir> [m] [n] [rank] [trainSampFact] [noise] [sigma] [test] [testSampFact]")
System.exit(1)
}
val sparkMaster: String = args(0)
val outputPath: String = args(1)
val m: Int = if (args.length > 2) args(2).toInt else 100
val n: Int = if (args.length > 3) args(3).toInt else 100
val rank: Int = if (args.length > 4) args(4).toInt else 10
val trainSampFact: Double = if (args.length > 5) args(5).toDouble else 1.0
val noise: Boolean = if (args.length > 6) args(6).toBoolean else false
val sigma: Double = if (args.length > 7) args(7).toDouble else 0.1
val test: Boolean = if (args.length > 8) args(8).toBoolean else false
val testSampFact: Double = if (args.length > 9) args(9).toDouble else 0.1
val sc = new SparkContext(sparkMaster, "MFDataGenerator")
val A = DoubleMatrix.randn(m, rank)
val B = DoubleMatrix.randn(rank, n)
val z = 1 / (scala.math.sqrt(scala.math.sqrt(rank)))
A.mmuli(z)
B.mmuli(z)
val fullData = A.mmul(B)
val df = rank * (m + n - rank)
val sampSize = scala.math.min(scala.math.round(trainSampFact * df),
scala.math.round(.99 * m * n)).toInt
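    // With the defaults (m = 100, n = 100, rank = 10, trainSampFact = 1.0) this gives
    // df = 10 * (100 + 100 - 10) = 1900 sampled training entries, well below the
    // 0.99 * m * n = 9900 cap.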
val rand = new Random()
val mn = m * n
val shuffled = rand.shuffle(1 to mn toIterable)
val omega = shuffled.slice(0, sampSize)
val ordered = omega.sortWith(_ < _).toArray
val trainData: RDD[(Int, Int, Double)] = sc.parallelize(ordered)
.map(x => (fullData.indexRows(x - 1), fullData.indexColumns(x - 1), fullData.get(x - 1)))
    // optionally add gaussian noise; the mapped RDD must be kept, since map does not mutate in place
    val finalTrainData = if (noise) {
      trainData.map(x => (x._1, x._2, x._3 + rand.nextGaussian * sigma))
    } else {
      trainData
    }
    finalTrainData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)
// optionally generate testing data
if (test) {
val testSampSize = scala.math
.min(scala.math.round(sampSize * testSampFact),scala.math.round(mn - sampSize)).toInt
val testOmega = shuffled.slice(sampSize, sampSize + testSampSize)
val testOrdered = testOmega.sortWith(_ < _).toArray
val testData: RDD[(Int, Int, Double)] = sc.parallelize(testOrdered)
.map(x => (fullData.indexRows(x - 1), fullData.indexColumns(x - 1), fullData.get(x - 1)))
testData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath)
}
sc.stop()
}
}
| windeye/spark | mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala | Scala | apache-2.0 | 4,579 |