code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M)
---|---|---|---|---|---|
package sample.stream.experiments
import rx.lang.scala.Observable
import scala.concurrent.duration._
object AmbExample {
def main(args: Array[String]): Unit = {
val observable1 = Observable.from(1 to 5)
val observable2 = observable1.map(_ + 10).delay(1.seconds)
val observable3 = observable1.map(_ + 20)
val observable = observable2.amb(observable3)
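// observable2 is delayed by one second while observable3 emits immediately,
// so amb is expected to settle on observable3 and print the values 21 to 25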
observable.subscribe { x: Int =>
println(x)
}
Thread.sleep(10000)
}
}
| pallavig/akka-examples | src/main/scala/sample/stream/experiments/AmbExample.scala | Scala | cc0-1.0 | 468 |
package Chapter11
object Operators {
// topics:
// identifiers
// infix operators
// unary operators
// assignment operators
// precedence
// associativity
// the apply and update methods
// extractors
// extractors with one or no arguments
// the unapplySeq method
// dynamic invocation
// implementing your own operators;
// operators + implicit conversions = Domain Specific Language;
// special methods: apply, update, unapply;
// unary and binary operators are method calls;
// operator precedence depends on the first char, associativity on the last;
// extractors extract tuples or sequences of values; or single value or just bool;
// types with Dynamic trait can inspect/dispatch methods names and arguments at runtime;
// identifiers
def identifiers = {
// names of variables, functions, classes, etc
// in identifiers:
// unicode chars are allowed;
// operator chars are allowed: the ASCII characters
// ! # % & * + - / : < = > ? @ \ ^ | ~
// that are not letters, digits, underscore,
// the .,; punctuation marks, parentheses () [] {}, or quotation marks ' ` "
val √ = scala.math.sqrt _
println(s"sqrt of 2: ${√(2)}")
// The identifiers that are reserved in the specification
// @ # : = _ => <- <: <% >: ⇒ ←
// you can include just about any id in backquotes
val `val` = 42
val `yield` = () => java.lang.Thread.`yield`()
}
// infix operators
def infixOperators = {
// binary operators
// a id b : operator between the arguments
// method with two parameters: implicit + explicit
val `1to10` = 1 to 10
val actually = 1.to(10)
// def to(end: Int): Range.Inclusive = Range.inclusive(self, end)
// operator chars
val ten = 1 -> 10 // tuple
val ten2 = 1.->(10)
// just define a method with desired name
class Fraction(n: Int, d: Int) {
private val num: Int = ???
private val den: Int = ???
def *(other: Fraction) = new Fraction(num * other.num, den * other.den)
}
}
// unary operators
def unaryOperators = {
// operator with one parameter: prefix, postfix
// + - ! ~ allowed as prefix operators
// converted into obj.unary_op
// e.g. -a => a.unary_-
// postfix op can lead to parsing errors
import scala.language.postfixOps
// postfix operator follows its argument
// obj op => obj.op()
val `42` = 42 toString
// 42.toString()
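// sketch (not from the book): a prefix operator on your own type is just a
// method named unary_<op>; the class name Temperature is illustrative
class Temperature(val degrees: Int) {
  def unary_- : Temperature = new Temperature(-degrees)
}
val belowZero = -new Temperature(7) // calls (new Temperature(7)).unary_-
println(s"degrees: ${belowZero.degrees}") // -7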
}
// assignment operators
def assignmentOperators = {
// in the form op=
// means mutation
// obj op= obj2 => obj = obj op obj2
// e.g. a += b => a = a + b
// <=, >=, != are NOT assignment operators
// operator starting with '=' is never an assignment op (==, ===, =/=, etc)
// if obj has a method 'op=' that method is called directly
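// sketch (not from the book): a type with its own '+=' method; 'acc += 5'
// then calls that method directly instead of being rewritten to acc = acc + 5
class Accumulator(var total: Int = 0) {
  def +=(amount: Int): Unit = { total += amount }
}
val acc = new Accumulator()
acc += 5; acc += 7
println(s"accumulated: ${acc.total}") // 12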
}
// precedence
def precedence = {
// except for assignment operators, the precedence is determined by
// the FIRST character of the operator
/* postfix operators have lower precedence than infix operators
highest: an op character (# ? @ \ ~) other than those below
* / %
+ -
:
< >
! =
&
^
|
a char that is not an operator char (alphanumeric)
lowest: assignment operators (op=)
*/
// postfix operators have lower precedence than infix operators
// a infix b postfix => (a infix b) postfix
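// worked example (sketch): '*' binds tighter than '+', and the alphanumeric
// 'max' binds loosest, so this parses as (1 + (2 * 3)) max 5
val precedenceDemo = 1 + 2 * 3 max 5
assert(precedenceDemo == 7)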
}
// associativity
def associativity = {
// evaluated left-to-right or right-to-left
// left-associative
val left = 17 - 2 - 9 // => (17 - 2) - 9
// all operators are left-associative
// except: 'op:' // end in a colon;
// 'op=' // assignment operators
val right = 1 :: 2 :: Nil // => 1 :: (2 :: Nil)
// def ::[B >: A] (x: B): List[B] = new scala.collection.immutable.::(x, this)
// right-associative binary operator is a method of its second argument
// 2 :: Nil => Nil.::(2)
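// sketch (not from the book): '+:' also ends in a colon, so it is
// right-associative and a method of its right-hand operand
val prepended = 0 +: Vector(1, 2) // Vector(1, 2).+:(0) => Vector(0, 1, 2)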
}
// the apply and update methods
def theApplyAndUpdateMethods = {
// 'function call' syntax
// f(args) => f.apply(args)
// or, if f(args) is a left side of an assignment
// f(args) = v => f.update(args, v)
val scores = scala.collection.mutable.HashMap.empty[String, Int]
scores("bob") = 100; scores.update("bob", 100)
var bobsScore = scores("bob"); bobsScore = scores.apply("bob")
// apply in companion objects: factory method
class Fraction(n: Int, d: Int) { def *(other: Fraction): Fraction = ??? }
object Fraction {
// factory
def apply(n: Int, d: Int): Fraction = new Fraction(n, d)
}
// very convenient:
val res = Fraction(3,4) * Fraction(2,5)
}
// extractors
def extractors = {
// object with 'unapply' method
// extract values, check conditions;
// pattern matching facilitator
// example
class Fraction(n: Int, d: Int) { val num: Int = ???; val den: Int = ???; def *(other: Fraction): Fraction = ??? }
object Fraction {
// factory
def apply(n: Int, d: Int): Fraction = new Fraction(n, d)
// extractor's method // n.b. Option
def unapply(arg: Fraction): Option[(Int, Int)] =
if (arg.den == 0) None else Some((arg.num, arg.den))
}
// use in variable definition: extract 'a' and 'b'
var Fraction(a, b) = Fraction(3,4) * Fraction(2,5)
// or pattern match
Fraction(1,2) match {
// n, d are bound to num,den of object
case Fraction(n, d) => println(s"numerator: ${n}, denominator: ${d}")
case _ => sys.error("oops")
}
// you can use extractors to extract information from any object (if appropriate unapply defined)
// first, last name // no Name class, just extractor (object)
object Name {
def unapply(arg: String): Option[(String, String)] = {
val pos = arg.indexOf(" ")
if (pos < 0) None
else Some((arg take pos, arg drop pos+1))
// val parts = arg.split("""\s+""")
// if (parts.length > 1) Some((parts.head, parts.tail.mkString(" ")))
// else None
}
}
val Name(first, last) = "Cay Horstmann" // Name.unapply
println(s"$first, $last")
// case class
// every 'case class' automatically has companion with 'apply' and 'unapply'
case class Currency(value: Double, unit: String)
val money = Currency(29.95, "EUR") // apply
money match {
case Currency(amount, "USD") => println(s"$$$amount") // unapply
case Currency(amount, unit) => println(s"$amount $unit") // catch-all: money is in EUR, avoid a MatchError
}
}
// extractors with one or no arguments
def extractorsWithOneOrNoArguments = {
// extractors for one component should return Option[value]
object Number {
def unapply(arg: String): Option[Int] = scala.util.Try { arg.trim.toInt }.toOption
}
val Number(n) = "123"
println(s"number $n")
// extractor for testing: return a bool
object IsCompound {
def unapply(arg: String): Boolean = arg.contains(" ")
}
object Name {
def unapply(arg: String): Option[(String, String)] = {val pos = arg.indexOf(" ");
if (pos < 0) None else Some((arg take pos, arg drop pos+1)) } }
// n.b. 'IsCompound()' syntax, no 'last' name
"John van der Linden" match {
case Name(first, IsCompound()) => println(s"first: $first, last is compound")
}
}
// the unapplySeq method
def theUnapplySeqMethod = {
// extract arbitrary sequence of values: unapplySeq => Option[Seq[T]]
object Name {
def unapplySeq(arg: String): Option[Seq[String]] =
if (arg.trim == "") None else Some(arg.trim.split("""\s+"""))
// !do not supply both: unapply and unapplySeq with the same argument types!
// def unapply(arg: String): Option[(String, String)] = ???
}
"John van der Linden" match {
case Name(first, last) => println(s"2: $first $last")
case Name(first, middle, last) => println(s"3: $first $middle $last")
case Name(first, "van", "der", last) => println(s"van der: $first $last")
case _ => sys.error("oops")
}
}
// dynamic invocation
def dynamicInvocation = {
// best used with restraint, like operator overloading
// strongly typed language but it's possible to build dynamic dispatch subsystem;
// common problem for ORM libs: person.lastName = "Doe";
// trait scala.Dynamic: calls routed to special methods
import scala.language.dynamics // exotic feature
class Dynamite extends scala.Dynamic {
def log(msg: String): Unit = println(msg)
// * foo.method("blah") ~~> foo.applyDynamic("method")("blah")
// * foo.method(x = "blah") ~~> foo.applyDynamicNamed("method")(("x", "blah"))
// * foo.method(x = 1, 2) ~~> foo.applyDynamicNamed("method")(("x", 1), ("", 2))
// * foo.field ~~> foo.selectDynamic("field")
// * foo.varia = 10 ~~> foo.updateDynamic("varia")(10)
// * foo.arr(10) = 13 ~~> foo.selectDynamic("arr").update(10, 13)
def applyDynamic(method: String)(param: String): String = {
println(s"method: $method, parameter: $param")
"def applyDynamic(method: String)(param: String): String"
}
def applyDynamicNamed(method: String)(argpairs: (String, String)*): String = {
println(s"""method: $method, params: ${argpairs.toList.mkString(";")}""")
"def applyDynamicNamed(method: String)(argpairs: (String, String)*): String"
}
def updateDynamic(fldname: String)(value: String): Unit = {
println(s"update field: $fldname = $value")
}
def selectDynamic(fldname: String): String = {
println(s"read field: $fldname")
"def selectDynamic(fldname: String): String"
}
}
val obj = new Dynamite
// call log method
obj.log("foo")
// unnamed arguments: call applyDynamic("getFoo")(arg)
obj.getFoo("bar")
// named arguments, at least one: call applyDynamicNamed
obj.getFooExtended(file = "bar", section = "baz", "42")
// field assignment: call updateDynamic
obj.filename = "fileard"
// field accessor: call selectDynamic
val fn = obj.filename
// person.lastName = "Doe"
// val name = person.lastName
// val does = people.findByLastName("Doe")
// val johnDoes = people.find(lastName = "Doe", firstName = "John")
// book example
class DynamicProps(val props: java.util.Properties) extends Dynamic {
private def norm(name: String ) = name.replaceAll("_", ".")
def updateDynamic(name: String)(value: String) = props.setProperty(norm(name), value)
def selectDynamic(name: String) = props.getProperty(norm(name))
def applyDynamicNamed(name: String)(args: (String, String)*) = {
if (name != "add") throw new IllegalArgumentException
for ((k, v) <- args) props.setProperty(norm(k), v)
}
}
val sysProps = new DynamicProps(System.getProperties)
sysProps.username = "Fred"
val home = sysProps.java_home
sysProps.add(username="Fred", password="secret")
}
}
object Operators_Exercises {
// 1. According to the precedence rules, how are 3 + 4 -> 5 and 3 -> 4 + 5 evaluated?
def ex1 = {
/* postfix operators have lower precedence than infix operators
highest: an op character (# ? @ \ ~) other than those below
* / %
+ -
:
< >
! =
&
^
|
a char that is not an operator char (alphanumeric)
lowest: assignment operators (op=)
*/
// '->' starts with '-', so it shares precedence with '+'; evaluated left to right: (3 + 4) -> 5 == (7, 5)
val res = 3 + 4 -> 5
assert(res == (7, 5))
// 3 -> 4 + 5 = (3, 4) + 5 = type mismatch
}
// 2. The BigInt class has a pow method, not an operator.
// Why didn’t the Scala library designers choose ** (as in Fortran) or ^ (as in Pascal) for a power operator?
def ex2 = {
val bi: BigInt = 2
val res = 2 + bi pow 2
// '+' binds tighter than the alphanumeric 'pow' => (2 + 2) pow 2 == 16
assert(res == 16)
// if pow were spelled '**', it would bind like '*', tighter than '+' => 2 + (2 ** 2) == 6
// '^' conflicts with the bitwise XOR operator
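// for instance (sketch), '^' on Int already means XOR today:
val xor = 2 ^ 3 // 0b10 ^ 0b11 == 1, nothing like 2 to the 3rd power
assert(xor == 1)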
}
// 3. Implement the Fraction class with operations + - * /
// Normalize fractions,
// for example, turning 15/-6 into -5/2.
// Divide by the greatest common divisor, like this:
// class Fraction(n: Int, d: Int) {
// private val num: Int = if (d == 0) 1 else n * sign(d) / gcd(n, d);
// private val den: Int = if (d == 0) 0 else d * sign(d) / gcd(n, d);
// override def toString = s"$num/$den"
// def sign(a: Int) = if (a > 0) 1 else if (a < 0) -1 else 0
// def gcd(a: Int, b: Int): Int = if (b == 0) abs(a) else gcd(b, a % b)
// ...
// }
def ex3 = {
import scala.annotation.tailrec
class Fraction(n: Int, d: Int) {
import Fraction._
private[this] val _gcd = gcd(n.abs, d.abs)
val num: Int = if (d == 0) 1 else n * d.signum / _gcd
val den: Int = if (d == 0) 0 else d * d.signum / _gcd
override def toString = s"$num/$den"
def +(other: Fraction): Fraction = add(other)
def -(other: Fraction): Fraction = subtract(other)
def *(other: Fraction): Fraction = product(other)
def /(other: Fraction): Fraction = divide(other)
def add(other: Fraction): Fraction = _add(other)
def subtract(other: Fraction): Fraction = _add(other, plus=false)
def product(other: Fraction): Fraction = Fraction(num * other.num, den * other.den)
def divide(other: Fraction): Fraction = Fraction(num * other.den, den * other.num)
private def _add(other: Fraction, plus: Boolean = true) = {
val sign = if (plus) 1 else -1
Fraction(num*other.den + sign * (other.num * den), den * other.den)
}
}
object Fraction {
def apply(n: Int, d: Int): Fraction = new Fraction(n, d)
@tailrec def gcd(a: Int, b: Int): Int = if (b == 0) scala.math.abs(a) else gcd(b, a % b)
}
val res = Fraction(1,2) + Fraction(3,4) - Fraction(5,6) * Fraction(7,8) / Fraction(9,10)
assert(res.toString == "95/216")
}
// 4. Implement a class Money with fields for dollars and cents.
// Supply + - operators as well as comparison operators == and <
// For example, Money(1, 75) + Money(0, 50) == Money(2, 25) should be true.
// Should you also supply * and / operators? Why or why not?
def ex4 = {
// Should you also supply * and / operators? Why or why not?
// multiplying $1.25 by $3.33, or dividing $4.20 by $3.42, has no meaningful monetary unit, so * and / are omitted
def v1 = {
class Money($dollars: Int, $cents: Int) {
private val totalCents = Money.totalCents($dollars, $cents)
require(totalCents >= 0, "no support for negative amounts")
val dollars: Int = totalCents / 100
val cents: Int = totalCents % 100
def +(other: Money): Money = Money(dollars + other.dollars, cents + other.cents)
def -(other: Money): Money = Money(dollars - other.dollars, cents - other.cents)
def ==(other: Money): Boolean = totalCents == other.totalCents
def <(other: Money): Boolean = totalCents < other.totalCents
}
object Money {
def apply(dollars: Int, cents: Int): Money = new Money(dollars, cents)
def totalCents(dollars: Int, cents: Int): Int = dollars * 100 + cents
}
}
def v2 = {
class Money(private val totalCents: Int) {
val (dollars, cents) = Money.dollarsAndCents(totalCents)
def +(other: Money): Money = new Money(totalCents + other.totalCents)
def -(other: Money): Money = new Money(totalCents - other.totalCents)
def <(other: Money): Boolean = totalCents < other.totalCents
// def ==(other: Money): Boolean = totalCents == other.totalCents
final override def equals(other: Any): Boolean = other match {
case that: Money => this.totalCents == that.totalCents
case _ => false
}
override def hashCode(): Int = totalCents
}
object Money {
def apply(dollars: Int, cents: Int): Money = new Money(totalCents(dollars, cents))
def totalCents(dollars: Int, cents: Int): Int = dollars * 100 + cents
def dollarsAndCents(cents: Int): (Int, Int) = (cents / 100, cents % 100)
}
assert( Money(1, 75) + Money(0, 50) == Money(2, 25) )
}
}
// 5. Provide operators that construct an HTML table. For example,
// Table() | "Java" | "Scala" || "Gosling" | "Odersky" || "JVM" | "JVM, .NET"
// should produce
// <table><tr><td>Java</td><td>Scala</td></tr><tr><td>Gosling...
def ex5 = {
def simple = {
class Table {
private var rows = ""
override def toString: String = "<table>" + rows + closeRow + "</table>"
def |(cell: String): Table = { rows += openRow + "<td>" + cell + "</td>"; this }
def ||(cell: String): Table = { rows += openRow + "</tr><tr>"; this | cell }
private def openRow = if (rows.isEmpty) "<tr>" else ""
private def closeRow = if (rows.isEmpty) "" else "</tr>"
}
object Table { def apply(): Table = new Table() }
}
def immutable = {
case class Row(cells: Seq[String] = Seq.empty) {
def append(cell: String): Row = Row(cells :+ cell)
}
class Table(private val completeRows: Seq[Row] = Seq.empty,
private val currentRow: Row = Row(Seq.empty)) {
// interface
def |(cell: String): Table = addCell2CurrentRow(cell)
def ||(cell: String): Table = addCell2NewRow(cell)
override def toString: String = htmlTable
// implementation
def addCell2CurrentRow(cell: String): Table = {
new Table(completeRows, currentRow.append(cell))
}
def addCell2NewRow(cell: String): Table = {
new Table(completeRows :+ currentRow, Row().append(cell))
}
def htmlTable: String = { Table.htmlTable(completeRows :+ currentRow) }
}
object Table {
// interface
def apply() = new Table
// implementation
def htmlCell(cell: String): String = { "<td>" + cell + "</td>" }
def htmlRow(row: Row): String = {
val cells = for {
cell <- row.cells
} yield htmlCell(cell)
"<tr>" + cells.mkString + "</tr>"
}
def htmlTable(table: Seq[Row]): String = {
val rows = for {
row <- table
if row.cells.nonEmpty
} yield htmlRow(row)
"<table>" + rows.mkString + "</table>"
}
}
val res = Table() | "Java" | "Scala" || "Gosling" | "Odersky" || "JVM" | "JVM, .NET"
val expected =
"""
|<table>
|<tr><td>Java</td><td>Scala</td></tr>
|<tr><td>Gosling</td><td>Odersky</td></tr>
|<tr><td>JVM</td><td>JVM, .NET</td></tr>
|</table>
""".stripMargin.replaceAll("""\\n\\s*""", "").trim
assert(res.toString == expected)
}
}
// 6. Provide a class ASCIIArt whose objects contain figures such as
// /\_/\
// ( ' ' )
// ( - )
// | | |
// (__|__)
// Supply operators for combining two ASCIIArt figures horizontally
// /\_/\      -----
// ( ' ' )   / Hello \
// ( - )    <  Scala |
// | | |     \ Coder /
// (__|__)     -----
// or vertically. Choose operators with appropriate precedence.
def ex6 = {
class ASCIIArt(private val art: Seq[String] = Seq.empty) {
override def toString: String = art.mkString("\n")
// alphanumeric operators with same precedence
def before(other: ASCIIArt): ASCIIArt = { new ASCIIArt(ASCIIArt.concatLines(this.art, other.art)) }
def over(other: ASCIIArt): ASCIIArt = { new ASCIIArt(this.art ++ other.art) }
}
object ASCIIArt {
def apply(art: String = ""): ASCIIArt = {
if (art.isEmpty) new ASCIIArt() else new ASCIIArt(art.split("""\n"""))
}
def concatLines(left: Seq[String], right: Seq[String]): Seq[String] = {
val maxlen = left.map(_.length).max
val alignedleft = left.map(_.padTo(maxlen, ' '))
val emptyleft = " ".padTo(maxlen, ' ')
val pairs = alignedleft.zipAll(right, emptyleft, "")
pairs.map { case (l, r) => l + r}
}
}
val a = ASCIIArt(""" /\_/\
( ' ' )
( - )
| | |
(__|__)""")
val b = ASCIIArt(""" -----
/ Hello \
< Scala |
\ Coder /
-----""")
println(a before b)
println(a over b)
}
// 7. Implement a class BitSequence that stores a sequence of 64 bits packed in a Long value.
// Supply apply and update operators to get and set an individual bit.
def ex7 = {
// bit manipulation in java
class BitSequence(private var bits: Long = 0) {
// interface
def apply(bit: Int): Boolean = get(bit)
def update(bit: Int, value: Boolean): Unit = set(bit, value)
// implementation
private def get(bit: Int): Boolean = {
require(bit >= 0 && bit < 64, "bit must be between 0 and 63 inclusive")
(mask(bit) & bits) != 0L
}
private def set(bit: Int, value: Boolean): Unit = {
require(bit >= 0 && bit < 64, "bit must be between 0 and 63 inclusive")
if (value) bits = bits | mask(bit)
else bits = bits & ~mask(bit)
}
private def mask(bit: Int): Long = 1L << bit
}
// test
val bits = new BitSequence()
for (i <- 0 until 64) {
for (j <- 0 until i) assert(bits(j))
for (j <- i until 64) assert(!bits(j))
bits(i) = true
for (j <- 0 to i) assert(bits(j))
for (j <- i+1 until 64) assert(!bits(j))
}
for (i <- 0 until 64) {
for (j <- 0 until i) assert(!bits(j))
for (j <- i until 64) assert(bits(j))
bits(i) = false
for (j <- 0 to i) assert(!bits(j))
for (j <- i+1 until 64) assert(bits(j))
}
}
// 8. Provide a class Matrix. Choose whether you want to implement 2 × 2 matrices,
// square matrices of any size, or m × n matrices.
// Supply operations + and *
// The latter should also work with scalars, for example, mat * 2.
// A single element should be accessible as mat(row, col)
def ex8 = {
// not efficient, but fun and generic enough;
// Double can be replaced with type parameter
implicit def int2double(i: Int): Double = i.toDouble
object Matrix {
private case class Row(values: IndexedSeq[Double]) {
override def toString: String = values.mkString(" ")
def +(other: Row): Row = Row(values.zip(other.values).map { case (a, b) => a + b })
}
def apply(matrix: Seq[Seq[Double]]): Matrix = new Matrix(matrix.map(r =>
Row(r.toArray[Double])).toArray[Row])
}
class Matrix(private val m: IndexedSeq[Matrix.Row]) {
import Matrix._
def nrows: Int = m.length
def ncols: Int = m.head.values.length
require(nrows > 0 && m.forall(row => row.values.length == ncols),
"must have some rows and rows must have the same length")
override def toString: String = m.mkString("\n")
def apply(row: Int, col: Int): Double = {
assert(row >= 0 && row < nrows && col >= 0 && col < ncols)
m(row).values(col)
}
def +(other: Matrix): Matrix = {
require(nrows == other.nrows && ncols == other.ncols)
val newrows = m.zip(other.m).map { case (rowA, rowB) => rowA + rowB }
new Matrix(newrows)
}
def *(scalar: Double): Matrix = {
val newrows = m.map(row => Row(row.values.map(v => v * scalar)))
new Matrix(newrows)
}
def *(other: Matrix): Matrix = {
require(ncols == other.nrows)
// compute output row
def rowDotMatrix(row: Row) = {
// compute one value in row, column n
def rowDotCol(ncol: Int) = row.values.indices.map(nrow =>
row.values(nrow) * other(nrow, ncol)).sum
// produce value for each column
for (ncol <- 0 until other.ncols) yield rowDotCol(ncol)
}
// for each row in A compute row in C (for A dot B = C)
new Matrix(m.map(row => Row(rowDotMatrix(row))))
}
}
// test // TODO: add full-fledged test suite
val a = Matrix(Seq(
Seq(0, 4, -2),
Seq(-4, -3, 0)
))
val b = Matrix(Seq(
Seq(0, 1),
Seq(1, -1),
Seq(2, 3)
))
val prod = a * b
println(s"product: \\n${prod}")
// product:
//0.0 -10.0
//-3.0 -1.0
val aa = Matrix(Seq(
Seq(1, 2),
Seq(3, 4)
))
val bb = Matrix(Seq(
Seq(5, 6),
Seq(7, 8)
))
val sum = aa + bb
println(s"sum: \\n${sum}")
// sum:
//6.0 8.0
//10.0 12.0
}
// 9. Define an object PathComponents with an unapply operation that extracts the
// directory path and file name from a java.nio.file.Path.
// For example, the file /home/cay/readme.txt has directory path /home/cay and file name readme.txt
def ex9 = {
import java.nio.file.{Path, FileSystems}
import scala.util.{Try, Success, Failure}
object PathComponents {
def unapply(path: Path): Option[(String, String)] = {
val dirpath = Option(path.getParent)
val filename = Option(path.getFileName)
dirpath.flatMap(d => filename.map(f => (d.toString, f.toString)))
}
}
// test
val PathComponents(dirpath, filename) = FileSystems.getDefault.getPath("/home/cay/readme.txt")
assert(dirpath == "/home/cay" && filename == "readme.txt")
// catch failures
FileSystems.getDefault.getPath("readme.txt") match {
case PathComponents(dirpath, filename) => println(s"dir: '${dirpath}', file: '${filename}' ")
case _ => println("oops, wrong dir/file pair")
}
Try {
val PathComponents(dirpath, filename) = FileSystems.getDefault.getPath("readme.txt")
(dirpath, filename)
} match {
case Success((dirpath, filename)) => println(s"dir: '${dirpath}', file: '${filename}' ")
case Failure(err) => println(s"error: ${err.getMessage}")
}
}
// 10. Modify the PathComponents object of the preceding exercise to instead define an
// unapplySeq operation that extracts all path segments.
// For example, for the file /home/cay/readme.txt, you should produce a sequence of three segments:
// home, cay, and readme.txt
def ex10 = {
import java.nio.file.{Path, FileSystems}
import scala.collection.JavaConverters._
object PathComponents {
def unapplySeq(path: Path): Option[Seq[String]] = {
val res = path.iterator.asScala.map(p => p.toString)
Some(res.toList)
}
}
// test
FileSystems.getDefault.getPath("/home/cay/readme.txt") match {
case PathComponents(one, two, file) => println(s"three components: ${one}, $two, $file")
case _ => println("oops, can't find match")
}
val PathComponents(root, _*) = FileSystems.getDefault.getPath("/home/cay/readme.txt")
println(s"""root: $root""")
}
// 11. Improve the dynamic property selector in Section 11.11, “Dynamic Invocation,” on page 150
// so that one doesn’t have to use underscores.
// For example, sysProps.java.home should select the property with key "java.home".
// Use a helper class, also extending Dynamic, that contains partially completed paths.
def ex11 = {
// TODO: implement updateDynamic, applyDynamicNamed, e.g:
// sysProps.username = "Fred"
// sysProps.add(username="Fred", password="secret")
import scala.language.dynamics
class DynamicProperty(val props: java.util.Properties, val name: String) extends Dynamic {
override def toString: String = Option(props.getProperty(name)).getOrElse("")
def selectDynamic(name: String): DynamicProperty = new DynamicProperty(props, s"${this.name}.$name")
}
class DynamicProps(val props: java.util.Properties) extends Dynamic {
import scala.collection.JavaConverters._
for ((k,v) <- props.asScala) println(s" '${k}' -> '${v}' ")
def selectDynamic(name: String): DynamicProperty = new DynamicProperty(props, name)
}
// test
val sysProps = new DynamicProps(System.getProperties)
val home = sysProps.java.home
println(s"java.home='${home.toString}'")
assert(home.toString == sysProps.props.getProperty("java.home"))
assert(sysProps.java.io.tmpdir.toString == sysProps.props.getProperty("java.io.tmpdir"))
}
// 12. Define a class XMLElement that models an XML element with a name, attributes, and child
// elements. Using dynamic selection and method calls, make it possible to select paths such as
// rootElement.html.body.ul(id="42").li
// which should return all li elements inside ul with id attribute 42 inside body inside html
def ex12 = {
// simple, no fancy stuff, only to implement dynamic features
import scala.language.dynamics
case class Attribute(name: String, value: String) {
override def toString: String = s"Attribute: name: ${name}, value: $value"
}
case class XMLElementSeq(list: Seq[XMLElement] = Seq.empty)
extends Dynamic with Iterable[XMLElement]{
override def iterator: Iterator[XMLElement] = list.iterator
override def toString(): String = "XMLElementSeq: " + list.mkString("\n")
def selectDynamic(name: String): XMLElementSeq = {
val found = list.flatMap(_.children.filter(el =>
el.name.toLowerCase == name.toLowerCase))
XMLElementSeq(found)
}
def applyDynamicNamed(name: String)(args: (String, String)*): XMLElementSeq = {
val found = list.flatMap(_.children.filter(el =>
el.name.toLowerCase == name.toLowerCase &&
el.filterAttribs(args.toList).nonEmpty ))
XMLElementSeq(found)
}
}
class XMLElement(val name: String,
val attributes: Seq[Attribute] = Seq.empty,
val children: Seq[XMLElement] = Seq.empty)
extends Dynamic {
override def toString: String =
s"""XMLElement: name: $name, attributes: ${attributes.mkString(",")};
children: ${children.mkString("\n\t")} """.trim
def selectDynamic(name: String): XMLElementSeq =
XMLElementSeq(children.filter(_.name.toLowerCase == name.toLowerCase))
def filterAttribs(kvs: Seq[(String, String)]): Seq[Attribute] = {
attributes.filter(a => kvs.exists { case (k, v) =>
a.name.toLowerCase == k.toLowerCase && a.value.toLowerCase == v.toLowerCase
})
}
}
// TODO: add test suite
// test
val root = {
val uls = Seq(
new XMLElement("ul", Seq(Attribute("id", "42")), Seq(new XMLElement("li", Seq(Attribute("id", "37"))))),
new XMLElement("ul", Seq(Attribute("id", "43")), Seq(new XMLElement("li", Seq(Attribute("id", "73")))))
)
val body = new XMLElement("body", children = uls)
val html = new XMLElement("html", children = Seq(body))
new XMLElement("root", children = Seq(html))
}
val res = root.html.body.ul(id="42").li.toList
assert(res.length == 1 &&
res.head.name == "li" &&
res.head.attributes.head.name == "id" &&
res.head.attributes.head.value == "37")
println(s"root: ${root}")
println(s"found li: ${res}")
}
// 13. Provide an XMLBuilder class for dynamically building XML elements, as
// builder.ul(id="42", style="list-style: lower-alpha;")
// where the method name becomes the element name and the named arguments become the attributes.
// Come up with a convenient way of building nested elements.
def ex13 = {
import scala.language.dynamics
case class Attribute(name: String, value: String) {
override def toString: String = s"Attribute: name: $name, value: $value"
}
case class XMLElement(name: String,
attributes: Seq[Attribute] = Seq.empty,
children: Seq[XMLElement] = Seq.empty) {
override def toString: String = s"""XMLElement: name: $name, attributes: ${attributes.mkString(",")};
children: ${children.mkString("\n\t")} """.trim
// or using this.applyDynamicNamed: call builder and append to children
// or using builder.applyDynamicNamed unnamed arguments to pass children to elem
def append(child: XMLElement): XMLElement = this.copy(children = children :+ child)
}
class XMLBuilder extends Dynamic {
def applyDynamicNamed(name: String)(args: (String, String)*): XMLElement = {
XMLElement(name, args.toList.map {case(k,v) => Attribute(k, v)})
}
}
// TODO: add test suite
// test
val bldr = new XMLBuilder
val elem = bldr.ul(id="42", style="list-style: lower-alpha;")
.append(bldr.li(id="37"))
.append(bldr.li(id="73"))
println(s"ul + 2 li: ${elem}")
assert(elem.name == "ul" && elem.children.length == 2 && elem.attributes.length == 2)
}
}
| vasnake/scala-for-the-impatient | src/main/scala/Chapter11/Operators.scala | Scala | gpl-3.0 | 36,560 |
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.cache
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeatureType
object CacheKeyGenerator {
/**
* Encodes the simple feature type for use as a cache key. Note that this encodes all user data, not just
* geomesa-prefixed ones
*
* @param sft simple feature type
* @return
*/
def cacheKey(sft: SimpleFeatureType): String =
s"${sft.getName};${SimpleFeatureTypes.encodeType(sft)}${SimpleFeatureTypes.encodeUserData(sft.getUserData)}"
/**
* Restores a simple feature type from a cache key
*
* @param key cache key
* @return
*/
def restore(key: String): SimpleFeatureType = {
val i = key.indexOf(';')
SimpleFeatureTypes.createImmutableType(key.substring(0, i), key.substring(i + 1))
}
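// usage sketch (assumes an existing SimpleFeatureType instance `sft`):
//   val key = cacheKey(sft)
//   val restored = restore(key) // same type name, spec and user data as `sft`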
}
| aheyne/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/cache/CacheKeyGenerator.scala | Scala | apache-2.0 | 1,312 |
package models.util
import play.api.libs.concurrent.Akka
import play.api.Play.current
import scala.concurrent.ExecutionContext
object ThreadPools {
implicit val analysisExecutionContext: ExecutionContext = Akka.system.dispatchers.lookup("akka.actor.analysis")
implicit val importExecutionContext: ExecutionContext = Akka.system.dispatchers.lookup("akka.actor.import")
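// the lookups above assume matching dispatcher definitions in the application
// configuration, e.g. (sketch; the paths must match "akka.actor.analysis" / "akka.actor.import"):
//   akka.actor.analysis {
//     type = Dispatcher
//     executor = "fork-join-executor"
//   }
//   akka.actor.import {
//     type = Dispatcher
//     executor = "thread-pool-executor"
//   }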
}
| haffla/stream-compare | app/models/util/ThreadPools.scala | Scala | gpl-3.0 | 377 |
package com.sageserpent.americium
import cats.Traverse
import com.sageserpent.americium.java.CaseFactory
import _root_.java.time.Instant
import scala.collection.Factory
trait TrialsApi {
def delay[Case](delayed: => Trials[Case]): Trials[Case]
def only[Case](onlyCase: Case): Trials[Case]
def choose[Case](
firstChoice: Case,
secondChoice: Case,
otherChoices: Case*
): Trials[Case]
def choose[Case](choices: Iterable[Case]): Trials[Case]
def chooseWithWeights[Case](
firstChoice: (Int, Case),
secondChoice: (Int, Case),
otherChoices: (Int, Case)*
): Trials[Case]
def chooseWithWeights[Case](choices: Iterable[(Int, Case)]): Trials[Case]
def alternate[Case](
firstAlternative: Trials[Case],
secondAlternative: Trials[Case],
otherAlternatives: Trials[Case]*
): Trials[Case]
def alternate[Case](alternatives: Iterable[Trials[Case]]): Trials[Case]
def alternateWithWeights[Case](
firstAlternative: (Int, Trials[Case]),
secondAlternative: (Int, Trials[Case]),
otherAlternatives: (Int, Trials[Case])*
): Trials[Case]
def alternateWithWeights[Case](
alternatives: Iterable[(Int, Trials[Case])]
): Trials[Case]
def sequences[Case, Sequence[_]: Traverse](
sequenceOfTrials: Sequence[Trials[Case]]
)(implicit
factory: Factory[Case, Sequence[Case]]
): Trials[Sequence[Case]]
def complexities: Trials[Int]
def stream[Case](factory: CaseFactory[Case]): Trials[Case]
def streamLegacy[Case](factory: Long => Case): Trials[Case]
def bytes: Trials[Byte]
def integers: Trials[Int]
def integers(lowerBound: Int, upperBound: Int): Trials[Int]
def integers(
lowerBound: Int,
upperBound: Int,
shrinkageTarget: Int
): Trials[Int]
def nonNegativeIntegers: Trials[Int]
def longs: Trials[Long]
def longs(lowerBound: Long, upperBound: Long): Trials[Long]
def longs(
lowerBound: Long,
upperBound: Long,
shrinkageTarget: Long
): Trials[Long]
def nonNegativeLongs: Trials[Long]
def doubles: Trials[Double]
def booleans: Trials[Boolean]
def characters: Trials[Char]
def characters(lowerBound: Char, upperBound: Char): Trials[Char]
def characters(
lowerBound: Char,
upperBound: Char,
shrinkageTarget: Char
): Trials[Char]
def instants: Trials[Instant]
def strings: Trials[String]
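// usage sketch (assumes some implementation of this trait, e.g. `val api: TrialsApi`):
//   val digits: Trials[Int] = api.choose(0 to 9)
//   val smallPositives: Trials[Int] = api.integers(1, 100)
//   val letterOrDigit: Trials[Char] = api.alternate(api.characters('a', 'z'), api.characters('0', '9'))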
}
| sageserpent-open/americium | src/main/scala/com/sageserpent/americium/TrialsApi.scala | Scala | mit | 2,405 |
package me.laiseca.restcale.http
object HttpMethod {
val DELETE = "DELETE"
val GET = "GET"
val PATCH = "PATCH"
val POST = "POST"
val PUT = "PUT"
} | xabierlaiseca/restcale | shared/src/main/scala/me/laiseca/restcale/http/HttpMethod.scala | Scala | apache-2.0 | 157 |
/*
* RichFunction0.scala
*
* To change this template, choose Tools | Template Manager
* and open the template in the editor.
*/
package scala.actors.controlflow
import scala.actors._
import scala.actors.controlflow.ControlFlow._
/**
* An extension of a <code>Function0</code> that provides support for
* asynchronous operations.
*/
trait RichFunction0[+R] extends Function0[R] {
/**
* Applies this function, capturing the result as a <code>FunctionResult</code>.
*/
def resultApply: FunctionResult[R]
/**
* Creates an asynchronous version of this function.
*/
def toAsyncFunction: AsyncFunction0[R]
}
| bingoyang/scala-otp | controlflow/src/main/scala/scala/actors/controlflow/RichFunction0.scala | Scala | bsd-3-clause | 635 |
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.really.json.collection
import play.api.libs.json._
import reactivemongo.api._
import reactivemongo.api.collections._
import reactivemongo.bson.BSONDocument
import reactivemongo.bson.buffer._
import reactivemongo.core.commands.{ GetLastError, LastError }
import scala.concurrent.{ ExecutionContext, Future }
/**
* A Collection that interacts with the Play JSON library, using `Reads` and `Writes`.
*/
object `package` {
implicit object JSONCollectionProducer extends GenericCollectionProducer[JsObject, Reads, Writes, JSONCollection] {
def apply(db: DB, name: String, failoverStrategy: FailoverStrategy) = new JSONCollection(db, name, failoverStrategy)
}
}
trait JSONGenericHandlers extends GenericHandlers[JsObject, Reads, Writes] {
import io.really.json.BSONFormats._
object StructureBufferReader extends BufferReader[JsObject] {
def read(buffer: ReadableBuffer) = {
Json.toJson(BSONDocument.read(buffer)).as[JsObject]
}
}
object StructureBufferWriter extends BufferWriter[JsObject] {
def write[B <: reactivemongo.bson.buffer.WritableBuffer](document: JsObject, buffer: B): B = {
BSONDocument.write(Json.fromJson[BSONDocument](document).get, buffer)
buffer
}
}
case class BSONStructureReader[T](reader: Reads[T]) extends GenericReader[JsObject, T] {
def read(doc: JsObject) = reader.reads(doc) match {
case success: JsSuccess[T] => success.get
case error: JsError => throw new NoSuchElementException(error.toString)
}
}
case class BSONStructureWriter[T](writer: Writes[T]) extends GenericWriter[T, JsObject] {
def write(t: T) = writer.writes(t).as[JsObject]
}
def StructureReader[T](reader: Reads[T]) = BSONStructureReader(reader)
def StructureWriter[T](writer: Writes[T]): GenericWriter[T, JsObject] = BSONStructureWriter(writer)
}
object JSONGenericHandlers extends JSONGenericHandlers
case class JSONDocumentReaderAsBufferReader[T](reader: Reads[T]) extends BufferReader[T] {
def read(buffer: ReadableBuffer) = reader.reads(JSONGenericHandlers.StructureBufferReader.read(buffer)).get
}
/**
* A Collection that interacts with the Play JSON library, using `Reads` and `Writes`.
*/
case class JSONCollection(
db: DB,
name: String,
failoverStrategy: FailoverStrategy
) extends GenericCollection[JsObject, Reads, Writes] with JSONGenericHandlers with CollectionMetaCommands {
def genericQueryBuilder: GenericQueryBuilder[JsObject, Reads, Writes] =
JSONQueryBuilder(this, failoverStrategy)
/**
* Inserts the document, or updates it if it already exists in the collection.
*
* @param doc The document to save.
*/
def save(doc: JsObject)(implicit ec: ExecutionContext): Future[LastError] =
save(doc, GetLastError())
/**
* Inserts the document, or updates it if it already exists in the collection.
*
* @param doc The document to save.
* @param writeConcern the [[reactivemongo.core.commands.GetLastError]] command message to send in order to control how the document is inserted. Defaults to GetLastError().
*/
def save(doc: JsObject, writeConcern: GetLastError)(implicit ec: ExecutionContext): Future[LastError] = {
import reactivemongo.bson._
import io.really.json.BSONFormats
(doc \\ "_id" match {
case _: JsUndefined => insert(doc + ("_id" -> BSONFormats.BSONObjectIDFormat.writes(BSONObjectID.generate)), writeConcern)
case id => update(Json.obj("_id" -> id), doc, writeConcern, upsert = true)
})
}
/**
* Inserts the document, or updates it if it already exists in the collection.
*
* @param doc The document to save.
* @param writeConcern the [[reactivemongo.core.commands.GetLastError]] command message to send in order to control how the document is inserted. Defaults to GetLastError().
*/
def save[T](doc: T, writeConcern: GetLastError = GetLastError())(implicit ec: ExecutionContext, writer: Writes[T]): Future[LastError] =
save(writer.writes(doc).as[JsObject], writeConcern)
}
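// usage sketch (assumes a connected `db: DB` and an implicit ExecutionContext;
// the collection name and `someId` below are illustrative):
//   val users = db.collection[JSONCollection]("users")
//   users.save(Json.obj("name" -> "Jane"))                  // inserts with a generated _id
//   users.save(Json.obj("_id" -> someId, "name" -> "Jane")) // updates/upserts by _id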
case class JSONQueryBuilder(
collection: Collection,
failover: FailoverStrategy,
queryOption: Option[JsObject] = None,
sortOption: Option[JsObject] = None,
projectionOption: Option[JsObject] = None,
hintOption: Option[JsObject] = None,
explainFlag: Boolean = false,
snapshotFlag: Boolean = false,
commentString: Option[String] = None,
options: QueryOpts = QueryOpts()
) extends GenericQueryBuilder[JsObject, Reads, Writes] with JSONGenericHandlers {
import reactivemongo.utils.option
type Self = JSONQueryBuilder
private def empty = Json.obj()
protected def writeStructureIntoBuffer[B <: WritableBuffer](document: JsObject, buffer: B): B = {
JSONGenericHandlers.StructureBufferWriter.write(document, buffer)
}
object structureReader extends Reads[JsObject] {
def reads(json: JsValue): JsResult[JsObject] = json.validate[JsObject]
}
protected def toStructure[T](writer: Writes[T], subject: T) = writer.writes(subject)
def convert[T](reader: Reads[T]): BufferReader[T] = JSONDocumentReaderAsBufferReader(reader)
def copy(queryOption: Option[JsObject], sortOption: Option[JsObject], projectionOption: Option[JsObject], hintOption: Option[JsObject], explainFlag: Boolean, snapshotFlag: Boolean, commentString: Option[String], options: QueryOpts, failover: FailoverStrategy): JSONQueryBuilder =
JSONQueryBuilder(collection, failover, queryOption, sortOption, projectionOption, hintOption, explainFlag, snapshotFlag, commentString, options)
def merge: JsObject = {
if (!sortOption.isDefined && !hintOption.isDefined && !explainFlag && !snapshotFlag && !commentString.isDefined)
queryOption.getOrElse(Json.obj())
else {
Json.obj("$query" -> (queryOption.getOrElse(empty): JsObject)) ++
sortOption.map(o => Json.obj("$orderby" -> o)).getOrElse(empty) ++
hintOption.map(o => Json.obj("$hint" -> o)).getOrElse(empty) ++
commentString.map(o => Json.obj("$comment" -> o)).getOrElse(empty) ++
option(explainFlag, JsBoolean(true)).map(o => Json.obj("$explain" -> o)).getOrElse(empty) ++
option(snapshotFlag, JsBoolean(true)).map(o => Json.obj("$snapshot" -> o)).getOrElse(empty)
}
}
} | reallylabs/really | modules/really-core/src/main/scala/io/really/json/collection.scala | Scala | apache-2.0 | 6,845 |
package dmtest
import java.nio.file.{Path, Paths}
import dmtest.stack.{Memory, Direct, Pool}
import org.scalatest._
trait DMTestSuite extends FunSuite with BeforeAndAfterEach with BeforeAndAfterAll {
def isDebugMode: Boolean = Config.rootConfig.isEmpty
implicit class Compare[A](a: A) {
def `<>`(b: A): A = if (isDebugMode) b else a
}
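// e.g. (sketch): `val nrLoops = 1000 <> 10` keeps the large value for real runs
// but falls back to the small one in debug mode (no root config)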
override def withFixture(test: NoArgTest) = {
logger.info(s"[TEST] ${test.name}")
test()
}
def slowDevice(size: Sector): Stack = {
if (isDebugMode) {
Memory(size)
} else {
Pool.S(slowPool, size)
}
}
def fastDevice(size: Sector): Stack = {
if (isDebugMode) {
Memory(size)
} else {
Pool.S(fastPool, size)
}
}
private var slowPool: Pool = _
private var fastPool: Pool = _
override def beforeAll = {
if (!isDebugMode) {
val config = Config.rootConfig.get
slowPool = Pool(Direct(config.slowDevice))
fastPool = Pool(Direct(config.fastDevice))
}
}
override def afterAll = {
}
private var _numMount: Int = 0
private var _numTable: Int = 0
def numMount: Int = {
Shell("mount", quiet=true).split("\\n").size
}
def numTable: Int = {
Shell("dmsetup table", quiet=true).split("\\n").size
}
override def beforeEach = {
_numMount = numMount
_numTable = numTable
TempFile.mount()
}
override def afterEach = {
TempFile.umount()
if (numMount != _numMount)
logger.error("mount inconsistent before and after test")
if (numTable != _numTable)
logger.error("dmsetup table inconsistent before and after test")
}
}
| akiradeveloper/dmtest | src/test/scala/dmtest/DMTestSuite.scala | Scala | apache-2.0 | 1,617 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.services.queries
import com.normation.inventory.domain.NodeId
import com.normation.rudder.domain.nodes.NodeInfo
import com.normation.rudder.domain.queries.Query
import com.normation.ldap.sdk.LDAPEntry
import net.liftweb.common.Box
import scala.collection.Seq
trait QueryProcessor {
/**
* Process a query and (hopefully) return the list of entries that match it.
* @param query - the query to process
* @return
*/
def process(query:Query) : Box[Seq[NodeInfo]]
}
trait QueryChecker {
/**
* Each server denoted by its id is tested against the query to see if it
* matches. Return the list of matching server ids.
* @param Query
* the query to test
* @param Seq[NodeId]
* list of servers which have to be tested against the query
* @return
* Empty or Failure in case of an error during the process
* Full(seq) with seq being the list of nodeIds which satisfy the
* query.
*/
def check(query:Query, nodeIds:Seq[NodeId]) : Box[Seq[NodeId]]
} | armeniaca/rudder | rudder-core/src/main/scala/com/normation/rudder/services/queries/QueryProcessor.scala | Scala | gpl-3.0 | 2,805 |
package io.ino.solrs
import io.ino.solrs.AsyncSolrClientMocks.mockDoRequest
import io.ino.solrs.CloudSolrServers.WarmupQueries
import io.ino.solrs.SolrMatchers.hasBaseUrlOf
import io.ino.solrs.SolrMatchers.hasQuery
import io.ino.time.Clock
import org.apache.solr.client.solrj.SolrQuery
import org.apache.solr.client.solrj.embedded.JettySolrRunner
import org.apache.solr.client.solrj.impl.CloudSolrClient
import org.apache.solr.client.solrj.impl.HttpSolrClient
import org.apache.solr.client.solrj.request.QueryRequest
import org.apache.solr.client.solrj.response.QueryResponse
import org.apache.solr.common.SolrInputDocument
import org.apache.solr.common.params.ShardParams.SHARDS
import org.apache.solr.common.params.ShardParams._ROUTE_
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito._
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.Eventually.eventually
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.Millis
import org.scalatest.time.Span
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
/**
* Test that starts ZK, solrRunners and our Class Under Test before all tests.
*/
class CloudSolrServersIntegrationSpec extends StandardFunSpec {
private implicit val awaitTimeout: FiniteDuration = 2 seconds
private implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = scaled(Span(20000, Millis)),
interval = scaled(Span(1000, Millis)))
private type AsyncSolrClient = io.ino.solrs.AsyncSolrClient[Future]
private var solrRunner: SolrCloudRunner = _
private def zkConnectString = solrRunner.zkAddress
private def solrServerUrls = solrRunner.solrCoreUrls
private def solrServerUrlsEnabled = solrServerUrls.map(SolrServer(_, Enabled))
private var solrJClient: CloudSolrClient = _
private var asyncSolrClients: Map[JettySolrRunner, AsyncSolrClient] = _
private var cut: CloudSolrServers[Future] = _
import io.ino.solrs.SolrUtils._
override def beforeAll(): Unit = {
// create a 2 node cluster with one collection that has 2 shards with 2 replicas
solrRunner = SolrCloudRunner.start(
numServers = 4,
collections = List(SolrCollection("collection1", replicas = 2, shards = 2)),
defaultCollection = Some("collection1")
)
solrJClient = solrRunner.solrJClient
asyncSolrClients = solrRunner.jettySolrRunners.map(jetty => jetty -> AsyncSolrClient(s"${jetty.getBaseUrl}/collection1")).toMap
eventually(Timeout(10 seconds)) {
solrJClient.deleteByQuery("*:*")
}
import scala.jdk.CollectionConverters._
solrJClient.add(someDocs.asJavaCollection)
solrJClient.commit()
}
override protected def beforeEach(): Unit = {
super.beforeEach()
// ensure that all nodes are running, and none's left in stopped state
solrRunner.jettySolrRunners.foreach { jetty =>
if(jetty.isStopped) SolrRunner.startJetty(jetty)
}
}
override def afterEach(): Unit = {
cut.shutdown()
super.afterEach()
}
override def afterAll(): Unit = {
for (asyncSolrClient <- asyncSolrClients.values) {
asyncSolrClient.shutdown()
}
solrJClient.close()
solrRunner.shutdown()
}
describe("CloudSolrServers") {
it("should list available solr instances") {
cut = new CloudSolrServers(zkConnectString, clusterStateUpdateInterval = 100 millis)
cut.setAsyncSolrClient(mockDoRequest(mock[AsyncSolrClient])(Clock.mutable))
eventually {
import Equalities.solrServerStatusEquality
cut.all should contain theSameElementsAs solrServerUrlsEnabled
}
asyncSolrClients.foreach { case(_, client) =>
eventually {
// don't use Int.MaxValue to get all docs with distributed queries,
// see also https://stackoverflow.com/questions/32046716/solr-to-get-all-records
val response = client.query(new SolrQuery("*:*").setRows(1000)).map(getIds)
await(response) should contain theSameElementsAs someDocsIds
}
}
}
it("should update available solr instances") {
cut = new CloudSolrServers(zkConnectString, clusterStateUpdateInterval = 100 millis)
cut.setAsyncSolrClient(mockDoRequest(mock[AsyncSolrClient])(Clock.mutable))
import Equalities.solrServerStatusEquality
val expectedSolrServers = solrServerUrlsEnabled
eventually {
cut.all should contain theSameElementsAs expectedSolrServers
}
SolrRunner.stopJetty(solrRunner.jettySolrRunners.head)
expectedSolrServers.head.status = Failed
eventually {
cut.all should contain theSameElementsAs expectedSolrServers
}
SolrRunner.startJetty(solrRunner.jettySolrRunners.head)
expectedSolrServers.head.status = Enabled
eventually {
cut.all should contain theSameElementsAs expectedSolrServers
}
}
it("should route requests according to _route_ param") {
cut = new CloudSolrServers(zkConnectString, defaultCollection = Some("collection1"), clusterStateUpdateInterval = 100 millis)
cut.setAsyncSolrClient(mockDoRequest(mock[AsyncSolrClient])(Clock.mutable))
import Equalities.solrServerStatusEquality
val docs = indexShardedDocs(shardKey = docNr => s"KEY$docNr")
// for each document determine in which shard replica (core) it's stored, because this reflects the decision
// of Solrs internal routing logic.
// we only want to query these replicas, i.e. route the request to them
def serverContainsDoc(url: String, id: String): Boolean = {
val client = new HttpSolrClient.Builder(url).withHttpClient(solrJClient.getHttpClient).build()
// restrict search to exactly this shard replica
client.query(new SolrQuery(s"""id:"$id"""").setParam(SHARDS, url)).getResults.getNumFound > 0
}
val expectedServersByDoc: Map[SolrInputDocument, List[String]] = docs.map { doc =>
val id = doc.getFieldValue("id").toString
val expectedServers = solrServerUrls.filter(serverContainsDoc(_, id))
doc -> expectedServers
}.toMap
expectedServersByDoc.foreach { case (doc, expectedServers) =>
val id = doc.getFieldValue("id").toString
val route = id.substring(0, id.indexOf('!') + 1)
val request = new QueryRequest(new SolrQuery("*:*").setParam(_ROUTE_, route))
cut.matching(request).get should contain theSameElementsAs expectedServers.map(SolrServer(_, Enabled))
}
// now stop a server
val solrServers = solrServerUrlsEnabled
SolrRunner.stopJetty(solrRunner.jettySolrRunners.head)
solrServers.head.status = Failed
eventually {
cut.all should contain theSameElementsAs solrServers
}
// ensure that the returned servers per route also contain the expected status
expectedServersByDoc.foreach { case (doc, expectedServers) =>
val id = doc.getFieldValue("id").toString
val route = id.substring(0, id.indexOf('!') + 1)
val request = new QueryRequest(new SolrQuery("*:*").setParam(_ROUTE_, route))
val expectedServersWithStatus = expectedServers.map {
case serverUrl if serverUrl == solrServers.head.baseUrl => SolrServer(serverUrl, Failed)
case serverUrl => SolrServer(serverUrl, Enabled)
}
cut.matching(request).get should contain theSameElementsAs expectedServersWithStatus
}
}
it("should test solr instances according to the WarmupQueries") {
val queries = Seq(new SolrQuery("foo"))
val warmupQueries = WarmupQueries(queriesByCollection = _ => queries, count = 2)
cut = new CloudSolrServers(zkConnectString, warmupQueries = Some(warmupQueries))
val standardResponsePromise = futureFactory.newPromise[QueryResponse]
val standardResponse = standardResponsePromise.future
val asyncSolrClient = mockDoRequest(mock[AsyncSolrClient], standardResponse)
cut.setAsyncSolrClient(asyncSolrClient)
// initially the list of servers should be empty
assert(cut.all.isEmpty)
// as soon as the response is set the LB should provide the servers...
standardResponsePromise.success(new QueryResponse())
eventually {
import Equalities.solrServerStatusEquality
cut.all should contain theSameElementsAs solrServerUrlsEnabled
}
// and the servers should have been tested with queries
solrServerUrlsEnabled.foreach { solrServer =>
warmupQueries.queriesByCollection("col1").foreach { q =>
verify(asyncSolrClient, times(warmupQueries.count)).doExecute[QueryResponse](hasBaseUrlOf(solrServer), hasQuery(q))(any())
}
}
}
// SOLR-5359 CloudSolrServer tries to connect to zookeeper forever when ensemble is unavailable
// + SOLR-4044 CloudSolrServer early connect problems
// -> start with zk down, it should recover at some time
// SOLR-6086 Replica active during Warming
// -> AsyncSolrClient test: query solr, restart node, all the time docs should be found as expected
// Support collection alias created after ZkStateReader has been constructed
// Solrs should serve queries when ZK is not available
// -> AsyncSolrClient test
}
private def indexShardedDocs(shardKey: Int => String): List[SolrInputDocument] = {
eventually(Timeout(10 seconds)) {
solrJClient.deleteByQuery("*:*")
}
val docs = (1 to 10).map { i =>
newInputDoc(s"${shardKey(i)}!id$i", s"doc$i", s"cat$i", i)
}.toList
import scala.jdk.CollectionConverters._
solrJClient.add(docs.asJavaCollection)
solrJClient.commit()
eventually {
val response = asyncSolrClients.values.head.query(new SolrQuery("*:*").setRows(10)).map(getIds)
await(response) should contain theSameElementsAs docs.map(_.getFieldValue("id").toString)
}
docs
}
}
| inoio/solrs | src/test/scala/io/ino/solrs/CloudSolrServersIntegrationSpec.scala | Scala | apache-2.0 | 10,039 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.util
import java.nio.ByteBuffer
import java.io.{IOException, ObjectOutputStream, EOFException, ObjectInputStream}
import java.nio.channels.Channels
/**
* A wrapper around a java.nio.ByteBuffer that is serializable through Java serialization, to make
* it easier to pass ByteBuffers in case class messages.
*/
private[spark]
class SerializableBuffer(@transient var buffer: ByteBuffer) extends Serializable {
def value = buffer
private def readObject(in: ObjectInputStream) {
val length = in.readInt()
buffer = ByteBuffer.allocate(length)
var amountRead = 0
val channel = Channels.newChannel(in)
while (amountRead < length) {
val ret = channel.read(buffer)
if (ret == -1) {
throw new EOFException("End of file before fully reading buffer")
}
amountRead += ret
}
buffer.rewind() // Allow us to read it later
}
private def writeObject(out: ObjectOutputStream) {
out.writeInt(buffer.limit())
if (Channels.newChannel(out).write(buffer) != buffer.limit()) {
throw new IOException("Could not fully write buffer to output stream")
}
buffer.rewind() // Allow us to write it again later
}
}
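// Minimal round-trip sketch (not part of the original source; the object name is
// illustrative): shows the wrapper surviving plain Java serialization.
private[spark] object SerializableBufferExample {
  def roundTrip(): SerializableBuffer = {
    val original = new SerializableBuffer(ByteBuffer.wrap(Array[Byte](1, 2, 3)))
    val bytes = new java.io.ByteArrayOutputStream()
    val out = new ObjectOutputStream(bytes)
    out.writeObject(original) // invokes the private writeObject above
    out.close()
    val in = new ObjectInputStream(new java.io.ByteArrayInputStream(bytes.toByteArray))
    val copy = in.readObject().asInstanceOf[SerializableBuffer]
    assert(copy.value.remaining() == 3) // buffer was rewound after deserialization
    copy
  }
}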
| rjpower/spark | core/src/main/scala/spark/util/SerializableBuffer.scala | Scala | apache-2.0 | 1,995 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import java.net.URL
/**
* This file has types that are used in parsing command line arguments to Runner.
*
* @author Bill Venners
*/
private[tools] sealed abstract class ReporterConfiguration
private[tools] case class GraphicReporterConfiguration(configSet: Set[ReporterConfigParam]) extends ReporterConfiguration
private[tools] case class StandardOutReporterConfiguration(configSet: Set[ReporterConfigParam]) extends ReporterConfiguration
private[tools] case class StandardErrReporterConfiguration(configSet: Set[ReporterConfigParam]) extends ReporterConfiguration
private[tools] case class FileReporterConfiguration(configSet: Set[ReporterConfigParam], fileName: String) extends ReporterConfiguration
private[tools] case class JunitXmlReporterConfiguration(configSet: Set[ReporterConfigParam], fileName: String) extends ReporterConfiguration
private[tools] case class DashboardReporterConfiguration(configSet: Set[ReporterConfigParam], fileName: String, numOldFilesToKeep: Int) extends ReporterConfiguration
private[tools] case class XmlReporterConfiguration(configSet: Set[ReporterConfigParam], fileName: String) extends ReporterConfiguration
private[tools] case class HtmlReporterConfiguration(configSet: Set[ReporterConfigParam], directory: String, cssFileName: Option[URL]) extends ReporterConfiguration
private[tools] case class CustomReporterConfiguration(configSet: Set[ReporterConfigParam], reporterClass: String) extends ReporterConfiguration
private[tools] case class XmlSocketReporterConfiguration(host: String, port: Int) extends ReporterConfiguration
private[tools] case class SocketReporterConfiguration(host: String, port: Int) extends ReporterConfiguration
// If there were no fileReporterSpecList or customReporterSpecList specified, you get Nil
// If there were no graphicReporterSpec, standardOutReporterSpec, or standardErrReporterSpec, you get None
private[tools] case class ReporterConfigurations(
val graphicReporterConfiguration: Option[GraphicReporterConfiguration],
val fileReporterConfigurationList: List[FileReporterConfiguration],
val junitXmlReporterConfigurationList: List[JunitXmlReporterConfiguration],
val dashboardReporterConfigurationList: List[DashboardReporterConfiguration],
val xmlReporterConfigurationList: List[XmlReporterConfiguration],
val standardOutReporterConfiguration: Option[StandardOutReporterConfiguration],
val standardErrReporterConfiguration: Option[StandardErrReporterConfiguration],
val htmlReporterConfigurationList: List[HtmlReporterConfiguration],
val customReporterConfigurationList: List[CustomReporterConfiguration],
val xmlSocketReporterConfigurationList: List[XmlSocketReporterConfiguration],
val socketReporterConfigurationList: List[SocketReporterConfiguration]
) extends Seq[ReporterConfiguration] {
val reporterConfigurationList =
List.concat[ReporterConfiguration](
graphicReporterConfiguration.toList,
fileReporterConfigurationList,
junitXmlReporterConfigurationList,
dashboardReporterConfigurationList,
xmlReporterConfigurationList,
standardOutReporterConfiguration.toList,
standardErrReporterConfiguration.toList,
htmlReporterConfigurationList,
customReporterConfigurationList,
xmlSocketReporterConfigurationList,
socketReporterConfigurationList
)
  // Need to add null pointer checks, or, later, a NotNull constraint
override def length = reporterConfigurationList.length
// override def elements = reporterConfigurationList.iterator
override def iterator = reporterConfigurationList.iterator // For 2.8
override def apply(i: Int) = reporterConfigurationList(i)
}
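
// Hypothetical sketch, not from the original ScalaTest sources: the "empty" configuration a
// caller could build when no reporters are specified, illustrating that ReporterConfigurations
// behaves as a Seq over whatever reporters are present. The object name is an assumption.
private[tools] object EmptyReporterConfigurationsExample {
  val empty = new ReporterConfigurations(None, Nil, Nil, Nil, Nil, None, None, Nil, Nil, Nil, Nil)
  // empty.length == 0 and empty.iterator yields nothing, since every slot above is None or Nil.
}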
| svn2github/scalatest | src/main/scala/org/scalatest/tools/ReporterConfiguration.scala | Scala | apache-2.0 | 4,304 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.dependencies
import java.io.{File, PrintStream}
import java.net.{URI, URL}
import java.nio.file.Files
import scala.util.Try
import org.apache.toree.utils.FileUtils
abstract class DependencyDownloader {
/**
* Retrieves the dependency and all of its dependencies as jars.
*
* @param groupId The group id associated with the main dependency
* @param artifactId The id of the dependency artifact
* @param version The version of the main dependency
* @param transitive If true, downloads all dependencies of the specified
* dependency
* @param excludeBaseDependencies If true, will exclude any dependencies
* included in the build of the kernel
* @param ignoreResolutionErrors If true, ignores any errors on resolving
* dependencies and attempts to download all
* successfully-resolved dependencies
* @param extraRepositories Additional repositories to use only for this
* dependency
* @param verbose If true, prints out additional information
* @param trace If true, prints trace of download process
*
   * @return The sequence of URIs representing the downloaded dependencies
   *         (possibly served from the local cache)
*/
def retrieve(
groupId: String,
artifactId: String,
version: String,
transitive: Boolean = true,
excludeBaseDependencies: Boolean = true,
ignoreResolutionErrors: Boolean = true,
extraRepositories: Seq[(URL, Option[Credentials])] = Nil,
verbose: Boolean = false,
trace: Boolean = false
): Seq[URI]
/**
* Sets the printstream to log to.
*
* @param printStream The new print stream to use for output logging
*/
def setPrintStream(printStream: PrintStream): Unit
/**
* Adds the specified resolver url as an additional search option.
*
   * @param url The url of the repository
   * @param credentials Optional credentials used to authenticate against the repository
*/
def addMavenRepository(url: URL, credentials: Option[Credentials]): Unit
/**
* Remove the specified resolver url from the search options.
*
* @param url The url of the repository
*/
def removeMavenRepository(url: URL): Unit
/**
* Returns a list of all repositories used by the downloader.
*
* @return The list of repositories as URIs
*/
def getRepositories: Seq[URI]
/**
* Sets the directory where all downloaded jars will be stored.
*
* @param directory The directory to use
*
* @return True if successfully set directory, otherwise false
*/
def setDownloadDirectory(directory: File): Boolean
/**
* Returns the current directory where dependencies will be downloaded.
*
* @return The directory as a string
*/
def getDownloadDirectory: String
/**
   * Assigns credentials to the matching repositories and builds (URL, credentials) tuples.
   *
   * @param repository Sequence of repository urls
   * @param credentials Sequence of credential filenames
   * @return Repositories paired with their optional credentials
*/
def resolveRepositoriesAndCredentials(repository: List[String], credentials: List[String]): List[(URL, Option[Credentials])] = {
val extraRepositories = repository.map(u => (u, Try(new URL(u))))
// Print error information
// extraRepositories.filter(_._2.isFailure).map(_._1)
// .foreach(u => printStream.println(s"Ignoring invalid URL $u"))
// match up credentials with repositories
val repositories = extraRepositories.flatMap(_._2.toOption)
val authentication = credentials
.map(f => new File(f))
.map(Credentials(_))
.map(c => (c.host, c)).toMap
val repositoriesWithCreds = repositories.map(u => (u, authentication.get(u.getHost)))
repositoriesWithCreds
}
}
object DependencyDownloader {
/** Default Maven repository to use with downloaders. */
val DefaultMavenRepository = new URL("https://repo1.maven.org/maven2")
/** Default download directory for dependencies. */
val DefaultDownloadDirectory =
FileUtils.createManagedTempDirectory("toree-dependency-downloads")
}
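
// Illustrative usage sketch, not part of the original Toree sources: wiring up a concrete
// DependencyDownloader before fetching a single artifact. The object name and the
// group/artifact/version coordinates are placeholder assumptions.
object DependencyDownloaderExample {
  def fetch(downloader: DependencyDownloader): Seq[URI] = {
    downloader.addMavenRepository(DependencyDownloader.DefaultMavenRepository, None)
    downloader.setDownloadDirectory(DependencyDownloader.DefaultDownloadDirectory)
    downloader.retrieve(
      groupId = "org.scalatest",
      artifactId = "scalatest_2.11",
      version = "3.0.0"
    )
  }
}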
| chipsenkbeil/incubator-toree | kernel-api/src/main/scala/org/apache/toree/dependencies/DependencyDownloader.scala | Scala | apache-2.0 | 4,858 |
package de.ljfa.advbackport.logic
import net.minecraft.block.Block
import net.minecraft.item.ItemStack
import net.minecraft.nbt.NBTTagList
import net.minecraftforge.common.util.Constants
object ItemLogic {
def canDestroy(tool: ItemStack, block: Block): Boolean = checkList("CanDestroy", tool, block)
def canPlaceOn(stack: ItemStack, block: Block): Boolean = checkList("CanPlaceOn", stack, block)
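
    // Both helpers read a plain NBT string list from the stack's tag, e.g. (illustrative):
    //   {CanDestroy: ["minecraft:stone", "minecraft:dirt"]}
    // checkList returns true iff the named list exists and at least one entry resolves,
    // via Block.getBlockFromName, to the very block instance passed in.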
def checkList(listName: String, stack: ItemStack, block: Block): Boolean = {
stack.getTagCompound match {
case null => false
case tag if tag.hasKey(listName, Constants.NBT.TAG_LIST) => {
val list = asStringList(tag.getTagList(listName, Constants.NBT.TAG_STRING))
list exists { Block.getBlockFromName(_) eq block }
}
case _ => false
}
}
def asStringList(nbtList: NBTTagList) = for(i <- 0 until nbtList.tagCount) yield nbtList getStringTagAt i
}
| ljfa-ag/Adventure-Backport | src/main/scala/de/ljfa/advbackport/logic/ItemLogic.scala | Scala | mit | 968 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.guacamole.variants
import com.esotericsoftware.kryo.io.{ Input, Output }
import com.esotericsoftware.kryo.{ Kryo, Serializer }
import org.bdgenomics.guacamole.Bases
import org.bdgenomics.guacamole.Bases.BasesOrdering
case class Allele(refBases: Seq[Byte], altBases: Seq[Byte]) extends Ordered[Allele] {
lazy val isVariant = refBases != altBases
override def toString: String = "Allele(%s,%s)".format(Bases.basesToString(refBases), Bases.basesToString(altBases))
override def equals(other: Any): Boolean = other match {
case otherAllele: Allele => refBases == otherAllele.refBases && altBases == otherAllele.altBases
case _ => false
}
def ==(other: Allele): Boolean = equals(other)
override def compare(that: Allele): Int = {
BasesOrdering.compare(refBases, that.refBases) match {
case 0 => BasesOrdering.compare(altBases, that.altBases)
case x => x
}
}
}
class AlleleSerializer extends Serializer[Allele] {
def write(kryo: Kryo, output: Output, obj: Allele) = {
output.writeInt(obj.refBases.length, true)
output.writeBytes(obj.refBases.toArray)
output.writeInt(obj.altBases.length, true)
output.writeBytes(obj.altBases.toArray)
}
def read(kryo: Kryo, input: Input, klass: Class[Allele]): Allele = {
val referenceBasesLength = input.readInt(true)
val referenceBases: Seq[Byte] = input.readBytes(referenceBasesLength)
val alternateLength = input.readInt(true)
val alternateBases: Seq[Byte] = input.readBytes(alternateLength)
Allele(referenceBases, alternateBases)
}
}
trait HasAlleleSerializer {
lazy val alleleSerializer: AlleleSerializer = new AlleleSerializer
}
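
// Illustrative sketch, not part of the original guacamole sources: registering AlleleSerializer
// with a Kryo instance and round-tripping an Allele. The object name, buffer size, and example
// bases are assumptions for demonstration only.
object AlleleKryoExample {
  def roundTrip(allele: Allele): Allele = {
    val kryo = new Kryo()
    kryo.register(classOf[Allele], new AlleleSerializer)
    val output = new Output(4096)
    kryo.writeObject(output, allele)
    val input = new Input(output.toBytes)
    kryo.readObject(input, classOf[Allele])
  }
  // e.g. roundTrip(Allele("A".getBytes.toSeq, "T".getBytes.toSeq)) returns an equal Allele
}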
| ryan-williams/guacamole | src/main/scala/org/bdgenomics/guacamole/variants/Allele.scala | Scala | apache-2.0 | 2,495 |
import sbt._
import sbt.Keys._
import java.util.Properties
object ScctBuild extends Build {
lazy val root = Project(id = "scct", base = file(".")) settings (instrumentSettings :_*)
val scctReportDir = SettingKey[File]("scct-report-dir")
lazy val Scct = config("scct")
lazy val ScctTest = config("scct-test") extend Scct
lazy val instrumentSettings =
inConfig(Scct)(Defaults.compileSettings) ++
inConfig(ScctTest)(Defaults.testSettings) ++
Seq(
scctReportDir <<= crossTarget / "coverage-report",
ivyConfigurations ++= Seq(Scct, ScctTest),
sources in Scct <<= (sources in Compile),
sourceDirectory in Scct <<= (sourceDirectory in Compile),
scalacOptions in Scct <++= (name in Scct, baseDirectory in Scct, artifactPath in packageBin in Compile, packageBin in Compile) map { (n, b, jar, _) => Seq(
"-Xplugin:"+jar.getAbsolutePath,
"-P:scct:projectId:"+n,
"-P:scct:basedir:"+b
)},
sources in ScctTest <<= (sources in Test),
sourceDirectory in ScctTest <<= (sourceDirectory in Test),
externalDependencyClasspath in Scct <<= (externalDependencyClasspath in Compile),
externalDependencyClasspath in ScctTest <<= Classpaths.concat(externalDependencyClasspath in ScctTest, externalDependencyClasspath in Test),
internalDependencyClasspath in Scct <<= (internalDependencyClasspath in Compile),
internalDependencyClasspath in ScctTest <<= (internalDependencyClasspath in Test, internalDependencyClasspath in ScctTest, classDirectory in Compile) map { (testDeps, scctDeps, oldClassDir) =>
scctDeps ++ testDeps.filter(_.data != oldClassDir)
},
testOptions in ScctTest <+= (name in Scct, baseDirectory in Scct, scalaSource in Scct, classDirectory in ScctTest, scctReportDir) map { (n, base, src, testClassesDir, reportDir) =>
Tests.Setup { () =>
val props = new Properties()
props.setProperty("scct.basedir", base.getAbsolutePath)
props.setProperty("scct.report.hook", "system.property")
props.setProperty("scct.project.name", n)
props.setProperty("scct.report.dir", reportDir.getAbsolutePath)
props.setProperty("scct.source.dir", src.getAbsolutePath)
val out = testClassesDir / "scct.properties"
IO.write(props, "Env for scct test run and report generation", out)
}
},
testOptions in ScctTest <+= (state, name in Scct) map { (s, n) =>
Tests.Cleanup { () =>
val reportProperty = "scct.%s.fire.report".format(n)
System.setProperty(reportProperty, "true")
val maxSleep = compat.Platform.currentTime + 60L*1000L
while (sys.props(reportProperty) != "done" && compat.Platform.currentTime < maxSleep) Thread.sleep(200L)
if (sys.props(reportProperty) != "done") println("Timed out waiting for scct coverage report")
}
},
Keys.test in Scct <<= (Keys.test in ScctTest)
)
}
| non/scct | project/Build.scala | Scala | apache-2.0 | 2,987 |
package com.scalaAsm.x86
package Instructions
package General
// Description: Convert Byte to Word
// Category: general/conver
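// Note (added for context): CBW is a single-byte instruction (opcode 0x98) with no explicit
// operands; it sign-extends AL into AX. With a 32-bit operand size the same opcode encodes CWDE.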
trait CBW extends InstructionDefinition {
val mnemonic = "CBW"
}
object CBW extends ZeroOperands[CBW] with CBWImpl
trait CBWImpl extends CBW {
implicit object _0 extends NoOp{
val opcode: OneOpcode = 0x98
override def hasImplicitOperand = true
}
}
| bdwashbu/scala-x86-inst | src/main/scala/com/scalaAsm/x86/Instructions/General/CBW.scala | Scala | apache-2.0 | 397 |
package mesosphere.marathon
package core.deployment.impl
import akka.Done
import akka.actor.{Actor, ActorRef, Cancellable, PoisonPill, Props, UnhandledMessage}
import akka.stream.scaladsl.Source
import akka.testkit.TestActorRef
import mesosphere.AkkaUnitTest
import mesosphere.marathon.core.condition.Condition
import mesosphere.marathon.core.condition.Condition.{Killed, Running}
import mesosphere.marathon.core.deployment.{DeploymentPlan, DeploymentStep}
import mesosphere.marathon.core.event._
import mesosphere.marathon.core.health.{MarathonHttpHealthCheck, PortReference}
import mesosphere.marathon.core.instance.Instance.InstanceState
import mesosphere.marathon.core.instance.update.InstanceChangedEventsGenerator
import mesosphere.marathon.core.instance.{Goal, GoalChangeReason, Instance, TestInstanceBuilder}
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.core.readiness.{ReadinessCheck, ReadinessCheckExecutor, ReadinessCheckResult}
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.tracker.InstanceTracker
import mesosphere.marathon.raml.Raml
import mesosphere.marathon.state._
import mesosphere.marathon.util.CancellableOnce
import org.mockito.Mockito._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Span}
import scala.concurrent.{Future, Promise}
class TaskReplaceActorTest extends AkkaUnitTest with Eventually {
"TaskReplaceActor" should {
"replace old tasks without health checks" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 5,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
upgradeStrategy = UpgradeStrategy(0.0)
)
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 5) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
for (_ <- 0 until newApp.instances)
ref ! f.instanceChanged(newApp, Running)
promise.future.futureValue
verify(f.tracker).setGoal(instanceA.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceB.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
expectTerminated(ref)
}
"not kill new and already started tasks" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 5,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
upgradeStrategy = UpgradeStrategy(0.0)
)
val instanceA = f.runningInstance(app)
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 4) returns Future.successful(Done)
val instanceC = f.runningInstance(newApp)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceC))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceC.instanceId) returns Future.successful(Some(instanceC))
val ref = f.replaceActor(newApp, promise)
watch(ref)
// Report all remaining instances as running.
for (_ <- 0 until (newApp.instances - 1))
ref ! f.instanceChanged(newApp, Running)
promise.future.futureValue
verify(f.tracker).setGoal(instanceA.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker, never).setGoal(instanceC.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
expectTerminated(ref)
}
"replace old tasks with health checks" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 5,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
healthChecks = Set(MarathonHttpHealthCheck(portIndex = Some(PortReference(0)))),
upgradeStrategy = UpgradeStrategy(0.0)
)
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 5) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
for (_ <- 0 until newApp.instances)
ref ! f.healthChanged(newApp, healthy = true)
promise.future.futureValue
verify(f.queue).resetDelay(newApp)
verify(f.tracker).setGoal(instanceA.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceB.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
expectTerminated(ref)
}
"replace and scale down from more than new minCapacity" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 2,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
upgradeStrategy = UpgradeStrategy(minimumHealthCapacity = 1.0)
)
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
val instanceC = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB, instanceC))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
f.tracker.get(instanceC.instanceId) returns Future.successful(Some(instanceC))
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 1) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
eventually {
verify(f.tracker, once).setGoal(any, any, any)
()
}
ref ! f.instanceChanged(newApp, Running)
eventually {
verify(f.tracker, times(2)).setGoal(any, any, any)
()
}
ref ! f.instanceChanged(newApp, Running)
eventually { app: AppDefinition => verify(f.queue, times(2)).add(app) }
promise.future.futureValue
eventually {
verify(f.tracker, times(3)).setGoal(any, any, any)
()
}
verify(f.queue).resetDelay(newApp)
expectTerminated(ref)
}
"replace tasks with minimum running number of tasks" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 3,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
healthChecks = Set(MarathonHttpHealthCheck(portIndex = Some(PortReference(0)))),
upgradeStrategy = UpgradeStrategy(0.5)
)
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
val instanceC = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB, instanceC))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
f.tracker.get(instanceC.instanceId) returns Future.successful(Some(instanceC))
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 3) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
// all new tasks are queued directly
eventually { app: AppDefinition => verify(f.queue, times(3)).add(app) }
// ceiling(minimumHealthCapacity * 3) = 2 are left running
eventually {
verify(f.tracker, once).setGoal(any, any, any)
()
}
// first new task becomes healthy and another old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(2)).setGoal(any, any, any)
()
}
// second new task becomes healthy and the last old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(3)).setGoal(any, any, any)
()
}
// third new task becomes healthy
ref ! f.healthChanged(newApp, healthy = true)
promise.future.futureValue
// all old tasks are killed
verify(f.tracker).setGoal(instanceA.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceB.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceC.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
expectTerminated(ref)
}
"respect upgrade strategy" in {
Given("An app with health checks, 1 task of new version already started but not passing health checks yet")
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 2,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
healthChecks = Set(MarathonHttpHealthCheck(portIndex = Some(PortReference(0)))),
upgradeStrategy = UpgradeStrategy(0.50, 0)
)
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
val instanceA = f.healthyInstance(app)
      // instance B was already started during a deployment begun by a previous leader but is not healthy yet
val instanceB = f.healthyInstance(newApp, healthy = false)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
val promise = Promise[Unit]()
f.queue.add(newApp, 1) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
      // the deployment should not complete within 1s; this also serves as a 1s wait before checking the next assertion
assert(promise.future.isReadyWithin(timeout = Span(1000, Millis)) == false)
      // that's the core of this test: we haven't replaced the task yet, see MARATHON-8716
verify(f.tracker, never).setGoal(instanceA.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
// we can now make this new instance healthy
ref ! f.healthChanged(newApp, healthy = true)
      // and we can check that the deployment continues as usual
eventually {
verify(f.tracker, times(1)).setGoal(any, any, any)
()
}
eventually {
verify(f.queue, times(1)).add(newApp, 1)
()
}
// and we don't need to wait for end of deployment
}
"replace tasks during rolling upgrade *without* over-capacity" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 3,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
healthChecks = Set(MarathonHttpHealthCheck(portIndex = Some(PortReference(0)))),
upgradeStrategy = UpgradeStrategy(0.5, 0.0)
)
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
val instanceC = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB, instanceC))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
f.tracker.get(instanceC.instanceId) returns Future.successful(Some(instanceC))
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 1) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
// only one task is queued directly
val queueOrder = org.mockito.Mockito.inOrder(f.queue)
eventually {
queueOrder.verify(f.queue).add(_: AppDefinition, 1)
}
// ceiling(minimumHealthCapacity * 3) = 2 are left running
eventually {
verify(f.tracker, once).setGoal(any, any, any)
()
}
// first new task becomes healthy and another old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(2)).setGoal(any, any, any)
()
}
eventually {
queueOrder.verify(f.queue).add(_: AppDefinition, 1)
}
// second new task becomes healthy and the last old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(3)).setGoal(any, any, any)
()
}
eventually {
queueOrder.verify(f.queue).add(_: AppDefinition, 1)
}
// third new task becomes healthy
ref ! f.healthChanged(newApp, healthy = true)
promise.future.futureValue
// all old tasks are killed
verify(f.queue).resetDelay(newApp)
verify(f.tracker).setGoal(instanceA.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceB.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceC.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
expectTerminated(ref)
}
"replace tasks during rolling upgrade *with* minimal over-capacity" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 3,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
healthChecks = Set(MarathonHttpHealthCheck(portIndex = Some(PortReference(0)))),
upgradeStrategy = UpgradeStrategy(1.0, 0.0) // 1 task over-capacity is ok
)
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
val instanceC = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB, instanceC))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
f.tracker.get(instanceC.instanceId) returns Future.successful(Some(instanceC))
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 1) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
// only one task is queued directly, all old still running
val queueOrder = org.mockito.Mockito.inOrder(f.queue)
eventually {
queueOrder.verify(f.queue).add(_: AppDefinition, 1)
}
// first new task becomes healthy and another old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, once).setGoal(any, any, any)
()
}
eventually {
queueOrder.verify(f.queue).add(_: AppDefinition, 1)
}
// second new task becomes healthy and another old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(2)).setGoal(any, any, any)
()
}
eventually {
queueOrder.verify(f.queue).add(_: AppDefinition, 1)
}
// third new task becomes healthy and last old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(3)).setGoal(any, any, any)
()
}
queueOrder.verify(f.queue, never).add(_: AppDefinition, 1)
promise.future.futureValue
// all old tasks are killed
verify(f.tracker).setGoal(instanceA.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceB.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceC.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
expectTerminated(ref)
}
"replace tasks during rolling upgrade with 2/3 over-capacity" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 3,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
healthChecks = Set(MarathonHttpHealthCheck(portIndex = Some(PortReference(0)))),
upgradeStrategy = UpgradeStrategy(1.0, 0.7)
)
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
val instanceC = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB, instanceC))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
f.tracker.get(instanceC.instanceId) returns Future.successful(Some(instanceC))
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(eq(newApp), any) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
// two tasks are queued directly, all old still running
val queueOrder = org.mockito.Mockito.inOrder(f.queue)
eventually {
queueOrder.verify(f.queue).add(_: AppDefinition, 2)
}
// first new task becomes healthy and another old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, once).setGoal(any, any, any)
()
}
eventually {
queueOrder.verify(f.queue).add(_: AppDefinition, 1)
}
// second new task becomes healthy and another old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(2)).setGoal(any, any, any)
()
}
queueOrder.verify(f.queue, never).add(_: AppDefinition, 1)
// third new task becomes healthy and last old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(3)).setGoal(any, any, any)
()
}
queueOrder.verify(f.queue, never).add(_: AppDefinition, 1)
promise.future.futureValue
// all old tasks are killed
verify(f.tracker).setGoal(instanceA.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceB.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceC.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
expectTerminated(ref)
}
"downscale tasks during rolling upgrade with 1 over-capacity" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 3,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
healthChecks = Set(MarathonHttpHealthCheck(portIndex = Some(PortReference(0)))),
upgradeStrategy = UpgradeStrategy(minimumHealthCapacity = 1.0, maximumOverCapacity = 0.3)
)
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
val instanceC = f.runningInstance(app)
val instanceD = f.runningInstance(app)
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB, instanceC, instanceD))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
f.tracker.get(instanceC.instanceId) returns Future.successful(Some(instanceC))
f.tracker.get(instanceD.instanceId) returns Future.successful(Some(instanceD))
f.queue.add(newApp, 1) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
// one task is killed directly because we are over capacity
eventually {
verify(f.tracker).setGoal(any, eq(Goal.Decommissioned), eq(GoalChangeReason.Upgrading))
()
}
// the kill is confirmed (see answer above) and the first new task is queued
eventually {
verify(f.queue, times(1)).resetDelay(newApp)
}
// first new task becomes healthy and another old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(2)).setGoal(any, any, any)
()
}
eventually {
verify(f.queue, times(2)).add(newApp, 1)
()
}
// second new task becomes healthy and another old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(3)).setGoal(any, any, any)
()
}
eventually {
verify(f.queue, times(3)).add(newApp, 1)
()
}
// third new task becomes healthy and last old task is killed
ref ! f.healthChanged(newApp, healthy = true)
eventually {
verify(f.tracker, times(4)).setGoal(any, any, any)
()
}
promise.future.futureValue
// all remaining old tasks are killed
verify(f.tracker).setGoal(instanceD.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceB.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceC.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.queue, times(3)).add(newApp, 1)
expectTerminated(ref)
}
"stop the actor if all tasks are replaced already" in {
Given("An app without health checks and readiness checks, as well as 2 tasks of this version")
val f = new Fixture
val app = AppDefinition(id = AbsolutePathId("/myApp"), instances = 2, role = "*")
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
val promise = Promise[Unit]()
When("The replace actor is started")
val ref = f.replaceActor(app, promise)
watch(ref)
Then("The replace actor finishes immediately")
expectTerminated(ref)
promise.future.futureValue
}
"wait for readiness checks if all tasks are replaced already" in {
Given("An app without health checks but readiness checks, as well as 1 task of this version")
val f = new Fixture
val check = ReadinessCheck()
val port = PortDefinition(0, name = Some(check.portName))
val app =
AppDefinition(id = AbsolutePathId("/myApp"), role = "*", instances = 1, portDefinitions = Seq(port), readinessChecks = Seq(check))
val instance = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instance))
f.tracker.get(instance.instanceId) returns Future.successful(Some(instance))
val (_, readyCheck) = f.readinessResults(instance, check.name, ready = true)
f.readinessCheckExecutor.execute(any[ReadinessCheckExecutor.ReadinessCheckSpec]) returns readyCheck
val promise = Promise[Unit]()
When("The replace actor is started")
f.replaceActor(app, promise)
Then("It needs to wait for the readiness checks to pass")
promise.future.futureValue
}
"wait for the readiness checks and health checks if all tasks are replaced already" in {
Given("An app without health checks but readiness checks, as well as 1 task of this version")
val f = new Fixture
val ready = ReadinessCheck()
val port = PortDefinition(0, name = Some(ready.portName))
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 1,
portDefinitions = Seq(port),
readinessChecks = Seq(ready),
healthChecks = Set(MarathonHttpHealthCheck())
)
val instance = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instance))
f.tracker.get(instance.instanceId) returns Future.successful(Some(instance))
val (_, readyCheck) = f.readinessResults(instance, ready.name, ready = true)
f.readinessCheckExecutor.execute(any[ReadinessCheckExecutor.ReadinessCheckSpec]) returns readyCheck
val promise = Promise[Unit]()
When("The replace actor is started")
val ref = f.replaceActor(app, promise)
watch(ref)
ref ! InstanceHealthChanged(instance.instanceId, app.version, app.id, healthy = Some(true))
Then("It needs to wait for the readiness checks to pass")
expectTerminated(ref)
promise.future.futureValue
}
"wait until the tasks are killed" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 5,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
upgradeStrategy = UpgradeStrategy(0.0)
)
val instanceA = f.runningInstance(app)
val instanceB = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA, instanceB))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
f.tracker.get(instanceB.instanceId) returns Future.successful(Some(instanceB))
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 5) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
for (_ <- 0 until newApp.instances)
ref ! f.instanceChanged(newApp, Running)
verify(f.queue, timeout(1000)).resetDelay(newApp)
promise.future.futureValue
verify(f.tracker).setGoal(instanceA.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
verify(f.tracker).setGoal(instanceB.instanceId, Goal.Decommissioned, GoalChangeReason.Upgrading)
}
"wait for health and readiness checks for new tasks" in {
val f = new Fixture
val app = AppDefinition(
id = AbsolutePathId("/myApp"),
role = "*",
instances = 1,
versionInfo = VersionInfo.forNewConfig(Timestamp(0)),
healthChecks = Set(MarathonHttpHealthCheck()),
readinessChecks = Seq(ReadinessCheck()),
upgradeStrategy = UpgradeStrategy(1.0, 1.0)
)
val instance = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instance))
f.tracker.get(instance.instanceId) returns Future.successful(Some(instance))
val promise = Promise[Unit]()
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 1) returns Future.successful(Done)
val ref = f.replaceActor(newApp, promise)
watch(ref)
// only one task is queued directly
val queueOrder = org.mockito.Mockito.inOrder(f.queue)
eventually {
queueOrder.verify(f.queue).add(_: AppDefinition, 1)
}
val newInstanceId = Instance.Id.forRunSpec(newApp.id)
val newTaskId = Task.Id(newInstanceId)
      // unhealthy
ref ! InstanceHealthChanged(newInstanceId, newApp.version, newApp.id, healthy = Some(false))
eventually {
verify(f.tracker, never).setGoal(any, any, any)
()
}
      // unready
ref ! ReadinessCheckResult(ReadinessCheck.DefaultName, newTaskId, ready = false, None)
eventually {
verify(f.tracker, never).setGoal(any, any, any)
()
}
      // healthy
ref ! InstanceHealthChanged(newInstanceId, newApp.version, newApp.id, healthy = Some(true))
eventually {
verify(f.tracker, never).setGoal(any, any, any)
()
}
      // ready
ref ! ReadinessCheckResult(ReadinessCheck.DefaultName, newTaskId, ready = true, None)
eventually {
verify(f.tracker, once).setGoal(any, any, any)
()
}
promise.future.futureValue
}
// regression DCOS-54927
"only handle InstanceChanged events of its own RunSpec" in {
val f = new Fixture
val app =
AppDefinition(id = AbsolutePathId("/myApp"), role = "*", instances = 1, versionInfo = VersionInfo.forNewConfig(Timestamp(0)))
val instanceA = f.runningInstance(app)
f.tracker.specInstances(eq(app.id), eq(true))(any) returns Future.successful(Seq(instanceA))
f.tracker.get(instanceA.instanceId) returns Future.successful(Some(instanceA))
val newApp = app.copy(versionInfo = VersionInfo.forNewConfig(Timestamp(1)))
f.queue.add(newApp, 1) returns Future.successful(Done)
val ref = f.replaceActor(newApp, Promise[Unit]())
watch(ref)
// Test that Instance changed events for a different RunSpec are not handled by the actor
import akka.testkit.TestProbe
val subscriber = TestProbe()
system.eventStream.subscribe(subscriber.ref, classOf[UnhandledMessage])
val otherApp = AppDefinition(id = AbsolutePathId("/some-other-app"), role = "*")
ref ! f.instanceChanged(otherApp, Killed)
subscriber.expectMsgClass(classOf[UnhandledMessage])
ref ! f.instanceChanged(otherApp, Running)
subscriber.expectMsgClass(classOf[UnhandledMessage])
ref ! PoisonPill
}
}
class Fixture {
val deploymentsManager: TestActorRef[Actor] = TestActorRef[Actor](Props.empty)
val deploymentStatus = DeploymentStatus(Raml.toRaml(DeploymentPlan.empty), Raml.toRaml(DeploymentStep(Seq.empty)))
val queue: LaunchQueue = mock[LaunchQueue]
val tracker: InstanceTracker = mock[InstanceTracker]
val readinessCheckExecutor: ReadinessCheckExecutor = mock[ReadinessCheckExecutor]
val hostName = "host.some"
val hostPorts = Seq(123)
tracker.setGoal(any, any, any) answers { args =>
def sendKilled(instance: Instance, goal: Goal): Unit = {
val updatedInstance = instance.copy(state = instance.state.copy(condition = Condition.Killed, goal = goal))
val events = InstanceChangedEventsGenerator.events(updatedInstance, None, Timestamp(0), Some(instance.state))
events.foreach(system.eventStream.publish)
}
val instanceId = args(0).asInstanceOf[Instance.Id]
val maybeInstance = tracker.get(instanceId).futureValue
maybeInstance.map { instance =>
val goal = args(1).asInstanceOf[Goal]
sendKilled(instance, goal)
Future.successful(Done)
}.getOrElse {
        Future.failed(new IllegalArgumentException(s"instance $instanceId is not ready in instance tracker when querying"))
}
}
def runningInstance(app: AppDefinition): Instance = {
TestInstanceBuilder
.newBuilder(app.id, version = app.version)
.addTaskWithBuilder()
.taskRunning()
.withNetworkInfo(hostName = Some(hostName), hostPorts = hostPorts)
.build()
.getInstance()
}
def healthyInstance(app: AppDefinition, healthy: Boolean = true): Instance = {
TestInstanceBuilder
.newBuilderForRunSpec(app, now = app.version)
.addTaskWithBuilder()
.taskRunning()
.asHealthyTask(healthy)
.withNetworkInfo(hostName = Some(hostName), hostPorts = hostPorts)
.build()
.getInstance()
}
def readinessResults(
instance: Instance,
checkName: String,
ready: Boolean
): (Cancellable, Source[ReadinessCheckResult, Cancellable]) = {
val cancellable = new CancellableOnce(() => ())
val source = Source(
instance.tasksMap.values.map(task => ReadinessCheckResult(checkName, task.taskId, ready, None)).toList
).mapMaterializedValue { _ => cancellable }
(cancellable, source)
}
def instanceChanged(app: AppDefinition, condition: Condition): InstanceChanged = {
val instanceId = Instance.Id.forRunSpec(app.id)
val state = InstanceState(Condition.Running, Timestamp.now(), None, None, Goal.Running)
val instance: Instance = Instance(instanceId, None, state, Map.empty, app, None, "*")
InstanceChanged(instanceId, app.version, app.id, condition, instance)
}
def healthChanged(app: AppDefinition, healthy: Boolean): InstanceHealthChanged = {
InstanceHealthChanged(Instance.Id.forRunSpec(app.id), app.version, app.id, healthy = Some(healthy))
}
def replaceActor(app: AppDefinition, promise: Promise[Unit]): ActorRef =
system.actorOf(
TaskReplaceActor
.props(deploymentsManager, deploymentStatus, queue, tracker, system.eventStream, readinessCheckExecutor, app, promise)
)
}
}
| mesosphere/marathon | src/test/scala/mesosphere/marathon/core/deployment/impl/TaskReplaceActorTest.scala | Scala | apache-2.0 | 34,164 |
class UnitReturnOneOutput {
def foo(i: Int) {
/*start*/
if (true) return
val x = 0
println(i)
/*end*/
println(x)
}
}
/*
class UnitReturnOneOutput {
def foo(i: Int) {
val x: Int = testMethodName(i) match {
case Some(result) => result
case None => return
}
println(x)
}
def testMethodName(i: Int): Option[Int] = {
if (true) return None
val x = 0
println(i)
Some(x)
}
}
*/ | whorbowicz/intellij-scala | testdata/extractMethod/output/UnitReturnOneOutput.scala | Scala | apache-2.0 | 440 |
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis.controls
import org.orbeon.saxon.om.Item
import org.orbeon.oxf.xforms.model.DataModel
import org.orbeon.oxf.xforms.analysis.ElementAnalysis
trait SingleNodeTrait extends ElementAnalysis {
def isBindingOptional: Boolean
// NOTE: Static controls do not by themselves support concrete bindings, but whether a control can bind to a certain
// item or not is a property of the control type, not of a specific instance of concrete control. So we place this
// here instead of in concrete controls. This also helps Form Builder, which sometimes needs to test whether
// a binding is allowed without having access to a concrete control.
def isAllowedBoundItem(item: Item): Boolean = DataModel.isAllowedBoundItem(item)
}
trait OptionalSingleNode extends SingleNodeTrait { def isBindingOptional = true }
trait RequiredSingleNode extends SingleNodeTrait { def isBindingOptional = false } | wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/analysis/controls/SingleNodeTrait.scala | Scala | lgpl-2.1 | 1,585 |
import scala.quoted.*
object scalatest {
inline def assert(condition: => Boolean): Unit = ${ assertImpl('condition) }
def assertImpl(cond: Expr[Boolean])(using Quotes) : Expr[Unit] = {
import quotes.reflect.*
import util.*
cond.asTerm.underlyingArgument match {
case t @ Apply(TypeApply(Select(lhs, op), targs), rhs) =>
ValDef.let(Symbol.spliceOwner, lhs) { left =>
ValDef.let(Symbol.spliceOwner, rhs) { rs =>
val app = Select.overloaded(left, op, targs.map(_.tpe), rs)
val b = app.asExprOf[Boolean]
'{ scala.Predef.assert($b) }.asTerm
}
}.asExprOf[Unit]
}
}
}
| dotty-staging/dotty | tests/run-macros/reflect-pos-fun/assert_1.scala | Scala | apache-2.0 | 665 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.csv
import java.io.InputStream
import java.math.BigDecimal
import scala.util.Try
import scala.util.control.NonFatal
import com.univocity.parsers.csv.CsvParser
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
import org.apache.spark.sql.catalyst.util.{BadRecordException, DateTimeUtils}
import org.apache.spark.sql.execution.datasources.FailureSafeParser
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
class UnivocityParser(
dataSchema: StructType,
requiredSchema: StructType,
val options: CSVOptions) extends Logging {
require(requiredSchema.toSet.subsetOf(dataSchema.toSet),
"requiredSchema should be the subset of schema.")
def this(schema: StructType, options: CSVOptions) = this(schema, schema, options)
// A `ValueConverter` is responsible for converting the given value to a desired type.
private type ValueConverter = String => Any
val tokenizer = {
val parserSetting = options.asParserSettings
if (options.columnPruning && requiredSchema.length < dataSchema.length) {
val tokenIndexArr = requiredSchema.map(f => java.lang.Integer.valueOf(dataSchema.indexOf(f)))
parserSetting.selectIndexes(tokenIndexArr: _*)
}
new CsvParser(parserSetting)
}
private val schema = if (options.columnPruning) requiredSchema else dataSchema
private val row = new GenericInternalRow(schema.length)
// Retrieve the raw record string.
private def getCurrentInput: UTF8String = {
UTF8String.fromString(tokenizer.getContext.currentParsedContent().stripLineEnd)
}
  // This parser first picks some tokens from the input tokens, according to the required schema,
  // then parses these tokens and puts the values in a row, with the order specified by the
  // required schema.
//
// For example, let's say there is CSV data as below:
//
// a,b,c
// 1,2,A
//
// So the CSV data schema is: ["a", "b", "c"]
// And let's say the required schema is: ["c", "b"]
//
// with the input tokens,
//
// input tokens - [1, 2, "A"]
//
  // Each input token is mapped to its position in the output row. In this case,
//
// output row - ["A", 2]
private val valueConverters: Array[ValueConverter] = {
schema.map(f => makeConverter(f.name, f.dataType, f.nullable, options)).toArray
}
/**
* Create a converter which converts the string value to a value according to a desired type.
* Currently, we do not support complex types (`ArrayType`, `MapType`, `StructType`).
*
   * For other nullable types, returns null if the datum is null or equals the value specified
   * in the `nullValue` option.
*/
def makeConverter(
name: String,
dataType: DataType,
nullable: Boolean = true,
options: CSVOptions): ValueConverter = dataType match {
case _: ByteType => (d: String) =>
nullSafeDatum(d, name, nullable, options)(_.toByte)
case _: ShortType => (d: String) =>
nullSafeDatum(d, name, nullable, options)(_.toShort)
case _: IntegerType => (d: String) =>
nullSafeDatum(d, name, nullable, options)(_.toInt)
case _: LongType => (d: String) =>
nullSafeDatum(d, name, nullable, options)(_.toLong)
case _: FloatType => (d: String) =>
nullSafeDatum(d, name, nullable, options) {
case options.nanValue => Float.NaN
case options.negativeInf => Float.NegativeInfinity
case options.positiveInf => Float.PositiveInfinity
case datum => datum.toFloat
}
case _: DoubleType => (d: String) =>
nullSafeDatum(d, name, nullable, options) {
case options.nanValue => Double.NaN
case options.negativeInf => Double.NegativeInfinity
case options.positiveInf => Double.PositiveInfinity
case datum => datum.toDouble
}
case _: BooleanType => (d: String) =>
nullSafeDatum(d, name, nullable, options)(_.toBoolean)
case dt: DecimalType => (d: String) =>
nullSafeDatum(d, name, nullable, options) { datum =>
val value = new BigDecimal(datum.replaceAll(",", ""))
Decimal(value, dt.precision, dt.scale)
}
case _: TimestampType => (d: String) =>
nullSafeDatum(d, name, nullable, options) { datum =>
        // This one will lose the microseconds part.
// See https://issues.apache.org/jira/browse/SPARK-10681.
Try(options.timestampFormat.parse(datum).getTime * 1000L)
.getOrElse {
// If it fails to parse, then tries the way used in 2.0 and 1.x for backwards
// compatibility.
DateTimeUtils.stringToTime(datum).getTime * 1000L
}
}
case _: DateType => (d: String) =>
nullSafeDatum(d, name, nullable, options) { datum =>
        // This one will lose the microseconds part.
        // See https://issues.apache.org/jira/browse/SPARK-10681.
Try(DateTimeUtils.millisToDays(options.dateFormat.parse(datum).getTime))
.getOrElse {
// If it fails to parse, then tries the way used in 2.0 and 1.x for backwards
// compatibility.
DateTimeUtils.millisToDays(DateTimeUtils.stringToTime(datum).getTime)
}
}
case _: StringType => (d: String) =>
nullSafeDatum(d, name, nullable, options)(UTF8String.fromString)
    case udt: UserDefinedType[_] =>
      makeConverter(name, udt.sqlType, nullable, options)
    // We don't actually hit this exception, though; we keep it for clarity
case _ => throw new RuntimeException(s"Unsupported type: ${dataType.typeName}")
}
private def nullSafeDatum(
datum: String,
name: String,
nullable: Boolean,
options: CSVOptions)(converter: ValueConverter): Any = {
if (datum == options.nullValue || datum == null) {
if (!nullable) {
throw new RuntimeException(s"null value found but field $name is not nullable.")
}
null
} else {
converter.apply(datum)
}
}
private val doParse = if (schema.nonEmpty) {
(input: String) => convert(tokenizer.parseLine(input))
} else {
    // If `columnPruning` is enabled and only partition attributes are scanned,
    // `schema` is empty.
(_: String) => InternalRow.empty
}
/**
* Parses a single CSV string and turns it into either one resulting row or no row (if the
* the record is malformed).
*/
def parse(input: String): InternalRow = doParse(input)
private def convert(tokens: Array[String]): InternalRow = {
if (tokens.length != schema.length) {
// If the number of tokens doesn't match the schema, we should treat it as a malformed record.
      // However, we still have a chance to parse some of the tokens, by adding extra null tokens in
// the tail if the number is smaller, or by dropping extra tokens if the number is larger.
val checkedTokens = if (schema.length > tokens.length) {
tokens ++ new Array[String](schema.length - tokens.length)
} else {
tokens.take(schema.length)
}
def getPartialResult(): Option[InternalRow] = {
try {
Some(convert(checkedTokens))
} catch {
case _: BadRecordException => None
}
}
      // For records with fewer or more tokens than the schema, try to return partial results
      // if possible.
throw BadRecordException(
() => getCurrentInput,
() => getPartialResult(),
new RuntimeException("Malformed CSV record"))
} else {
try {
var i = 0
while (i < schema.length) {
row(i) = valueConverters(i).apply(tokens(i))
i += 1
}
row
} catch {
case NonFatal(e) =>
          // For corrupted records with the same number of tokens as the schema, the
          // CSV reader doesn't support partial results. All fields other than the field
// configured by `columnNameOfCorruptRecord` are set to `null`.
throw BadRecordException(() => getCurrentInput, () => None, e)
}
}
}
}
private[csv] object UnivocityParser {
/**
* Parses a stream that contains CSV strings and turns it into an iterator of tokens.
*/
def tokenizeStream(
inputStream: InputStream,
shouldDropHeader: Boolean,
tokenizer: CsvParser): Iterator[Array[String]] = {
convertStream(inputStream, shouldDropHeader, tokenizer)(tokens => tokens)
}
/**
* Parses a stream that contains CSV strings and turns it into an iterator of rows.
*/
def parseStream(
inputStream: InputStream,
shouldDropHeader: Boolean,
parser: UnivocityParser,
schema: StructType,
checkHeader: Array[String] => Unit): Iterator[InternalRow] = {
val tokenizer = parser.tokenizer
val safeParser = new FailureSafeParser[Array[String]](
input => Seq(parser.convert(input)),
parser.options.parseMode,
schema,
parser.options.columnNameOfCorruptRecord)
convertStream(inputStream, shouldDropHeader, tokenizer, checkHeader) { tokens =>
safeParser.parse(tokens)
}.flatten
}
private def convertStream[T](
inputStream: InputStream,
shouldDropHeader: Boolean,
tokenizer: CsvParser,
checkHeader: Array[String] => Unit = _ => ())(
convert: Array[String] => T) = new Iterator[T] {
tokenizer.beginParsing(inputStream)
private var nextRecord = {
if (shouldDropHeader) {
val firstRecord = tokenizer.parseNext()
checkHeader(firstRecord)
}
tokenizer.parseNext()
}
override def hasNext: Boolean = nextRecord != null
override def next(): T = {
if (!hasNext) {
throw new NoSuchElementException("End of stream")
}
val curRecord = convert(nextRecord)
nextRecord = tokenizer.parseNext()
curRecord
}
}
/**
* Parses an iterator that contains CSV strings and turns it into an iterator of rows.
*/
def parseIterator(
lines: Iterator[String],
parser: UnivocityParser,
schema: StructType): Iterator[InternalRow] = {
val options = parser.options
val filteredLines: Iterator[String] = CSVUtils.filterCommentAndEmpty(lines, options)
val safeParser = new FailureSafeParser[String](
input => Seq(parser.parse(input)),
parser.options.parseMode,
schema,
parser.options.columnNameOfCorruptRecord)
filteredLines.flatMap(safeParser.parse)
}
}
| bravo-zhang/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/UnivocityParser.scala | Scala | apache-2.0 | 11,360 |
/*
* Licensed to the Programming Language and Software Methodology Lab (PLSM)
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership.
* The PLSM licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
object Plugins extends BasicPlugin with PlayPlugin {
override protected[this] def version = NotSpecific
} | AtkinsChang/geoconvert | project/project/Plugins.scala | Scala | apache-2.0 | 937 |
package compiler
import datamodel._
import dto.UnitSpec
class ProtocolCompilerSpec extends UnitSpec {
private val protocolName = "TestProtocol"
"Protocol compiler" should "compile valid input" in {
val validInput =
"""
|issue {
| number Number jsonKey=issueNumber cType=uint32_t;
| url String;
| title String;
| creator user jsonKey=user;
| assignees Array[user];
| labels Array[label];
| is_open Boolean;
|}
|
|user {
| name String jsonKey=login;
| url String;
|}
|
|label {
| name String;
| color String[6];
|}
""".stripMargin
val validProtocol = Protocol(protocolName, List(
Message("issue", List(
Field("number", AliasedType("uint32_t", NumberType), "issueNumber"),
Field("url", DynamicStringType, "url"),
Field("title", DynamicStringType, "title"),
Field("creator", ObjectType("user"), "user"),
Field("assignees", ArrayType(ObjectType("user")), "assignees"),
Field("labels", ArrayType(ObjectType("label")), "labels"),
Field("is_open", BooleanType, "is_open")
)),
Message("user", List(
Field("name", DynamicStringType, "login"),
Field("url", DynamicStringType, "url")
)),
Message("label", List(
Field("name", DynamicStringType, "name"),
Field("color", FixedStringType(6), "color")
))
))
ProtocolCompiler(validInput, protocolName) shouldBe Right(validProtocol)
}
it should "report syntax errors" in {
val invalidSyntaxInput =
"""
|{
| number Number;
| url String;
|}
|
|user {
| name String jsonKey=login;
| url String;
|}
""".stripMargin
val invalidSyntaxError = ParserError(Location(2,1),"""string matching regex `[_a-zA-Z][_a-zA-Z0-9]*' expected but `{' found""")
ProtocolCompiler(invalidSyntaxInput, protocolName) shouldBe Left(invalidSyntaxError)
}
it should "report semantic errors" in {
val invalidSemanticsInput =
"""
|user {
| name String jsonKey=login;
| url String;
|}
|
|user {
| name String;
| color String[6];
|}
""".stripMargin
val invalidSemanticsError = DuplicateMessagesError(List("user"))
ProtocolCompiler(invalidSemanticsInput, protocolName) shouldBe Left(invalidSemanticsError)
}
}
| gatkin/cDTO | src/test/scala/compiler/ProtocolCompilerSpec.scala | Scala | mit | 2,584 |
object input_in_range2 {
def main(args: Array[String]) {
// Put code here
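    // The original leaves this body empty ("Put code here"); what follows is an illustrative
    // sketch only, assuming the exercise asks to read an Int and report whether it lies in a
    // chosen range. The bounds below are assumptions, not part of the original.
    val low = 1
    val high = 100
    print(s"Enter a number between $low and $high: ")
    val n = scala.io.StdIn.readInt()
    if (n >= low && n <= high)
      println(s"$n is in the range [$low, $high]")
    else
      println(s"$n is outside the range [$low, $high]")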
}
}
| LoyolaChicagoBooks/introcs-scala-examples | input_in_range2/input_in_range2.scala | Scala | gpl-3.0 | 86 |
package io.scalac.slack.bots
import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.util.Timeout
import io.scalac.slack.MessageEventBus
import io.scalac.slack.common.{Incoming, MessageEvent, Outgoing, _}
import scala.concurrent.ExecutionContext
import scala.language.implicitConversions
trait MessagePublisher {
import akka.pattern._
def bus: MessageEventBus
implicit def publish(event: MessageEvent): Unit = {
bus.publish(event)
}
def publish(directMessage: DirectMessage)(implicit context: ExecutionContext, userStorage: ActorRef, timeout: Timeout): Unit = {
userStorage ? FindChannel(directMessage.key) onSuccess {
case Some(channel: String) =>
val eventToSend = directMessage.event match {
case message: OutboundMessage => message.copy(channel = channel)
case message: RichOutboundMessage => message.copy(channel = channel)
case other => other
}
publish(eventToSend)
}
}
}
abstract class MessageListener extends Actor with ActorLogging with MessagePublisher
/**
 * A raw messaging interface used to create internal, system-level bots.
 * For user-facing bots, use AbstractBot.
*/
abstract class IncomingMessageListener extends MessageListener {
@throws[Exception](classOf[Exception])
override def preStart(): Unit = bus.subscribe(self, Incoming)
}
abstract class OutgoingMessageListener extends MessageListener {
@throws[Exception](classOf[Exception])
override def preStart(): Unit = bus.subscribe(self, Outgoing)
}
/**
* The class to extend when creating a bot.
*/
abstract class AbstractBot extends IncomingMessageListener {
log.debug(s"Starting ${self.path.name} on $bus")
override val bus: MessageEventBus
def name: String = self.path.name
def help(channel: String): OutboundMessage
def act: Actor.Receive
def handleSystemCommands: Actor.Receive = {
case HelpRequest(t, ch) if t.map(_ == name).getOrElse(true) => publish(help(ch))
}
override final def receive: Actor.Receive = act.orElse(handleSystemCommands)
}
| Cheers-Dev/scala-slack-bot-core | src/main/scala/io/scalac/slack/bots/MessageListener.scala | Scala | mit | 2,057 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.table.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{Tumble, ValidationException}
import org.apache.flink.table.planner.plan.utils.JavaUserDefinedAggFunctions.WeightedAvg
import org.apache.flink.table.planner.utils.{TableFunc0, TableTestBase}
import org.junit.Test
import java.math.BigDecimal
class CalcValidationTest extends TableTestBase {
@Test(expected = classOf[ValidationException])
def testInvalidUseOfRowtime(): Unit = {
val util = streamTestUtil()
util.addDataStream[(Long, Int, Double, Float, BigDecimal, String)](
"MyTable",
'rowtime, 'int, 'double, 'float, 'bigdec, 'string)
.select('rowtime.rowtime)
}
@Test(expected = classOf[ValidationException])
def testInvalidUseOfRowtime2(): Unit = {
val util = streamTestUtil()
util.addDataStream[(Long, Int, Double, Float, BigDecimal, String)](
"MyTable",
'rowtime, 'int, 'double, 'float, 'bigdec, 'string)
.window(Tumble over 2.millis on 'rowtime as 'w)
.groupBy('w)
.select('w.end.rowtime, 'int.count as 'int) // no rowtime on non-window reference
}
@Test(expected = classOf[ValidationException])
def testAddColumnsWithAgg(): Unit = {
val util = streamTestUtil()
val tab = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
tab.addColumns('a.sum)
}
@Test(expected = classOf[ValidationException])
def testAddOrReplaceColumnsWithAgg(): Unit = {
val util = streamTestUtil()
val tab = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
tab.addOrReplaceColumns('a.sum)
}
@Test(expected = classOf[ValidationException])
def testRenameColumnsWithAgg(): Unit = {
val util = streamTestUtil()
val tab = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
tab.renameColumns('a.sum)
}
@Test(expected = classOf[ValidationException])
def testRenameColumnsWithoutAlias(): Unit = {
val util = streamTestUtil()
val tab = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
tab.renameColumns('a)
}
@Test(expected = classOf[ValidationException])
  def testRenameColumnsWithFunctionCall(): Unit = {
val util = streamTestUtil()
val tab = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
tab.renameColumns('a + 1 as 'a2)
}
@Test(expected = classOf[ValidationException])
def testRenameColumnsNotExist(): Unit = {
val util = streamTestUtil()
val tab = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
tab.renameColumns('e as 'e2)
}
@Test(expected = classOf[ValidationException])
def testDropColumnsWithAgg(): Unit = {
val util = streamTestUtil()
val tab = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
tab.dropColumns('a.sum)
}
@Test(expected = classOf[ValidationException])
def testDropColumnsNotExist(): Unit = {
val util = streamTestUtil()
val tab = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
tab.dropColumns('e)
}
@Test(expected = classOf[ValidationException])
def testDropColumnsWithValueLiteral(): Unit = {
val util = streamTestUtil()
val tab = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
tab.dropColumns("'a'")
}
@Test(expected = classOf[ValidationException])
def testInvalidMapFunctionTypeAggregation(): Unit = {
val util = streamTestUtil()
util.addTableSource[(Int)](
"MyTable", 'int)
.map('int.sum) // do not support AggregateFunction as input
}
@Test(expected = classOf[ValidationException])
def testInvalidMapFunctionTypeUDAGG(): Unit = {
val util = streamTestUtil()
val weightedAvg = new WeightedAvg
util.addTableSource[(Int)](
"MyTable", 'int)
.map(weightedAvg('int, 'int)) // do not support AggregateFunction as input
}
@Test(expected = classOf[ValidationException])
def testInvalidMapFunctionTypeUDAGG2(): Unit = {
val util = streamTestUtil()
util.addFunction("weightedAvg", new WeightedAvg)
util.addTableSource[(Int)](
"MyTable", 'int)
.map(call("weightedAvg", $"int", $"int")) // do not support AggregateFunction as input
}
@Test(expected = classOf[ValidationException])
def testInvalidMapFunctionTypeTableFunction(): Unit = {
val util = streamTestUtil()
util.addFunction("func", new TableFunc0)
util.addTableSource[(String)](
"MyTable", 'string)
.map(call("func", $"string") as "a") // do not support TableFunction as input
}
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/table/validation/CalcValidationTest.scala | Scala | apache-2.0 | 5,406 |
package elascala
/**
* Created by vayne on 15. 2. 24..
*/
case class ElascalaException(code: Int, reason: String) extends RuntimeException | MinCha/elascala | src/main/scala/elascala/ElascalaException.scala | Scala | apache-2.0 | 141 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package kafka.api
import java.io.File
import java.{lang, util}
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import java.util.{Collections, Properties}
import kafka.api.GroupedUserPrincipalBuilder._
import kafka.api.GroupedUserQuotaCallback._
import kafka.server._
import kafka.utils.JaasTestUtils.ScramLoginModule
import kafka.utils.{JaasTestUtils, Logging, TestUtils}
import kafka.zk.ConfigEntityChangeNotificationZNode
import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig}
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.{Cluster, Reconfigurable}
import org.apache.kafka.common.config.SaslConfigs
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.security.auth._
import org.apache.kafka.common.security.scram.ScramCredential
import org.apache.kafka.server.quota._
import org.junit.Assert._
import org.junit.{After, Before, Test}
import scala.collection.mutable.ArrayBuffer
import scala.collection.JavaConverters._
class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup {
override protected def securityProtocol = SecurityProtocol.SASL_SSL
override protected def listenerName = new ListenerName("CLIENT")
override protected def interBrokerListenerName: ListenerName = new ListenerName("BROKER")
override protected lazy val trustStoreFile = Some(File.createTempFile("truststore", ".jks"))
override val consumerCount: Int = 0
override val producerCount: Int = 0
override val serverCount: Int = 2
private val kafkaServerSaslMechanisms = Seq("SCRAM-SHA-256")
private val kafkaClientSaslMechanism = "SCRAM-SHA-256"
override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism))
override protected val clientSaslProperties = Some(kafkaClientSaslProperties(kafkaClientSaslMechanism))
private val adminClients = new ArrayBuffer[AdminClient]()
private var producerWithoutQuota: KafkaProducer[Array[Byte], Array[Byte]] = _
val defaultRequestQuota = 1000
val defaultProduceQuota = 2000 * 1000 * 1000
val defaultConsumeQuota = 1000 * 1000 * 1000
@Before
override def setUp() {
startSasl(jaasSections(kafkaServerSaslMechanisms, Some("SCRAM-SHA-256"), KafkaSasl, JaasTestUtils.KafkaServerContextName))
this.serverConfig.setProperty(KafkaConfig.ProducerQuotaBytesPerSecondDefaultProp, Long.MaxValue.toString)
this.serverConfig.setProperty(KafkaConfig.ConsumerQuotaBytesPerSecondDefaultProp, Long.MaxValue.toString)
this.serverConfig.setProperty(KafkaConfig.ClientQuotaCallbackClassProp, classOf[GroupedUserQuotaCallback].getName)
this.serverConfig.setProperty(s"${listenerName.configPrefix}${KafkaConfig.PrincipalBuilderClassProp}",
classOf[GroupedUserPrincipalBuilder].getName)
this.serverConfig.setProperty(KafkaConfig.DeleteTopicEnableProp, "true")
super.setUp()
brokerList = TestUtils.bootstrapServers(servers, listenerName)
producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG,
ScramLoginModule(JaasTestUtils.KafkaScramAdmin, JaasTestUtils.KafkaScramAdminPassword).toString)
producerWithoutQuota = createNewProducer
producers += producerWithoutQuota
}
@After
override def tearDown(): Unit = {
// Close producers and consumers without waiting for requests to complete
// to avoid waiting for throttled responses
producers.foreach(_.close(0, TimeUnit.MILLISECONDS))
producers.clear()
consumers.foreach(_.close(0, TimeUnit.MILLISECONDS))
consumers.clear()
super.tearDown()
}
override def configureSecurityBeforeServersStart() {
super.configureSecurityBeforeServersStart()
zkClient.makeSurePersistentPathExists(ConfigEntityChangeNotificationZNode.path)
createScramCredentials(zkConnect, JaasTestUtils.KafkaScramAdmin, JaasTestUtils.KafkaScramAdminPassword)
}
@Test
def testCustomQuotaCallback() {
// Large quota override, should not throttle
var brokerId = 0
var user = createGroupWithOneUser("group0_user1", brokerId)
user.configureAndWaitForQuota(1000000, 2000000)
quotaLimitCalls.values.foreach(_.set(0))
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// ClientQuotaCallback#quotaLimit is invoked by each quota manager once for each new client
assertEquals(1, quotaLimitCalls(ClientQuotaType.PRODUCE).get)
assertEquals(1, quotaLimitCalls(ClientQuotaType.FETCH).get)
assertTrue(s"Too many quotaLimit calls $quotaLimitCalls", quotaLimitCalls(ClientQuotaType.REQUEST).get <= serverCount)
// Large quota updated to small quota, should throttle
user.configureAndWaitForQuota(9000, 3000)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Quota override deletion - verify default quota applied (large quota, no throttling)
user = addUser("group0_user2", brokerId)
user.removeQuotaOverrides()
user.waitForQuotaUpdate(defaultProduceQuota, defaultConsumeQuota, defaultRequestQuota)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Make default quota smaller, should throttle
user.configureAndWaitForQuota(8000, 2500, divisor = 1, group = None)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Configure large quota override, should not throttle
user = addUser("group0_user3", brokerId)
user.configureAndWaitForQuota(2000000, 2000000)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Quota large enough for one partition, should not throttle
brokerId = 1
user = createGroupWithOneUser("group1_user1", brokerId)
user.configureAndWaitForQuota(8000 * 100, 2500 * 100)
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Create large number of partitions on another broker, should result in throttling on first partition
val largeTopic = "group1_largeTopic"
createTopic(largeTopic, numPartitions = 99, leader = 0)
user.waitForQuotaUpdate(8000, 2500, defaultRequestQuota)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Remove quota override and test default quota applied with scaling based on partitions
user = addUser("group1_user2", brokerId)
user.waitForQuotaUpdate(defaultProduceQuota / 100, defaultConsumeQuota / 100, defaultRequestQuota)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
user.configureAndWaitForQuota(8000 * 100, 2500 * 100, divisor=100, group = None)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
// Remove the second topic with large number of partitions, verify no longer throttled
adminZkClient.deleteTopic(largeTopic)
user = addUser("group1_user3", brokerId)
user.waitForQuotaUpdate(8000 * 100, 2500 * 100, defaultRequestQuota)
user.removeThrottleMetrics() // since group was throttled before
user.produceConsume(expectProduceThrottle = false, expectConsumeThrottle = false)
// Alter configs of custom callback dynamically
val adminClient = createAdminClient()
val newProps = new Properties
newProps.put(GroupedUserQuotaCallback.DefaultProduceQuotaProp, "8000")
newProps.put(GroupedUserQuotaCallback.DefaultFetchQuotaProp, "2500")
TestUtils.alterConfigs(servers, adminClient, newProps, perBrokerConfig = false)
user.waitForQuotaUpdate(8000, 2500, defaultRequestQuota)
user.produceConsume(expectProduceThrottle = true, expectConsumeThrottle = true)
assertEquals(serverCount, callbackInstances.get)
}
/**
* Creates a group with one user and one topic with one partition.
* @param firstUser First user to create in the group
* @param brokerId The broker id to use as leader of the partition
*/
private def createGroupWithOneUser(firstUser: String, brokerId: Int): GroupedUser = {
val user = addUser(firstUser, brokerId)
createTopic(user.topic, numPartitions = 1, brokerId)
user.configureAndWaitForQuota(defaultProduceQuota, defaultConsumeQuota, divisor = 1, group = None)
user
}
private def createTopic(topic: String, numPartitions: Int, leader: Int): Unit = {
val assignment = (0 until numPartitions).map { i => i -> Seq(leader) }.toMap
TestUtils.createTopic(zkClient, topic, assignment, servers)
}
private def createAdminClient(): AdminClient = {
val config = new util.HashMap[String, Object]
config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
TestUtils.bootstrapServers(servers, new ListenerName("BROKER")))
clientSecurityProps("admin-client").asInstanceOf[util.Map[Object, Object]].asScala.foreach { case (key, value) =>
config.put(key.toString, value)
}
config.put(SaslConfigs.SASL_JAAS_CONFIG,
ScramLoginModule(JaasTestUtils.KafkaScramAdmin, JaasTestUtils.KafkaScramAdminPassword).toString)
val adminClient = AdminClient.create(config)
adminClients += adminClient
adminClient
}
private def produceWithoutThrottle(topic: String, numRecords: Int): Unit = {
(0 until numRecords).foreach { i =>
val payload = i.toString.getBytes
producerWithoutQuota.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, null, null, payload))
}
}
private def addUser(user: String, leader: Int): GroupedUser = {
val password = s"$user:secret"
createScramCredentials(zkConnect, user, password)
servers.foreach { server =>
val cache = server.credentialProvider.credentialCache.cache(kafkaClientSaslMechanism, classOf[ScramCredential])
TestUtils.waitUntilTrue(() => cache.get(user) != null, "SCRAM credentials not created")
}
val userGroup = group(user)
val topic = s"${userGroup}_topic"
val producerClientId = s"$user:producer-client-id"
    val consumerClientId = s"$user:consumer-client-id"
producerConfig.put(ProducerConfig.CLIENT_ID_CONFIG, producerClientId)
producerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, ScramLoginModule(user, password).toString)
val producer = createNewProducer
producers += producer
consumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, consumerClientId)
consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, s"$user-group")
consumerConfig.put(SaslConfigs.SASL_JAAS_CONFIG, ScramLoginModule(user, password).toString)
val consumer = createNewConsumer
consumers += consumer
GroupedUser(user, userGroup, topic, servers(leader), producerClientId, consumerClientId, producer, consumer)
}
case class GroupedUser(user: String, userGroup: String, topic: String, leaderNode: KafkaServer,
producerClientId: String, consumerClientId: String,
producer: KafkaProducer[Array[Byte], Array[Byte]],
consumer: KafkaConsumer[Array[Byte], Array[Byte]]) extends
QuotaTestClients(topic, leaderNode, producerClientId, consumerClientId, producer, consumer) {
override def userPrincipal: KafkaPrincipal = GroupedUserPrincipal(user, userGroup)
override def quotaMetricTags(clientId: String): Map[String, String] = {
Map(GroupedUserQuotaCallback.QuotaGroupTag -> userGroup)
}
override def overrideQuotas(producerQuota: Long, consumerQuota: Long, requestQuota: Double): Unit = {
configureQuota(userGroup, producerQuota, consumerQuota, requestQuota)
}
override def removeQuotaOverrides(): Unit = {
adminZkClient.changeUserOrUserClientIdConfig(quotaEntityName(userGroup), new Properties)
}
def configureQuota(userGroup: String, producerQuota: Long, consumerQuota: Long, requestQuota: Double): Unit = {
val quotaProps = quotaProperties(producerQuota, consumerQuota, requestQuota)
adminZkClient.changeUserOrUserClientIdConfig(quotaEntityName(userGroup), quotaProps)
}
def configureAndWaitForQuota(produceQuota: Long, fetchQuota: Long, divisor: Int = 1,
group: Option[String] = Some(userGroup)): Unit = {
configureQuota(group.getOrElse(""), produceQuota, fetchQuota, defaultRequestQuota)
waitForQuotaUpdate(produceQuota / divisor, fetchQuota / divisor, defaultRequestQuota)
}
def produceConsume(expectProduceThrottle: Boolean, expectConsumeThrottle: Boolean): Unit = {
val numRecords = 1000
val produced = produceUntilThrottled(numRecords, waitForRequestCompletion = false)
verifyProduceThrottle(expectProduceThrottle, verifyClientMetric = false)
// make sure there are enough records on the topic to test consumer throttling
produceWithoutThrottle(topic, numRecords - produced)
consumeUntilThrottled(numRecords, waitForRequestCompletion = false)
verifyConsumeThrottle(expectConsumeThrottle, verifyClientMetric = false)
}
def removeThrottleMetrics(): Unit = {
def removeSensors(quotaType: QuotaType, clientId: String): Unit = {
val sensorSuffix = quotaMetricTags(clientId).values.mkString(":")
leaderNode.metrics.removeSensor(s"${quotaType}ThrottleTime-$sensorSuffix")
leaderNode.metrics.removeSensor(s"$quotaType-$sensorSuffix")
}
removeSensors(QuotaType.Produce, producerClientId)
removeSensors(QuotaType.Fetch, consumerClientId)
removeSensors(QuotaType.Request, producerClientId)
removeSensors(QuotaType.Request, consumerClientId)
}
private def quotaEntityName(userGroup: String): String = s"${userGroup}_"
}
}
object GroupedUserPrincipalBuilder {
def group(str: String): String = {
if (str.indexOf("_") <= 0)
""
else
str.substring(0, str.indexOf("_"))
}
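  // For example, group("group0_user1") == "group0" and group("admin") == "" (no group prefix);
  // a leading underscore, as in group("_default"), also yields "" because indexOf("_") is 0.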
}
class GroupedUserPrincipalBuilder extends KafkaPrincipalBuilder {
override def build(context: AuthenticationContext): KafkaPrincipal = {
val securityProtocol = context.securityProtocol
if (securityProtocol == SecurityProtocol.SASL_PLAINTEXT || securityProtocol == SecurityProtocol.SASL_SSL) {
val user = context.asInstanceOf[SaslAuthenticationContext].server().getAuthorizationID
val userGroup = group(user)
if (userGroup.isEmpty)
new KafkaPrincipal(KafkaPrincipal.USER_TYPE, user)
else
GroupedUserPrincipal(user, userGroup)
} else
throw new IllegalStateException(s"Unexpected security protocol $securityProtocol")
}
}
case class GroupedUserPrincipal(user: String, userGroup: String) extends KafkaPrincipal(KafkaPrincipal.USER_TYPE, user)
object GroupedUserQuotaCallback {
val QuotaGroupTag = "group"
val DefaultProduceQuotaProp = "default.produce.quota"
val DefaultFetchQuotaProp = "default.fetch.quota"
val UnlimitedQuotaMetricTags = Collections.emptyMap[String, String]
val quotaLimitCalls = Map(
ClientQuotaType.PRODUCE -> new AtomicInteger,
ClientQuotaType.FETCH -> new AtomicInteger,
ClientQuotaType.REQUEST -> new AtomicInteger
)
val callbackInstances = new AtomicInteger
}
/**
* Quota callback for a grouped user. Both user principals and topics of each group
* are prefixed with the group name followed by '_'. This callback defines quotas of different
* types at the group level. Group quotas are configured in ZooKeeper as user quotas with
* the entity name "${group}_". Default group quotas are configured in ZooKeeper as user quotas
* with the entity name "_".
*
* Default group quotas may also be configured using the configuration options
* "default.produce.quota" and "default.fetch.quota" which can be reconfigured dynamically
* without restarting the broker. This tests custom reconfigurable options for quota callbacks,
*/
class GroupedUserQuotaCallback extends ClientQuotaCallback with Reconfigurable with Logging {
var brokerId: Int = -1
val customQuotasUpdated = ClientQuotaType.values.toList
    .map(quotaType => (quotaType -> new AtomicBoolean)).toMap
val quotas = ClientQuotaType.values.toList
.map(quotaType => (quotaType -> new ConcurrentHashMap[String, Double])).toMap
val partitionRatio = new ConcurrentHashMap[String, Double]()
override def configure(configs: util.Map[String, _]): Unit = {
brokerId = configs.get(KafkaConfig.BrokerIdProp).toString.toInt
callbackInstances.incrementAndGet
}
override def reconfigurableConfigs: util.Set[String] = {
Set(DefaultProduceQuotaProp, DefaultFetchQuotaProp).asJava
}
override def validateReconfiguration(configs: util.Map[String, _]): Unit = {
reconfigurableConfigs.asScala.foreach(configValue(configs, _))
}
override def reconfigure(configs: util.Map[String, _]): Unit = {
configValue(configs, DefaultProduceQuotaProp).foreach(value => quotas(ClientQuotaType.PRODUCE).put("", value))
configValue(configs, DefaultFetchQuotaProp).foreach(value => quotas(ClientQuotaType.FETCH).put("", value))
customQuotasUpdated.values.foreach(_.set(true))
}
private def configValue(configs: util.Map[String, _], key: String): Option[Long] = {
val value = configs.get(key)
if (value != null) Some(value.toString.toLong) else None
}
override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): util.Map[String, String] = {
principal match {
case groupPrincipal: GroupedUserPrincipal =>
val userGroup = groupPrincipal.userGroup
val quotaLimit = quotaOrDefault(userGroup, quotaType)
if (quotaLimit != null)
Map(QuotaGroupTag -> userGroup).asJava
else
UnlimitedQuotaMetricTags
case _ =>
UnlimitedQuotaMetricTags
}
}
override def quotaLimit(quotaType: ClientQuotaType, metricTags: util.Map[String, String]): lang.Double = {
quotaLimitCalls(quotaType).incrementAndGet
val group = metricTags.get(QuotaGroupTag)
if (group != null) quotaOrDefault(group, quotaType) else null
}
override def updateClusterMetadata(cluster: Cluster): Boolean = {
val topicsByGroup = cluster.topics.asScala.groupBy(group)
    // Recompute the partition ratio for every group (the `put` side effect must run for all
    // groups), then report whether any ratio changed so that metric configs get refreshed.
    topicsByGroup.map { case (group, groupTopics) =>
      val groupPartitions = groupTopics.flatMap(topic => cluster.partitionsForTopic(topic).asScala)
      val totalPartitions = groupPartitions.size
      val partitionsOnThisBroker = groupPartitions.count { p => p.leader != null && p.leader.id == brokerId }
      val multiplier = if (totalPartitions == 0)
        1
      else if (partitionsOnThisBroker == 0)
        1.0 / totalPartitions
      else
        partitionsOnThisBroker.toDouble / totalPartitions
      partitionRatio.put(group, multiplier) != multiplier
    }.exists(identity)
}
override def updateQuota(quotaType: ClientQuotaType, quotaEntity: ClientQuotaEntity, newValue: Double): Unit = {
quotas(quotaType).put(userGroup(quotaEntity), newValue)
}
override def removeQuota(quotaType: ClientQuotaType, quotaEntity: ClientQuotaEntity): Unit = {
quotas(quotaType).remove(userGroup(quotaEntity))
}
override def quotaResetRequired(quotaType: ClientQuotaType): Boolean = customQuotasUpdated(quotaType).getAndSet(false)
def close(): Unit = {}
private def userGroup(quotaEntity: ClientQuotaEntity): String = {
val configEntity = quotaEntity.configEntities.get(0)
if (configEntity.entityType == ClientQuotaEntity.ConfigEntityType.USER)
group(configEntity.name)
else
throw new IllegalArgumentException(s"Config entity type ${configEntity.entityType} is not supported")
}
private def quotaOrDefault(group: String, quotaType: ClientQuotaType): lang.Double = {
val quotaMap = quotas(quotaType)
var quotaLimit: Any = quotaMap.get(group)
if (quotaLimit == null)
quotaLimit = quotaMap.get("")
if (quotaLimit != null) scaledQuota(quotaType, group, quotaLimit.asInstanceOf[Double]) else null
}
private def scaledQuota(quotaType: ClientQuotaType, group: String, configuredQuota: Double): Double = {
if (quotaType == ClientQuotaType.REQUEST)
configuredQuota
else {
val multiplier = partitionRatio.get(group)
if (multiplier <= 0.0) configuredQuota else configuredQuota * multiplier
}
}
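  // For example, a group with 100 partitions of which one has its leader on this broker gets a
  // multiplier of 0.01, so a configured produce quota of 800000 is scaled down to 8000 on this
  // broker; request quotas are returned unscaled.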
}
| Ishiihara/kafka | core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala | Scala | apache-2.0 | 21,016 |
package models
import com.datastax.driver.core.utils._
import org.junit.runner._
import org.specs2.mutable._
import org.specs2.runner._
import scala.concurrent._
import scala.concurrent.duration._
@RunWith(classOf[JUnitRunner])
class PeriodicallyCleanedSpec extends Specification with EmbeddedCassandra {
"C* PeriodicallyCleaned" should {
"be able to be written" in {
val periodicallyCleaned = new PeriodicallyCleaned
val id = UUIDs.timeBased()
val future = for {
_ <- periodicallyCleaned.scheduleHourly(id, "key")
_ <- periodicallyCleaned.scheduleDaily(id, "key")
_ <- periodicallyCleaned.scheduleMonthly(id, "key")
h <- periodicallyCleaned.isScheduledHourly(id, "key")
d <- periodicallyCleaned.isScheduledDaily(id, "key")
w <- periodicallyCleaned.isScheduledWeekly(id, "key")
m <- periodicallyCleaned.isScheduledMonthly(id, "key")
} yield (h, d, w, m)
val ret = Await.result(future, 5.seconds)
ret mustEqual(true, true, false, true)
}
}
} | lizepeng/app.io | modules/models/test/models/PeriodicallyCleanedSpec.scala | Scala | apache-2.0 | 1,052 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.features.serialization
import java.util.{Collections => JCollections, List => JList, Map => JMap, UUID}
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.Geometry
import org.geotools.factory.Hints
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import scala.collection.JavaConverters._
/** [[DatumWriter]] definitions for writing (serializing) components of a [[org.opengis.feature.simple.SimpleFeature]].
*
*/
trait AbstractWriter[Writer]
extends PrimitiveWriter[Writer]
with NullableWriter[Writer]
with CollectionWriter[Writer]
with GeometryWriter[Writer]
with HintKeyWriter[Writer]
with LazyLogging {
def writeUUID: DatumWriter[Writer, UUID] = (writer, uuid) => {
writeLong(writer, uuid.getMostSignificantBits)
writeLong(writer, uuid.getLeastSignificantBits)
}
/** A [[DatumWriter]] which writes the class name of ``obj`` and then the ``obj``. If the object is ``null`` then
* only a null marker will be written.
*
   * @tparam T the type of the object to be written
*/
def writeGeneric[T]: DatumWriter[Writer, T] = (writer, obj) => {
if (obj == null) {
writeString(writer, AbstractWriter.NULL_MARKER_STR)
} else {
writeString(writer, obj.getClass.getName)
selectWriter(obj.getClass.asInstanceOf[Class[T]])(writer, obj)
}
}
/**
   * A [[DatumWriter]] for writing a map where the keys and values may be of any type. The map may not be null.
   * The writer will call ``writeArrayStart`` with the number of serializable entries and then, for each entry, call
   * ``startItem`` followed by up to four writes. After writing all entries the writer will call ``endArray``.
*/
def writeGenericMap: DatumWriter[Writer, JMap[AnyRef, AnyRef]] = (writer, map) => {
// may not be able to write all entries - must pre-filter to know correct count
val filtered = map.asScala.filter {
case (key, value) =>
if (canSerialize(key)) {
true
} else {
logger.warn(s"Can't serialize Map entry ($key,$value). The map entry will be skipped.")
false
}
}
writeArrayStart(writer, filtered.size)
filtered.foreach {
case (key, value) =>
startItem(writer)
writeGeneric(writer, key)
writeGeneric(writer, value)
}
endArray(writer)
}
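  // For instance, for a single-entry map Map[AnyRef, AnyRef]("a" -> Int.box(1)) the writer emits:
  // writeArrayStart(1), startItem, writeString("java.lang.String"), writeString("a"),
  // writeString("java.lang.Integer"), writeInt(1), endArray.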
def canSerialize(obj: AnyRef): Boolean = obj match {
case key: Hints.Key => HintKeySerialization.canSerialize(key)
case _ => true
}
/**
* @param clazz the [[Class]] of the object to be written
* @tparam T the type of the object to be written
* @return a [[DatumWriter]] capable of writing object of the given ``clazz``
*/
def selectWriter[T](clazz: Class[_ <: T], metadata: JMap[_ <: AnyRef, _ <: AnyRef] = JCollections.emptyMap(),
isNullable: isNullableFn = notNullable): DatumWriter[Writer, T] = {
val writer = clazz match {
case cls if classOf[java.lang.String].isAssignableFrom(cls) => writeString.asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[java.lang.Integer].isAssignableFrom(cls) => writeInt.asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[java.lang.Long].isAssignableFrom(cls) => writeLong.asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[java.lang.Float].isAssignableFrom(cls) => writeFloat.asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[java.lang.Double].isAssignableFrom(cls) => writeDouble.asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[java.lang.Boolean].isAssignableFrom(cls) => writeBoolean.asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[java.util.Date].isAssignableFrom(cls) => writeDate.asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[UUID].isAssignableFrom(cls) => writeUUID.asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[Geometry].isAssignableFrom(cls) => writeGeometry.asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[Hints.Key].isAssignableFrom(cls) => writeHintKey.asInstanceOf[DatumWriter[Writer, T]]
case c if classOf[JList[_]].isAssignableFrom(c) =>
val elemClass = metadata.get(SimpleFeatureTypes.USER_DATA_LIST_TYPE).asInstanceOf[Class[_]]
val elemWriter = selectWriter(elemClass, isNullable = isNullable)
writeList(elemWriter).asInstanceOf[DatumWriter[Writer, T]]
case c if classOf[JMap[_, _]].isAssignableFrom(c) =>
val keyClass = metadata.get(SimpleFeatureTypes.USER_DATA_MAP_KEY_TYPE).asInstanceOf[Class[_]]
val valueClass = metadata.get(SimpleFeatureTypes.USER_DATA_MAP_VALUE_TYPE).asInstanceOf[Class[_]]
val keyWriter = selectWriter(keyClass, isNullable = isNullable)
val valueWriter = selectWriter(valueClass, isNullable = isNullable)
writeMap(keyWriter, valueWriter).asInstanceOf[DatumWriter[Writer, T]]
case cls if classOf[Array[Byte]].isAssignableFrom(cls) => writeBytes.asInstanceOf[DatumWriter[Writer, T]]
case _ => throw new IllegalArgumentException("Unsupported class: " + clazz)
}
if (isNullable(clazz)) {
writeNullable(writer)
} else {
writer
}
}
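  // For example, selectWriter(classOf[java.lang.Integer]) resolves to writeInt, and for a
  // java.util.List the element class is looked up from the USER_DATA_LIST_TYPE metadata entry and
  // wrapped by writeList; classes for which isNullable returns true are wrapped in writeNullable.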
}
object AbstractWriter {
val NULL_MARKER_STR = "<null>"
}
| mdzimmerman/geomesa | geomesa-features/geomesa-feature-common/src/main/scala/org/locationtech/geomesa/features/serialization/AbstractWriter.scala | Scala | apache-2.0 | 5,703 |
package checklist.laws
import cats.laws.discipline.arbitrary._
import cats.laws.discipline.{ApplicativeTests, ProfunctorTests}
import cats.implicits._
import checklist._
import cats.laws.discipline.MiniInt
class RuleLawTests extends CatsSuite {
checkAll("Rule[Int, String]", ApplicativeTests[Rule[MiniInt, ?]].applicative[String, String, String])
checkAll("Rule[Int, String]", ProfunctorTests[Rule].profunctor[MiniInt, Int, Int, String, String, String])
}
| davegurnell/checklist | core/src/test/scala/checklist/laws/RuleLawTests.scala | Scala | apache-2.0 | 462 |
package plda.week11
/**
*
* @author Lorand Szakacs, [email protected], [email protected]
* @since 06 Dec 2016
*
* http://scalapuzzlers.com/#pzzlr-051
*
*/
object Puzzler10Comparison extends App {
object Playroom {
case class Toy(squeezeMsg: String = this.toString) {
override def toString: String = squeezeMsg
}
}
import Playroom._
println(Toy("My name is Fido!") == new Toy("My name is Fido!"))
println(Toy() == new Toy())
// println("-------")
// println(Toy())
// println(new Toy())
}
| 2016-Fall-UPT-PLDA/labs | week-11/puzzlers/src/main/scala/plda/week11/Puzzler10Comparison.scala | Scala | gpl-3.0 | 567 |
/*
* Happy Melly Teller
* Copyright (C) 2013 - 2014, Happy Melly http://www.happymelly.com
*
* This file is part of the Happy Melly Teller.
*
* Happy Melly Teller is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Happy Melly Teller is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Happy Melly Teller. If not, see <http://www.gnu.org/licenses/>.
*
* If you have questions concerning this license or the applicable additional terms, you may contact
* by email Sergey Kotlov, [email protected] or
* in writing Happy Melly One, Handelsplein 37, Rotterdam, The Netherlands, 3071 PR
*/
package models.admin
/**
* A category for a `BookingEntry`.
*/
case class TransactionType(id: Option[Long], name: String)
| HappyMelly/teller | app/models/admin/TransactionType.scala | Scala | gpl-3.0 | 1,189 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io._
import java.nio.charset.StandardCharsets
import java.util.{Date, Locale}
import java.util.concurrent.TimeUnit
import java.util.zip.{ZipInputStream, ZipOutputStream}
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import com.google.common.io.{ByteStreams, Files}
import org.apache.commons.io.FileUtils
import org.apache.hadoop.fs.{FileStatus, FileSystem, FSDataInputStream, Path}
import org.apache.hadoop.hdfs.{DFSInputStream, DistributedFileSystem}
import org.apache.hadoop.security.AccessControlException
import org.json4s.jackson.JsonMethods._
import org.mockito.ArgumentMatchers.{any, argThat}
import org.mockito.Mockito.{doThrow, mock, spy, verify, when}
import org.scalatest.concurrent.Eventually._
import org.scalatest.matchers.must.Matchers
import org.scalatest.matchers.should.Matchers._
import org.apache.spark.{JobExecutionStatus, SecurityManager, SPARK_VERSION, SparkConf, SparkFunSuite}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.history.EventLogTestHelper._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.DRIVER_LOG_DFS_DIR
import org.apache.spark.internal.config.History._
import org.apache.spark.internal.config.UI.{ADMIN_ACLS, ADMIN_ACLS_GROUPS, USER_GROUPS_MAPPING}
import org.apache.spark.io._
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.security.GroupMappingServiceProvider
import org.apache.spark.status.AppStatusStore
import org.apache.spark.status.KVUtils.KVStoreScalaSerializer
import org.apache.spark.status.api.v1.{ApplicationAttemptInfo, ApplicationInfo}
import org.apache.spark.util.{Clock, JsonProtocol, ManualClock, Utils}
import org.apache.spark.util.kvstore.InMemoryStore
import org.apache.spark.util.logging.DriverLogger
class FsHistoryProviderSuite extends SparkFunSuite with Matchers with Logging {
private var testDir: File = null
override def beforeEach(): Unit = {
super.beforeEach()
testDir = Utils.createTempDir(namePrefix = s"a b%20c+d")
}
override def afterEach(): Unit = {
try {
Utils.deleteRecursively(testDir)
} finally {
super.afterEach()
}
}
/** Create a fake log file using the new log format used in Spark 1.3+ */
private def newLogFile(
appId: String,
appAttemptId: Option[String],
inProgress: Boolean,
codec: Option[String] = None): File = {
val ip = if (inProgress) EventLogFileWriter.IN_PROGRESS else ""
val logUri = SingleEventLogFileWriter.getLogPath(testDir.toURI, appId, appAttemptId, codec)
val logPath = new Path(logUri).toUri.getPath + ip
new File(logPath)
}
Seq(true, false).foreach { inMemory =>
test(s"Parse application logs (inMemory = $inMemory)") {
testAppLogParsing(inMemory)
}
}
private def testAppLogParsing(inMemory: Boolean): Unit = {
val clock = new ManualClock(12345678)
val conf = createTestConf(inMemory = inMemory)
val provider = new FsHistoryProvider(conf, clock)
// Write a new-style application log.
val newAppComplete = newLogFile("new1", None, inProgress = false)
writeFile(newAppComplete, None,
SparkListenerApplicationStart(newAppComplete.getName(), Some("new-app-complete"), 1L, "test",
None),
SparkListenerApplicationEnd(5L)
)
// Write a new-style application log.
val newAppCompressedComplete = newLogFile("new1compressed", None, inProgress = false,
Some("lzf"))
writeFile(newAppCompressedComplete, Some(CompressionCodec.createCodec(conf, "lzf")),
SparkListenerApplicationStart(newAppCompressedComplete.getName(), Some("new-complete-lzf"),
1L, "test", None),
SparkListenerApplicationEnd(4L))
// Write an unfinished app, new-style.
val newAppIncomplete = newLogFile("new2", None, inProgress = true)
writeFile(newAppIncomplete, None,
SparkListenerApplicationStart(newAppIncomplete.getName(), Some("new-incomplete"), 1L, "test",
None)
)
// Force a reload of data from the log directory, and check that logs are loaded.
// Take the opportunity to check that the offset checks work as expected.
updateAndCheck(provider) { list =>
list.size should be (3)
list.count(_.attempts.head.completed) should be (2)
def makeAppInfo(
id: String,
name: String,
start: Long,
end: Long,
lastMod: Long,
user: String,
completed: Boolean): ApplicationInfo = {
val duration = if (end > 0) end - start else 0
new ApplicationInfo(id, name, None, None, None, None,
List(ApplicationAttemptInfo(None, new Date(start),
new Date(end), new Date(lastMod), duration, user, completed, SPARK_VERSION)))
}
// For completed files, lastUpdated would be lastModified time.
list(0) should be (makeAppInfo("new-app-complete", newAppComplete.getName(), 1L, 5L,
newAppComplete.lastModified(), "test", true))
list(1) should be (makeAppInfo("new-complete-lzf", newAppCompressedComplete.getName(),
1L, 4L, newAppCompressedComplete.lastModified(), "test", true))
// For Inprogress files, lastUpdated would be current loading time.
list(2) should be (makeAppInfo("new-incomplete", newAppIncomplete.getName(), 1L, -1L,
clock.getTimeMillis(), "test", false))
// Make sure the UI can be rendered.
list.foreach { info =>
val appUi = provider.getAppUI(info.id, None)
appUi should not be null
appUi should not be None
}
}
}
test("SPARK-3697: ignore files that cannot be read.") {
    // setReadable(...) does not work on Windows. Please refer to JDK-6728842.
assume(!Utils.isWindows)
class TestFsHistoryProvider extends FsHistoryProvider(createTestConf()) {
var doMergeApplicationListingCall = 0
override private[history] def doMergeApplicationListing(
reader: EventLogFileReader,
lastSeen: Long,
enableSkipToEnd: Boolean,
lastCompactionIndex: Option[Long]): Unit = {
super.doMergeApplicationListing(reader, lastSeen, enableSkipToEnd, lastCompactionIndex)
doMergeApplicationListingCall += 1
}
}
val provider = new TestFsHistoryProvider
val logFile1 = newLogFile("new1", None, inProgress = false)
writeFile(logFile1, None,
SparkListenerApplicationStart("app1-1", Some("app1-1"), 1L, "test", None),
SparkListenerApplicationEnd(2L)
)
val logFile2 = newLogFile("new2", None, inProgress = false)
writeFile(logFile2, None,
SparkListenerApplicationStart("app1-2", Some("app1-2"), 1L, "test", None),
SparkListenerApplicationEnd(2L)
)
logFile2.setReadable(false, false)
updateAndCheck(provider) { list =>
list.size should be (1)
}
provider.doMergeApplicationListingCall should be (1)
}
test("history file is renamed from inprogress to completed") {
val provider = new FsHistoryProvider(createTestConf())
val logFile1 = newLogFile("app1", None, inProgress = true)
writeFile(logFile1, None,
SparkListenerApplicationStart("app1", Some("app1"), 1L, "test", None),
SparkListenerApplicationEnd(2L)
)
updateAndCheck(provider) { list =>
list.size should be (1)
provider.getAttempt("app1", None).logPath should endWith(EventLogFileWriter.IN_PROGRESS)
}
logFile1.renameTo(newLogFile("app1", None, inProgress = false))
updateAndCheck(provider) { list =>
list.size should be (1)
provider.getAttempt("app1", None).logPath should not endWith(EventLogFileWriter.IN_PROGRESS)
}
}
test("Parse logs that application is not started") {
val provider = new FsHistoryProvider(createTestConf())
val logFile1 = newLogFile("app1", None, inProgress = true)
writeFile(logFile1, None,
SparkListenerLogStart("1.4")
)
updateAndCheck(provider) { list =>
list.size should be (0)
}
}
test("SPARK-5582: empty log directory") {
val provider = new FsHistoryProvider(createTestConf())
val logFile1 = newLogFile("app1", None, inProgress = true)
writeFile(logFile1, None,
SparkListenerApplicationStart("app1", Some("app1"), 1L, "test", None),
SparkListenerApplicationEnd(2L))
val oldLog = new File(testDir, "old1")
oldLog.mkdir()
provider.checkForLogs()
val appListAfterRename = provider.getListing()
appListAfterRename.size should be (1)
}
test("apps with multiple attempts with order") {
val provider = new FsHistoryProvider(createTestConf())
val attempt1 = newLogFile("app1", Some("attempt1"), inProgress = true)
writeFile(attempt1, None,
SparkListenerApplicationStart("app1", Some("app1"), 1L, "test", Some("attempt1"))
)
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (1)
}
val attempt2 = newLogFile("app1", Some("attempt2"), inProgress = true)
writeFile(attempt2, None,
SparkListenerApplicationStart("app1", Some("app1"), 2L, "test", Some("attempt2"))
)
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (2)
list.head.attempts.head.attemptId should be (Some("attempt2"))
}
val attempt3 = newLogFile("app1", Some("attempt3"), inProgress = false)
writeFile(attempt3, None,
SparkListenerApplicationStart("app1", Some("app1"), 3L, "test", Some("attempt3")),
SparkListenerApplicationEnd(4L)
)
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (3)
list.head.attempts.head.attemptId should be (Some("attempt3"))
}
val app2Attempt1 = newLogFile("app2", Some("attempt1"), inProgress = false)
writeFile(app2Attempt1, None,
SparkListenerApplicationStart("app2", Some("app2"), 5L, "test", Some("attempt1")),
SparkListenerApplicationEnd(6L)
)
updateAndCheck(provider) { list =>
list.size should be (2)
list.head.attempts.size should be (1)
list.last.attempts.size should be (3)
list.head.attempts.head.attemptId should be (Some("attempt1"))
list.foreach { app =>
app.attempts.foreach { attempt =>
val appUi = provider.getAppUI(app.id, attempt.attemptId)
appUi should not be null
}
}
}
}
test("log urls without customization") {
val conf = createTestConf()
val executorInfos = (1 to 5).map(createTestExecutorInfo("app1", "user1", _))
val expected: Map[ExecutorInfo, Map[String, String]] = executorInfos.map { execInfo =>
execInfo -> execInfo.logUrlMap
}.toMap
testHandlingExecutorLogUrl(conf, expected)
}
test("custom log urls, including FILE_NAME") {
val conf = createTestConf()
.set(CUSTOM_EXECUTOR_LOG_URL, getCustomExecutorLogUrl(includeFileName = true))
    // some of the available attributes are not used in the pattern, which should be OK
val executorInfos = (1 to 5).map(createTestExecutorInfo("app1", "user1", _))
val expected: Map[ExecutorInfo, Map[String, String]] = executorInfos.map { execInfo =>
val attr = execInfo.attributes
val newLogUrlMap = attr("LOG_FILES").split(",").map { file =>
val newLogUrl = getExpectedExecutorLogUrl(attr, Some(file))
file -> newLogUrl
}.toMap
execInfo -> newLogUrlMap
}.toMap
testHandlingExecutorLogUrl(conf, expected)
}
test("custom log urls, excluding FILE_NAME") {
val conf = createTestConf()
.set(CUSTOM_EXECUTOR_LOG_URL, getCustomExecutorLogUrl(includeFileName = false))
    // some of the available attributes are not used in the pattern, which should be OK
val executorInfos = (1 to 5).map(createTestExecutorInfo("app1", "user1", _))
val expected: Map[ExecutorInfo, Map[String, String]] = executorInfos.map { execInfo =>
val attr = execInfo.attributes
val newLogUrl = getExpectedExecutorLogUrl(attr, None)
execInfo -> Map("log" -> newLogUrl)
}.toMap
testHandlingExecutorLogUrl(conf, expected)
}
test("custom log urls with invalid attribute") {
    // Here we refer to {{NON_EXISTING}}, which is not available in the attributes,
    // so Spark will fall back to the original log url and log a warning.
val conf = createTestConf()
.set(CUSTOM_EXECUTOR_LOG_URL, getCustomExecutorLogUrl(includeFileName = true) +
"/{{NON_EXISTING}}")
val executorInfos = (1 to 5).map(createTestExecutorInfo("app1", "user1", _))
val expected: Map[ExecutorInfo, Map[String, String]] = executorInfos.map { execInfo =>
execInfo -> execInfo.logUrlMap
}.toMap
testHandlingExecutorLogUrl(conf, expected)
}
test("custom log urls, LOG_FILES not available while FILE_NAME is specified") {
    // In this case Spark will fall back to the original log url and log a warning.
val conf = createTestConf()
.set(CUSTOM_EXECUTOR_LOG_URL, getCustomExecutorLogUrl(includeFileName = true))
val executorInfos = (1 to 5).map(
createTestExecutorInfo("app1", "user1", _, includingLogFiles = false))
val expected: Map[ExecutorInfo, Map[String, String]] = executorInfos.map { execInfo =>
execInfo -> execInfo.logUrlMap
}.toMap
testHandlingExecutorLogUrl(conf, expected)
}
test("custom log urls, app not finished, applyIncompleteApplication: true") {
val conf = createTestConf()
.set(CUSTOM_EXECUTOR_LOG_URL, getCustomExecutorLogUrl(includeFileName = true))
.set(APPLY_CUSTOM_EXECUTOR_LOG_URL_TO_INCOMPLETE_APP, true)
// ensure custom log urls are applied to incomplete application
val executorInfos = (1 to 5).map(createTestExecutorInfo("app1", "user1", _))
val expected: Map[ExecutorInfo, Map[String, String]] = executorInfos.map { execInfo =>
val attr = execInfo.attributes
val newLogUrlMap = attr("LOG_FILES").split(",").map { file =>
val newLogUrl = getExpectedExecutorLogUrl(attr, Some(file))
file -> newLogUrl
}.toMap
execInfo -> newLogUrlMap
}.toMap
testHandlingExecutorLogUrl(conf, expected, isCompletedApp = false)
}
test("custom log urls, app not finished, applyIncompleteApplication: false") {
val conf = createTestConf()
.set(CUSTOM_EXECUTOR_LOG_URL, getCustomExecutorLogUrl(includeFileName = true))
.set(APPLY_CUSTOM_EXECUTOR_LOG_URL_TO_INCOMPLETE_APP, false)
// ensure custom log urls are NOT applied to incomplete application
val executorInfos = (1 to 5).map(createTestExecutorInfo("app1", "user1", _))
val expected: Map[ExecutorInfo, Map[String, String]] = executorInfos.map { execInfo =>
execInfo -> execInfo.logUrlMap
}.toMap
testHandlingExecutorLogUrl(conf, expected, isCompletedApp = false)
}
private def getCustomExecutorLogUrl(includeFileName: Boolean): String = {
val baseUrl = "http://newhost:9999/logs/clusters/{{CLUSTER_ID}}/users/{{USER}}/containers/" +
"{{CONTAINER_ID}}"
if (includeFileName) baseUrl + "/{{FILE_NAME}}" else baseUrl
}
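  // For example, with CLUSTER_ID=cluster1, USER=user1, CONTAINER_ID=container1 and
  // FILE_NAME=stdout, the pattern above expands to
  // http://newhost:9999/logs/clusters/cluster1/users/user1/containers/container1/stdout
  // (getExpectedExecutorLogUrl below rebuilds the same URL directly from the attributes).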
private def getExpectedExecutorLogUrl(
attributes: Map[String, String],
fileName: Option[String]): String = {
val baseUrl = s"http://newhost:9999/logs/clusters/${attributes("CLUSTER_ID")}" +
s"/users/${attributes("USER")}/containers/${attributes("CONTAINER_ID")}"
fileName match {
case Some(file) => baseUrl + s"/$file"
case None => baseUrl
}
}
private def testHandlingExecutorLogUrl(
conf: SparkConf,
expectedLogUrlMap: Map[ExecutorInfo, Map[String, String]],
isCompletedApp: Boolean = true): Unit = {
val provider = new FsHistoryProvider(conf)
val attempt1 = newLogFile("app1", Some("attempt1"), inProgress = true)
val executorAddedEvents = expectedLogUrlMap.keys.zipWithIndex.map { case (execInfo, idx) =>
SparkListenerExecutorAdded(1 + idx, s"exec$idx", execInfo)
}.toList.sortBy(_.time)
val allEvents = List(SparkListenerApplicationStart("app1", Some("app1"), 1L,
"test", Some("attempt1"))) ++ executorAddedEvents ++
(if (isCompletedApp) List(SparkListenerApplicationEnd(1000L)) else Seq())
writeFile(attempt1, None, allEvents: _*)
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (1)
list.foreach { app =>
app.attempts.foreach { attempt =>
val appUi = provider.getAppUI(app.id, attempt.attemptId)
appUi should not be null
val executors = appUi.get.ui.store.executorList(false).iterator
executors should not be null
val iterForExpectation = expectedLogUrlMap.iterator
var executorCount = 0
while (executors.hasNext) {
val executor = executors.next()
val (expectedExecInfo, expectedLogs) = iterForExpectation.next()
executor.hostPort should startWith(expectedExecInfo.executorHost)
executor.executorLogs should be(expectedLogs)
executorCount += 1
}
executorCount should be (expectedLogUrlMap.size)
}
}
}
}
test("log cleaner") {
val maxAge = TimeUnit.SECONDS.toMillis(10)
val clock = new ManualClock(maxAge / 2)
val provider = new FsHistoryProvider(
createTestConf().set(MAX_LOG_AGE_S.key, s"${maxAge}ms"), clock)
val log1 = newLogFile("app1", Some("attempt1"), inProgress = false)
writeFile(log1, None,
SparkListenerApplicationStart("app1", Some("app1"), 1L, "test", Some("attempt1")),
SparkListenerApplicationEnd(2L)
)
log1.setLastModified(0L)
val log2 = newLogFile("app1", Some("attempt2"), inProgress = false)
writeFile(log2, None,
SparkListenerApplicationStart("app1", Some("app1"), 3L, "test", Some("attempt2")),
SparkListenerApplicationEnd(4L)
)
log2.setLastModified(clock.getTimeMillis())
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (2)
}
// Move the clock forward so log1 exceeds the max age.
clock.advance(maxAge)
updateAndCheck(provider) { list =>
list.size should be (1)
list.head.attempts.size should be (1)
list.head.attempts.head.attemptId should be (Some("attempt2"))
}
assert(!log1.exists())
// Do the same for the other log.
clock.advance(maxAge)
updateAndCheck(provider) { list =>
list.size should be (0)
}
assert(!log2.exists())
}
test("should not clean inprogress application with lastUpdated time less than maxTime") {
val firstFileModifiedTime = TimeUnit.DAYS.toMillis(1)
val secondFileModifiedTime = TimeUnit.DAYS.toMillis(6)
val maxAge = TimeUnit.DAYS.toMillis(7)
val clock = new ManualClock(0)
val provider = new FsHistoryProvider(
createTestConf().set(MAX_LOG_AGE_S, maxAge / 1000), clock)
val log = newLogFile("inProgressApp1", None, inProgress = true)
writeFile(log, None,
SparkListenerApplicationStart(
"inProgressApp1", Some("inProgressApp1"), 3L, "test", Some("attempt1"))
)
clock.setTime(firstFileModifiedTime)
log.setLastModified(clock.getTimeMillis())
provider.checkForLogs()
writeFile(log, None,
SparkListenerApplicationStart(
"inProgressApp1", Some("inProgressApp1"), 3L, "test", Some("attempt1")),
SparkListenerJobStart(0, 1L, Nil, null)
)
clock.setTime(secondFileModifiedTime)
log.setLastModified(clock.getTimeMillis())
provider.checkForLogs()
clock.setTime(TimeUnit.DAYS.toMillis(10))
writeFile(log, None,
SparkListenerApplicationStart(
"inProgressApp1", Some("inProgressApp1"), 3L, "test", Some("attempt1")),
SparkListenerJobStart(0, 1L, Nil, null),
SparkListenerJobEnd(0, 1L, JobSucceeded)
)
log.setLastModified(clock.getTimeMillis())
provider.checkForLogs()
// This should not trigger any cleanup
updateAndCheck(provider) { list =>
list.size should be(1)
}
}
test("log cleaner for inProgress files") {
val firstFileModifiedTime = TimeUnit.SECONDS.toMillis(10)
val secondFileModifiedTime = TimeUnit.SECONDS.toMillis(20)
val maxAge = TimeUnit.SECONDS.toMillis(40)
val clock = new ManualClock(0)
val provider = new FsHistoryProvider(
createTestConf().set(MAX_LOG_AGE_S.key, s"${maxAge}ms"), clock)
val log1 = newLogFile("inProgressApp1", None, inProgress = true)
writeFile(log1, None,
SparkListenerApplicationStart(
"inProgressApp1", Some("inProgressApp1"), 3L, "test", Some("attempt1"))
)
clock.setTime(firstFileModifiedTime)
provider.checkForLogs()
val log2 = newLogFile("inProgressApp2", None, inProgress = true)
writeFile(log2, None,
SparkListenerApplicationStart(
"inProgressApp2", Some("inProgressApp2"), 23L, "test2", Some("attempt2"))
)
clock.setTime(secondFileModifiedTime)
provider.checkForLogs()
// This should not trigger any cleanup
updateAndCheck(provider) { list =>
list.size should be(2)
}
// Should trigger cleanup for first file but not second one
clock.setTime(firstFileModifiedTime + maxAge + 1)
updateAndCheck(provider) { list =>
list.size should be(1)
}
assert(!log1.exists())
assert(log2.exists())
// Should cleanup the second file as well.
clock.setTime(secondFileModifiedTime + maxAge + 1)
updateAndCheck(provider) { list =>
list.size should be(0)
}
assert(!log1.exists())
assert(!log2.exists())
}
test("Event log copy") {
val provider = new FsHistoryProvider(createTestConf())
val logs = (1 to 2).map { i =>
val log = newLogFile("downloadApp1", Some(s"attempt$i"), inProgress = false)
writeFile(log, None,
SparkListenerApplicationStart(
"downloadApp1", Some("downloadApp1"), 5000L * i, "test", Some(s"attempt$i")),
SparkListenerApplicationEnd(5001L * i)
)
log
}
provider.checkForLogs()
(1 to 2).foreach { i =>
val underlyingStream = new ByteArrayOutputStream()
val outputStream = new ZipOutputStream(underlyingStream)
provider.writeEventLogs("downloadApp1", Some(s"attempt$i"), outputStream)
outputStream.close()
val inputStream = new ZipInputStream(new ByteArrayInputStream(underlyingStream.toByteArray))
var totalEntries = 0
var entry = inputStream.getNextEntry
entry should not be null
while (entry != null) {
val actual = new String(ByteStreams.toByteArray(inputStream), StandardCharsets.UTF_8)
val expected =
Files.toString(logs.find(_.getName == entry.getName).get, StandardCharsets.UTF_8)
actual should be (expected)
totalEntries += 1
entry = inputStream.getNextEntry
}
totalEntries should be (1)
inputStream.close()
}
}
test("driver log cleaner") {
val firstFileModifiedTime = TimeUnit.SECONDS.toMillis(10)
val secondFileModifiedTime = TimeUnit.SECONDS.toMillis(20)
val maxAge = TimeUnit.SECONDS.toSeconds(40)
val clock = new ManualClock(0)
val testConf = new SparkConf()
testConf.set(HISTORY_LOG_DIR, Utils.createTempDir(namePrefix = "eventLog").getAbsolutePath())
testConf.set(DRIVER_LOG_DFS_DIR, testDir.getAbsolutePath())
testConf.set(DRIVER_LOG_CLEANER_ENABLED, true)
testConf.set(DRIVER_LOG_CLEANER_INTERVAL, maxAge / 4)
testConf.set(MAX_DRIVER_LOG_AGE_S, maxAge)
val provider = new FsHistoryProvider(testConf, clock)
val log1 = FileUtils.getFile(testDir, "1" + DriverLogger.DRIVER_LOG_FILE_SUFFIX)
createEmptyFile(log1)
clock.setTime(firstFileModifiedTime)
log1.setLastModified(clock.getTimeMillis())
provider.cleanDriverLogs()
val log2 = FileUtils.getFile(testDir, "2" + DriverLogger.DRIVER_LOG_FILE_SUFFIX)
createEmptyFile(log2)
val log3 = FileUtils.getFile(testDir, "3" + DriverLogger.DRIVER_LOG_FILE_SUFFIX)
createEmptyFile(log3)
clock.setTime(secondFileModifiedTime)
log2.setLastModified(clock.getTimeMillis())
log3.setLastModified(clock.getTimeMillis())
// This should not trigger any cleanup
provider.cleanDriverLogs()
provider.listing.view(classOf[LogInfo]).iterator().asScala.toSeq.size should be(3)
// Should trigger cleanup for first file but not second one
clock.setTime(firstFileModifiedTime + TimeUnit.SECONDS.toMillis(maxAge) + 1)
provider.cleanDriverLogs()
provider.listing.view(classOf[LogInfo]).iterator().asScala.toSeq.size should be(2)
assert(!log1.exists())
assert(log2.exists())
assert(log3.exists())
// Update the third file length while keeping the original modified time
Files.write("Add logs to file".getBytes(), log3)
log3.setLastModified(secondFileModifiedTime)
// Should cleanup the second file but not the third file, as filelength changed.
clock.setTime(secondFileModifiedTime + TimeUnit.SECONDS.toMillis(maxAge) + 1)
provider.cleanDriverLogs()
provider.listing.view(classOf[LogInfo]).iterator().asScala.toSeq.size should be(1)
assert(!log1.exists())
assert(!log2.exists())
assert(log3.exists())
// Should cleanup the third file as well.
clock.setTime(secondFileModifiedTime + 2 * TimeUnit.SECONDS.toMillis(maxAge) + 2)
provider.cleanDriverLogs()
provider.listing.view(classOf[LogInfo]).iterator().asScala.toSeq.size should be(0)
assert(!log3.exists())
}
test("SPARK-8372: new logs with no app ID are ignored") {
val provider = new FsHistoryProvider(createTestConf())
// Write a new log file without an app id, to make sure it's ignored.
val logFile1 = newLogFile("app1", None, inProgress = true)
writeFile(logFile1, None,
SparkListenerLogStart("1.4")
)
updateAndCheck(provider) { list =>
list.size should be (0)
}
}
test("provider correctly checks whether fs is in safe mode") {
val provider = spy(new FsHistoryProvider(createTestConf()))
val dfs = mock(classOf[DistributedFileSystem])
// Asserts that safe mode is false because we can't really control the return value of the mock,
// since the API is different between hadoop 1 and 2.
assert(!provider.isFsInSafeMode(dfs))
}
test("provider waits for safe mode to finish before initializing") {
val clock = new ManualClock()
val provider = new SafeModeTestProvider(createTestConf(), clock)
val initThread = provider.initialize()
try {
provider.getConfig().keys should contain ("HDFS State")
clock.setTime(5000)
provider.getConfig().keys should contain ("HDFS State")
provider.inSafeMode = false
clock.setTime(10000)
eventually(timeout(3.second), interval(10.milliseconds)) {
provider.getConfig().keys should not contain ("HDFS State")
}
} finally {
provider.stop()
}
}
testRetry("provider reports error after FS leaves safe mode") {
testDir.delete()
val clock = new ManualClock()
val provider = new SafeModeTestProvider(createTestConf(), clock)
val errorHandler = mock(classOf[Thread.UncaughtExceptionHandler])
provider.startSafeModeCheckThread(Some(errorHandler))
try {
provider.inSafeMode = false
clock.setTime(10000)
eventually(timeout(3.second), interval(10.milliseconds)) {
verify(errorHandler).uncaughtException(any(), any())
}
} finally {
provider.stop()
}
}
test("ignore hidden files") {
// FsHistoryProvider should ignore hidden files. (It even writes out a hidden file itself
// that should be ignored).
// write out one totally bogus hidden file
val hiddenGarbageFile = new File(testDir, ".garbage")
Utils.tryWithResource(new PrintWriter(hiddenGarbageFile)) { out =>
// scalastyle:off println
out.println("GARBAGE")
// scalastyle:on println
}
// also write out one real event log file, but since it's a hidden file, we shouldn't read it
val tmpNewAppFile = newLogFile("hidden", None, inProgress = false)
val hiddenNewAppFile = new File(tmpNewAppFile.getParentFile, "." + tmpNewAppFile.getName)
tmpNewAppFile.renameTo(hiddenNewAppFile)
// and write one real file, which should still get picked up just fine
val newAppComplete = newLogFile("real-app", None, inProgress = false)
writeFile(newAppComplete, None,
SparkListenerApplicationStart(newAppComplete.getName(), Some("new-app-complete"), 1L, "test",
None),
SparkListenerApplicationEnd(5L)
)
val provider = new FsHistoryProvider(createTestConf())
updateAndCheck(provider) { list =>
list.size should be (1)
list(0).name should be ("real-app")
}
}
test("support history server ui admin acls") {
def createAndCheck(conf: SparkConf, properties: (String, String)*)
(checkFn: SecurityManager => Unit): Unit = {
// Empty the testDir for each test.
if (testDir.exists() && testDir.isDirectory) {
testDir.listFiles().foreach { f => if (f.isFile) f.delete() }
}
var provider: FsHistoryProvider = null
try {
provider = new FsHistoryProvider(conf)
val log = newLogFile("app1", Some("attempt1"), inProgress = false)
writeFile(log, None,
SparkListenerApplicationStart("app1", Some("app1"), System.currentTimeMillis(),
"test", Some("attempt1")),
SparkListenerEnvironmentUpdate(Map(
"Spark Properties" -> properties.toSeq,
"Hadoop Properties" -> Seq.empty,
"JVM Information" -> Seq.empty,
"System Properties" -> Seq.empty,
"Classpath Entries" -> Seq.empty
)),
SparkListenerApplicationEnd(System.currentTimeMillis()))
provider.checkForLogs()
val appUi = provider.getAppUI("app1", Some("attempt1"))
assert(appUi.nonEmpty)
val securityManager = appUi.get.ui.securityManager
checkFn(securityManager)
} finally {
if (provider != null) {
provider.stop()
}
}
}
// Test both history ui admin acls and application acls are configured.
val conf1 = createTestConf()
.set(HISTORY_SERVER_UI_ACLS_ENABLE, true)
.set(HISTORY_SERVER_UI_ADMIN_ACLS, Seq("user1", "user2"))
.set(HISTORY_SERVER_UI_ADMIN_ACLS_GROUPS, Seq("group1"))
.set(USER_GROUPS_MAPPING, classOf[TestGroupsMappingProvider].getName)
createAndCheck(conf1, (ADMIN_ACLS.key, "user"), (ADMIN_ACLS_GROUPS.key, "group")) {
securityManager =>
// Test whether user has permission to access UI.
securityManager.checkUIViewPermissions("user1") should be (true)
securityManager.checkUIViewPermissions("user2") should be (true)
securityManager.checkUIViewPermissions("user") should be (true)
securityManager.checkUIViewPermissions("abc") should be (false)
// Test whether user with admin group has permission to access UI.
securityManager.checkUIViewPermissions("user3") should be (true)
securityManager.checkUIViewPermissions("user4") should be (true)
securityManager.checkUIViewPermissions("user5") should be (true)
securityManager.checkUIViewPermissions("user6") should be (false)
}
// Test only history ui admin acls are configured.
val conf2 = createTestConf()
.set(HISTORY_SERVER_UI_ACLS_ENABLE, true)
.set(HISTORY_SERVER_UI_ADMIN_ACLS, Seq("user1", "user2"))
.set(HISTORY_SERVER_UI_ADMIN_ACLS_GROUPS, Seq("group1"))
.set(USER_GROUPS_MAPPING, classOf[TestGroupsMappingProvider].getName)
createAndCheck(conf2) { securityManager =>
// Test whether user has permission to access UI.
securityManager.checkUIViewPermissions("user1") should be (true)
securityManager.checkUIViewPermissions("user2") should be (true)
// Check the unknown "user" should return false
securityManager.checkUIViewPermissions("user") should be (false)
// Test whether user with admin group has permission to access UI.
securityManager.checkUIViewPermissions("user3") should be (true)
securityManager.checkUIViewPermissions("user4") should be (true)
// Check the "user5" without mapping relation should return false
securityManager.checkUIViewPermissions("user5") should be (false)
}
// Test neither history ui admin acls nor application acls are configured.
val conf3 = createTestConf()
.set(HISTORY_SERVER_UI_ACLS_ENABLE, true)
.set(USER_GROUPS_MAPPING, classOf[TestGroupsMappingProvider].getName)
createAndCheck(conf3) { securityManager =>
// Test whether user has permission to access UI.
securityManager.checkUIViewPermissions("user1") should be (false)
securityManager.checkUIViewPermissions("user2") should be (false)
securityManager.checkUIViewPermissions("user") should be (false)
// Test whether user with admin group has permission to access UI.
// Check should be failed since we don't have acl group settings.
securityManager.checkUIViewPermissions("user3") should be (false)
securityManager.checkUIViewPermissions("user4") should be (false)
securityManager.checkUIViewPermissions("user5") should be (false)
}
}
test("mismatched version discards old listing") {
val conf = createTestConf()
val oldProvider = new FsHistoryProvider(conf)
val logFile1 = newLogFile("app1", None, inProgress = false)
writeFile(logFile1, None,
SparkListenerLogStart("2.3"),
SparkListenerApplicationStart("test", Some("test"), 1L, "test", None),
SparkListenerApplicationEnd(5L)
)
updateAndCheck(oldProvider) { list =>
list.size should be (1)
}
assert(oldProvider.listing.count(classOf[ApplicationInfoWrapper]) === 1)
// Manually overwrite the version in the listing db; this should cause the new provider to
// discard all data because the versions don't match.
val meta = new FsHistoryProviderMetadata(FsHistoryProvider.CURRENT_LISTING_VERSION + 1,
AppStatusStore.CURRENT_VERSION, conf.get(LOCAL_STORE_DIR).get)
oldProvider.listing.setMetadata(meta)
oldProvider.stop()
val mismatchedVersionProvider = new FsHistoryProvider(conf)
assert(mismatchedVersionProvider.listing.count(classOf[ApplicationInfoWrapper]) === 0)
}
test("invalidate cached UI") {
val provider = new FsHistoryProvider(createTestConf())
val appId = "new1"
// Write an incomplete app log.
val appLog = newLogFile(appId, None, inProgress = true)
writeFile(appLog, None,
SparkListenerApplicationStart(appId, Some(appId), 1L, "test", None)
)
provider.checkForLogs()
// Load the app UI.
val oldUI = provider.getAppUI(appId, None)
assert(oldUI.isDefined)
intercept[NoSuchElementException] {
oldUI.get.ui.store.job(0)
}
// Add more info to the app log, and trigger the provider to update things.
writeFile(appLog, None,
SparkListenerApplicationStart(appId, Some(appId), 1L, "test", None),
SparkListenerJobStart(0, 1L, Nil, null)
)
provider.checkForLogs()
// Manually detach the old UI; ApplicationCache would do this automatically in a real SHS
// when the app's UI was requested.
provider.onUIDetached(appId, None, oldUI.get.ui)
// Load the UI again and make sure we can get the new info added to the logs.
val freshUI = provider.getAppUI(appId, None)
assert(freshUI.isDefined)
assert(freshUI != oldUI)
freshUI.get.ui.store.job(0)
}
test("clean up stale app information") {
withTempDir { storeDir =>
val conf = createTestConf().set(LOCAL_STORE_DIR, storeDir.getAbsolutePath())
val clock = new ManualClock()
val provider = spy(new FsHistoryProvider(conf, clock))
val appId = "new1"
// Write logs for two app attempts.
clock.advance(1)
val attempt1 = newLogFile(appId, Some("1"), inProgress = false)
writeFile(attempt1, None,
SparkListenerApplicationStart(appId, Some(appId), 1L, "test", Some("1")),
SparkListenerJobStart(0, 1L, Nil, null),
SparkListenerApplicationEnd(5L)
)
val attempt2 = newLogFile(appId, Some("2"), inProgress = false)
writeFile(attempt2, None,
SparkListenerApplicationStart(appId, Some(appId), 1L, "test", Some("2")),
SparkListenerJobStart(0, 1L, Nil, null),
SparkListenerApplicationEnd(5L)
)
updateAndCheck(provider) { list =>
assert(list.size === 1)
assert(list(0).id === appId)
assert(list(0).attempts.size === 2)
}
// Load the app's UI.
val ui = provider.getAppUI(appId, Some("1"))
assert(ui.isDefined)
// Delete the underlying log file for attempt 1 and rescan. The UI should go away, but since
// attempt 2 still exists, listing data should be there.
clock.advance(1)
attempt1.delete()
updateAndCheck(provider) { list =>
assert(list.size === 1)
assert(list(0).id === appId)
assert(list(0).attempts.size === 1)
}
assert(!ui.get.valid)
assert(provider.getAppUI(appId, None) === None)
// Delete the second attempt's log file. Now everything should go away.
clock.advance(1)
attempt2.delete()
updateAndCheck(provider) { list =>
assert(list.isEmpty)
}
}
}
test("SPARK-21571: clean up removes invalid history files") {
val clock = new ManualClock()
val conf = createTestConf().set(MAX_LOG_AGE_S.key, s"2d")
val provider = new FsHistoryProvider(conf, clock)
// Create 0-byte size inprogress and complete files
var logCount = 0
var validLogCount = 0
val emptyInProgress = newLogFile("emptyInprogressLogFile", None, inProgress = true)
emptyInProgress.createNewFile()
emptyInProgress.setLastModified(clock.getTimeMillis())
logCount += 1
val slowApp = newLogFile("slowApp", None, inProgress = true)
slowApp.createNewFile()
slowApp.setLastModified(clock.getTimeMillis())
logCount += 1
val emptyFinished = newLogFile("emptyFinishedLogFile", None, inProgress = false)
emptyFinished.createNewFile()
emptyFinished.setLastModified(clock.getTimeMillis())
logCount += 1
// Create an incomplete log file, has an end record but no start record.
val corrupt = newLogFile("nonEmptyCorruptLogFile", None, inProgress = false)
writeFile(corrupt, None, SparkListenerApplicationEnd(0))
corrupt.setLastModified(clock.getTimeMillis())
logCount += 1
provider.checkForLogs()
provider.cleanLogs()
assert(new File(testDir.toURI).listFiles().size === logCount)
// Move the clock forward 1 day and scan the files again. They should still be there.
clock.advance(TimeUnit.DAYS.toMillis(1))
provider.checkForLogs()
provider.cleanLogs()
assert(new File(testDir.toURI).listFiles().size === logCount)
// Update the slow app to contain valid info. Code should detect the change and not clean
// it up.
writeFile(slowApp, None,
SparkListenerApplicationStart(slowApp.getName(), Some(slowApp.getName()), 1L, "test", None))
slowApp.setLastModified(clock.getTimeMillis())
validLogCount += 1
// Move the clock forward another 2 days and scan the files again. This time the cleaner should
// pick up the invalid files and get rid of them.
clock.advance(TimeUnit.DAYS.toMillis(2))
provider.checkForLogs()
provider.cleanLogs()
assert(new File(testDir.toURI).listFiles().size === validLogCount)
}
test("always find end event for finished apps") {
// Create a log file where the end event is before the configured chunk to be reparsed at
// the end of the file. The correct listing should still be generated.
val log = newLogFile("end-event-test", None, inProgress = false)
writeFile(log, None,
Seq(
SparkListenerApplicationStart("end-event-test", Some("end-event-test"), 1L, "test", None),
SparkListenerEnvironmentUpdate(Map(
"Spark Properties" -> Seq.empty,
"Hadoop Properties" -> Seq.empty,
"JVM Information" -> Seq.empty,
"System Properties" -> Seq.empty,
"Classpath Entries" -> Seq.empty
)),
SparkListenerApplicationEnd(5L)
) ++ (1 to 1000).map { i => SparkListenerJobStart(i, i, Nil) }: _*)
val conf = createTestConf().set(END_EVENT_REPARSE_CHUNK_SIZE.key, s"1k")
val provider = new FsHistoryProvider(conf)
updateAndCheck(provider) { list =>
assert(list.size === 1)
assert(list(0).attempts.size === 1)
assert(list(0).attempts(0).completed)
}
}
test("parse event logs with optimizations off") {
val conf = createTestConf()
.set(END_EVENT_REPARSE_CHUNK_SIZE, 0L)
.set(FAST_IN_PROGRESS_PARSING, false)
val provider = new FsHistoryProvider(conf)
val complete = newLogFile("complete", None, inProgress = false)
writeFile(complete, None,
SparkListenerApplicationStart("complete", Some("complete"), 1L, "test", None),
SparkListenerApplicationEnd(5L)
)
val incomplete = newLogFile("incomplete", None, inProgress = true)
writeFile(incomplete, None,
SparkListenerApplicationStart("incomplete", Some("incomplete"), 1L, "test", None)
)
updateAndCheck(provider) { list =>
list.size should be (2)
list.count(_.attempts.head.completed) should be (1)
}
}
test("SPARK-24948: ignore files we don't have read permission on") {
val clock = new ManualClock(1533132471)
val provider = new FsHistoryProvider(createTestConf(), clock)
val accessDenied = newLogFile("accessDenied", None, inProgress = false)
writeFile(accessDenied, None,
SparkListenerApplicationStart("accessDenied", Some("accessDenied"), 1L, "test", None))
val accessGranted = newLogFile("accessGranted", None, inProgress = false)
writeFile(accessGranted, None,
SparkListenerApplicationStart("accessGranted", Some("accessGranted"), 1L, "test", None),
SparkListenerApplicationEnd(5L))
var isReadable = false
val mockedFs = spy(provider.fs)
doThrow(new AccessControlException("Cannot read accessDenied file")).when(mockedFs).open(
argThat((path: Path) => path.getName.toLowerCase(Locale.ROOT) == "accessdenied" &&
!isReadable))
val mockedProvider = spy(provider)
when(mockedProvider.fs).thenReturn(mockedFs)
updateAndCheck(mockedProvider) { list =>
list.size should be(1)
}
// Doing 2 times in order to check the inaccessibleList filter too
updateAndCheck(mockedProvider) { list =>
list.size should be(1)
}
val accessDeniedPath = new Path(accessDenied.getPath)
assert(!mockedProvider.isAccessible(accessDeniedPath))
clock.advance(24 * 60 * 60 * 1000 + 1) // add a bit more than 1d
isReadable = true
mockedProvider.cleanLogs()
updateAndCheck(mockedProvider) { list =>
assert(mockedProvider.isAccessible(accessDeniedPath))
assert(list.exists(_.name == "accessDenied"))
assert(list.exists(_.name == "accessGranted"))
list.size should be(2)
}
}
test("check in-progress event logs absolute length") {
val path = new Path("testapp.inprogress")
val provider = new FsHistoryProvider(createTestConf())
val mockedProvider = spy(provider)
val mockedFs = mock(classOf[FileSystem])
val in = mock(classOf[FSDataInputStream])
val dfsIn = mock(classOf[DFSInputStream])
when(mockedProvider.fs).thenReturn(mockedFs)
when(mockedFs.open(path)).thenReturn(in)
when(in.getWrappedStream).thenReturn(dfsIn)
when(dfsIn.getFileLength).thenReturn(200)
// FileStatus.getLen is more than logInfo fileSize
var fileStatus = new FileStatus(200, false, 0, 0, 0, path)
when(mockedFs.getFileStatus(path)).thenReturn(fileStatus)
var logInfo = new LogInfo(path.toString, 0, LogType.EventLogs, Some("appId"),
Some("attemptId"), 100, None, None, false)
var reader = EventLogFileReader(mockedFs, path)
assert(reader.isDefined)
assert(mockedProvider.shouldReloadLog(logInfo, reader.get))
fileStatus = new FileStatus()
fileStatus.setPath(path)
when(mockedFs.getFileStatus(path)).thenReturn(fileStatus)
// DFSInputStream.getFileLength is more than logInfo fileSize
logInfo = new LogInfo(path.toString, 0, LogType.EventLogs, Some("appId"),
Some("attemptId"), 100, None, None, false)
reader = EventLogFileReader(mockedFs, path)
assert(reader.isDefined)
assert(mockedProvider.shouldReloadLog(logInfo, reader.get))
// DFSInputStream.getFileLength is equal to logInfo fileSize
logInfo = new LogInfo(path.toString, 0, LogType.EventLogs, Some("appId"),
Some("attemptId"), 200, None, None, false)
reader = EventLogFileReader(mockedFs, path)
assert(reader.isDefined)
assert(!mockedProvider.shouldReloadLog(logInfo, reader.get))
// in.getWrappedStream returns other than DFSInputStream
val bin = mock(classOf[BufferedInputStream])
when(in.getWrappedStream).thenReturn(bin)
reader = EventLogFileReader(mockedFs, path)
assert(reader.isDefined)
assert(!mockedProvider.shouldReloadLog(logInfo, reader.get))
// fs.open throws exception
when(mockedFs.open(path)).thenThrow(new IOException("Throwing intentionally"))
reader = EventLogFileReader(mockedFs, path)
assert(reader.isDefined)
assert(!mockedProvider.shouldReloadLog(logInfo, reader.get))
}
test("log cleaner with the maximum number of log files") {
val clock = new ManualClock(0)
(5 to 0 by -1).foreach { num =>
val log1_1 = newLogFile("app1", Some("attempt1"), inProgress = false)
writeFile(log1_1, None,
SparkListenerApplicationStart("app1", Some("app1"), 1L, "test", Some("attempt1")),
SparkListenerApplicationEnd(2L)
)
log1_1.setLastModified(2L)
val log2_1 = newLogFile("app2", Some("attempt1"), inProgress = false)
writeFile(log2_1, None,
SparkListenerApplicationStart("app2", Some("app2"), 3L, "test", Some("attempt1")),
SparkListenerApplicationEnd(4L)
)
log2_1.setLastModified(4L)
val log3_1 = newLogFile("app3", Some("attempt1"), inProgress = false)
writeFile(log3_1, None,
SparkListenerApplicationStart("app3", Some("app3"), 5L, "test", Some("attempt1")),
SparkListenerApplicationEnd(6L)
)
log3_1.setLastModified(6L)
val log1_2_incomplete = newLogFile("app1", Some("attempt2"), inProgress = false)
writeFile(log1_2_incomplete, None,
SparkListenerApplicationStart("app1", Some("app1"), 7L, "test", Some("attempt2"))
)
log1_2_incomplete.setLastModified(8L)
val log3_2 = newLogFile("app3", Some("attempt2"), inProgress = false)
writeFile(log3_2, None,
SparkListenerApplicationStart("app3", Some("app3"), 9L, "test", Some("attempt2")),
SparkListenerApplicationEnd(10L)
)
log3_2.setLastModified(10L)
val provider = new FsHistoryProvider(createTestConf().set(MAX_LOG_NUM.key, s"$num"), clock)
updateAndCheck(provider) { list =>
assert(log1_1.exists() == (num > 4))
assert(log1_2_incomplete.exists()) // Always exists for all configurations
assert(log2_1.exists() == (num > 3))
assert(log3_1.exists() == (num > 2))
assert(log3_2.exists() == (num > 2))
}
}
}
test("backwards compatibility with LogInfo from Spark 2.4") {
case class LogInfoV24(
logPath: String,
lastProcessed: Long,
appId: Option[String],
attemptId: Option[String],
fileSize: Long)
val oldObj = LogInfoV24("dummy", System.currentTimeMillis(), Some("hello"),
Some("attempt1"), 100)
val serializer = new KVStoreScalaSerializer()
val serializedOldObj = serializer.serialize(oldObj)
val deserializedOldObj = serializer.deserialize(serializedOldObj, classOf[LogInfo])
assert(deserializedOldObj.logPath === oldObj.logPath)
assert(deserializedOldObj.lastProcessed === oldObj.lastProcessed)
assert(deserializedOldObj.appId === oldObj.appId)
assert(deserializedOldObj.attemptId === oldObj.attemptId)
assert(deserializedOldObj.fileSize === oldObj.fileSize)
// SPARK-25118: added logType: LogType.Value - expected 'null' on old format
assert(deserializedOldObj.logType === null)
// SPARK-28869: added lastIndex: Option[Long], isComplete: Boolean - expected 'None' and
// 'false' on old format. The default value for isComplete is the wrong value for a completed app,
// but the value will be corrected once checkForLogs is called.
assert(deserializedOldObj.lastIndex === None)
assert(deserializedOldObj.isComplete === false)
}
test("SPARK-29755 LogInfo should be serialized/deserialized by jackson properly") {
def assertSerDe(serializer: KVStoreScalaSerializer, info: LogInfo): Unit = {
val infoAfterSerDe = serializer.deserialize(serializer.serialize(info), classOf[LogInfo])
assert(infoAfterSerDe === info)
assertOptionAfterSerde(infoAfterSerDe.lastIndex, info.lastIndex)
}
val serializer = new KVStoreScalaSerializer()
val logInfoWithIndexAsNone = LogInfo("dummy", 0, LogType.EventLogs, Some("appId"),
Some("attemptId"), 100, None, None, false)
assertSerDe(serializer, logInfoWithIndexAsNone)
val logInfoWithIndex = LogInfo("dummy", 0, LogType.EventLogs, Some("appId"),
Some("attemptId"), 100, Some(3), None, false)
assertSerDe(serializer, logInfoWithIndex)
}
test("SPARK-29755 AttemptInfoWrapper should be serialized/deserialized by jackson properly") {
def assertSerDe(serializer: KVStoreScalaSerializer, attempt: AttemptInfoWrapper): Unit = {
val attemptAfterSerDe = serializer.deserialize(serializer.serialize(attempt),
classOf[AttemptInfoWrapper])
assert(attemptAfterSerDe.info === attempt.info)
// skip comparing some fields, as they've not triggered SPARK-29755
assertOptionAfterSerde(attemptAfterSerDe.lastIndex, attempt.lastIndex)
}
val serializer = new KVStoreScalaSerializer()
val appInfo = new ApplicationAttemptInfo(None, new Date(1), new Date(1), new Date(1),
10, "spark", false, "dummy")
val attemptInfoWithIndexAsNone = new AttemptInfoWrapper(appInfo, "dummyPath", 10, None,
None, None, None, None)
assertSerDe(serializer, attemptInfoWithIndexAsNone)
val attemptInfoWithIndex = new AttemptInfoWrapper(appInfo, "dummyPath", 10, Some(1),
None, None, None, None)
assertSerDe(serializer, attemptInfoWithIndex)
}
test("SPARK-29043: clean up specified event log") {
val clock = new ManualClock()
val conf = createTestConf().set(MAX_LOG_AGE_S, 0L).set(CLEANER_ENABLED, true)
val provider = new FsHistoryProvider(conf, clock)
// create an invalid application log file
val inValidLogFile = newLogFile("inValidLogFile", None, inProgress = true)
inValidLogFile.createNewFile()
writeFile(inValidLogFile, None,
SparkListenerApplicationStart(inValidLogFile.getName, None, 1L, "test", None))
inValidLogFile.setLastModified(clock.getTimeMillis())
// create a valid application log file
val validLogFile = newLogFile("validLogFile", None, inProgress = true)
validLogFile.createNewFile()
writeFile(validLogFile, None,
SparkListenerApplicationStart(validLogFile.getName, Some("local_123"), 1L, "test", None))
validLogFile.setLastModified(clock.getTimeMillis())
provider.checkForLogs()
// The invalid application log file would be cleaned by checkAndCleanLog().
assert(new File(testDir.toURI).listFiles().size === 1)
clock.advance(1)
// cleanLogs() would clean the valid application log file.
provider.cleanLogs()
assert(new File(testDir.toURI).listFiles().size === 0)
}
private def assertOptionAfterSerde(opt: Option[Long], expected: Option[Long]): Unit = {
if (expected.isEmpty) {
assert(opt.isEmpty)
} else {
// The issue happens only when the value in Option is being unboxed. Here we ensure unboxing
// to Long succeeds: even though IDE suggests `.toLong` is redundant, direct comparison
// doesn't trigger unboxing and passes even without SPARK-29755, so don't remove
// `.toLong` below. Please refer SPARK-29755 for more details.
assert(opt.get.toLong === expected.get.toLong)
}
}
test("compact event log files") {
def verifyEventLogFiles(
fs: FileSystem,
rootPath: String,
expectedIndexForCompact: Option[Long],
expectedIndicesForNonCompact: Seq[Long]): Unit = {
val reader = EventLogFileReader(fs, new Path(rootPath)).get
var logFiles = reader.listEventLogFiles
expectedIndexForCompact.foreach { idx =>
val headFile = logFiles.head
assert(EventLogFileWriter.isCompacted(headFile.getPath))
assert(idx == RollingEventLogFilesWriter.getEventLogFileIndex(headFile.getPath.getName))
logFiles = logFiles.drop(1)
}
assert(logFiles.size === expectedIndicesForNonCompact.size)
logFiles.foreach { logFile =>
assert(RollingEventLogFilesWriter.isEventLogFile(logFile))
assert(!EventLogFileWriter.isCompacted(logFile.getPath))
}
val indices = logFiles.map { logFile =>
RollingEventLogFilesWriter.getEventLogFileIndex(logFile.getPath.getName)
}
assert(expectedIndicesForNonCompact === indices)
}
withTempDir { dir =>
val conf = createTestConf()
conf.set(HISTORY_LOG_DIR, dir.getAbsolutePath)
conf.set(EVENT_LOG_ROLLING_MAX_FILES_TO_RETAIN, 1)
conf.set(EVENT_LOG_COMPACTION_SCORE_THRESHOLD, 0.0d)
val hadoopConf = SparkHadoopUtil.newConfiguration(conf)
val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf)
val provider = new FsHistoryProvider(conf)
val writer = new RollingEventLogFilesWriter("app", None, dir.toURI, conf, hadoopConf)
writer.start()
// writing event log file 1 - don't compact for now
writeEventsToRollingWriter(writer, Seq(
SparkListenerApplicationStart("app", Some("app"), 0, "user", None),
SparkListenerJobStart(1, 0, Seq.empty)), rollFile = false)
updateAndCheck(provider) { _ =>
verifyEventLogFiles(fs, writer.logPath, None, Seq(1))
val info = provider.listing.read(classOf[LogInfo], writer.logPath)
assert(info.lastEvaluatedForCompaction === Some(1))
}
// writing event log file 2 - compact the event log file 1 into 1.compact
writeEventsToRollingWriter(writer, Seq.empty, rollFile = true)
writeEventsToRollingWriter(writer, Seq(SparkListenerUnpersistRDD(1),
SparkListenerJobEnd(1, 1, JobSucceeded)), rollFile = false)
updateAndCheck(provider) { _ =>
verifyEventLogFiles(fs, writer.logPath, Some(1), Seq(2))
val info = provider.listing.read(classOf[LogInfo], writer.logPath)
assert(info.lastEvaluatedForCompaction === Some(2))
}
// writing event log file 3 - compact two files - 1.compact & 2 into one, 2.compact
writeEventsToRollingWriter(writer, Seq.empty, rollFile = true)
writeEventsToRollingWriter(writer, Seq(
SparkListenerExecutorAdded(3, "exec1", new ExecutorInfo("host1", 1, Map.empty)),
SparkListenerJobStart(2, 4, Seq.empty),
SparkListenerJobEnd(2, 5, JobSucceeded)), rollFile = false)
writer.stop()
updateAndCheck(provider) { _ =>
verifyEventLogFiles(fs, writer.logPath, Some(2), Seq(3))
val info = provider.listing.read(classOf[LogInfo], writer.logPath)
assert(info.lastEvaluatedForCompaction === Some(3))
val store = new InMemoryStore
val appStore = new AppStatusStore(store)
val reader = EventLogFileReader(fs, new Path(writer.logPath)).get
provider.rebuildAppStore(store, reader, 0L)
// replayed store doesn't have any job, as events for job are removed while compacting
intercept[NoSuchElementException] {
appStore.job(1)
}
// but other events should be available even though they were in the original files to compact
val appInfo = appStore.applicationInfo()
assert(appInfo.id === "app")
assert(appInfo.name === "app")
// All events in retained file(s) should be available, including events which would have
// been filtered out if compaction is applied. e.g. finished jobs, removed executors, etc.
val exec1 = appStore.executorSummary("exec1")
assert(exec1.hostPort === "host1")
val job2 = appStore.job(2)
assert(job2.status === JobExecutionStatus.SUCCEEDED)
}
}
}
/**
* Asks the provider to check for logs and calls a function to perform checks on the updated
* app list. Example:
*
* updateAndCheck(provider) { list =>
* // asserts
* }
*/
private def updateAndCheck(provider: FsHistoryProvider)
(checkFn: Seq[ApplicationInfo] => Unit): Unit = {
provider.checkForLogs()
provider.cleanLogs()
checkFn(provider.getListing().toSeq)
}
private def writeFile(file: File, codec: Option[CompressionCodec],
events: SparkListenerEvent*) = {
val fstream = new FileOutputStream(file)
val cstream = codec.map(_.compressedContinuousOutputStream(fstream)).getOrElse(fstream)
val bstream = new BufferedOutputStream(cstream)
val metadata = SparkListenerLogStart(org.apache.spark.SPARK_VERSION)
val eventJson = JsonProtocol.logStartToJson(metadata)
val metadataJson = compact(eventJson) + "\n"
bstream.write(metadataJson.getBytes(StandardCharsets.UTF_8))
val writer = new OutputStreamWriter(bstream, StandardCharsets.UTF_8)
Utils.tryWithSafeFinally {
events.foreach(e => writer.write(compact(render(JsonProtocol.sparkEventToJson(e))) + "\n"))
} {
writer.close()
}
}
private def createEmptyFile(file: File) = {
new FileOutputStream(file).close()
}
private def createTestConf(inMemory: Boolean = false): SparkConf = {
val conf = new SparkConf()
.set(HISTORY_LOG_DIR, testDir.getAbsolutePath())
.set(FAST_IN_PROGRESS_PARSING, true)
if (!inMemory) {
conf.set(LOCAL_STORE_DIR, Utils.createTempDir().getAbsolutePath())
}
conf
}
private def createTestExecutorInfo(
appId: String,
user: String,
executorSeqNum: Int,
includingLogFiles: Boolean = true): ExecutorInfo = {
val host = s"host$executorSeqNum"
val container = s"container$executorSeqNum"
val cluster = s"cluster$executorSeqNum"
val logUrlPrefix = s"http://$host:8888/$appId/$container/origin"
val executorLogUrlMap = Map("stdout" -> s"$logUrlPrefix/stdout",
"stderr" -> s"$logUrlPrefix/stderr")
val extraAttributes = if (includingLogFiles) Map("LOG_FILES" -> "stdout,stderr") else Map.empty
val executorAttributes = Map("CONTAINER_ID" -> container, "CLUSTER_ID" -> cluster,
"USER" -> user) ++ extraAttributes
new ExecutorInfo(host, 1, executorLogUrlMap, executorAttributes)
}
private class SafeModeTestProvider(conf: SparkConf, clock: Clock)
extends FsHistoryProvider(conf, clock) {
@volatile var inSafeMode = true
// Skip initialization so that we can manually start the safe mode check thread.
private[history] override def initialize(): Thread = null
private[history] override def isFsInSafeMode(): Boolean = inSafeMode
}
}
class TestGroupsMappingProvider extends GroupMappingServiceProvider {
private val mappings = Map(
"user3" -> "group1",
"user4" -> "group1",
"user5" -> "group")
override def getGroups(username: String): Set[String] = {
mappings.get(username).map(Set(_)).getOrElse(Set.empty)
}
}
| dbtsai/spark | core/src/test/scala/org/apache/spark/deploy/history/FsHistoryProviderSuite.scala | Scala | apache-2.0 | 60,870 |
package org.ferrit.core.crawler
import org.ferrit.core.parser.ParserResult
import org.ferrit.core.uri.{CrawlUri, FetchJob}
import org.ferrit.core.model.CrawlJob
import org.ferrit.core.http.Response
object FetchMessages {
// Messages about decision to fetch or not
sealed abstract class CanFetch {
override def toString = this.getClass.getSimpleName.replaceAll("\\$","")
}
object SeenAlready extends CanFetch
object OkayToFetch extends CanFetch
object UriFilterRejected extends CanFetch
object RobotsExcluded extends CanFetch
object UnsupportedScheme extends CanFetch
case class FetchDecision(uri: CrawlUri, decision: CanFetch)
case class FetchQueued(f: FetchJob)
case class FetchScheduled(f: FetchJob, delayMs: Long)
case class FetchGo(f: FetchJob)
case class FetchResponse(uri: CrawlUri, statusCode:Int)
case class FetchError(uri: CrawlUri, t: Throwable)
/**
* A FetchResult is the primary deliverable of a fetch operation.
* The ParserResult is an Option because the fetch result needs to
* encapsulate a notion of fetch failure or inability to parse.
*/
case class FetchResult(
// The HTTP response status code
// is not the Response itself, to avoid memory leaks by handlers
// although this could be changed later
statusCode: Int,
fetchJob: FetchJob,
crawlJob: CrawlJob,
response: Response,
// Overall duration of the fetch
duration: Long,
// Parser result is Option because it is None if the fetch failed
parserResult: Option[ParserResult]
)
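/* Illustrative only (not part of the crawler API): a consumer such as an Akka actor
 * might pattern match on these messages along these lines:
 *
 *   def receive = {
 *     case FetchDecision(uri, OkayToFetch) => // queue the URI for fetching
 *     case FetchResult(status, fetchJob, crawlJob, _, millis, parserResult) =>
 *       parserResult match {
 *         case Some(parsed) => // hand the parsed document to downstream handlers
 *         case None => // the fetch failed or the content could not be parsed
 *       }
 *     case FetchError(uri, t) => // log the failure and continue
 *   }
 */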
case class DepthLimit(f: FetchJob)
} | reggoodwin/ferrit | src/main/scala/org/ferrit/core/crawler/FetchMessages.scala | Scala | mit | 1,602 |
/*
* Copyright 2013 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.utils.geohash
import com.vividsolutions.jts.geom.Point
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class GeomDistanceTest extends Specification with GeomDistance {
import Distance._
"1 kilometer" should {
"equate to 1000 meters" in {
val a : Double = 1 kilometer
val b : Double = 1000 meter
a must beCloseTo(b, 1e-6)
}
}
"Reference test Flinders Peak, AUS" should {
"be 54972.271 meters from Buninyong, AUS" in {
val ptFlindersPeak = WKTUtils.read("POINT(144.4248678889 -37.9510334167)").asInstanceOf[Point]
val ptBuninyong = WKTUtils.read("POINT(143.9264955278 -37.6528211389)").asInstanceOf[Point]
VincentyModel.getDistanceBetweenTwoPoints(ptFlindersPeak, ptBuninyong).getDistanceInMeters must beCloseTo(54972.271, 0.01)
}
}
"CCRi local" should {
"be 433.5 meters from Albemarle High School" in {
val ptCCRI = WKTUtils.read("POINT(-78.4953560 38.0752150)").asInstanceOf[Point]
val ptAHS = WKTUtils.read("POINT(-78.5002901 38.0754152)").asInstanceOf[Point]
VincentyModel.getDistanceBetweenTwoPoints(ptCCRI, ptAHS).getDistanceInMeters must beCloseTo(433.5, 0.01)
}
}
"CCRi remote" should {
"be 11422838.3 meters from Mount Potts, NZ" in {
val ptCCRI = WKTUtils.read("POINT(-78.4953560 38.0752150)").asInstanceOf[Point]
val ptEdoras = WKTUtils.read("POINT(170.919998 -43.498299)").asInstanceOf[Point]
VincentyModel.getDistanceBetweenTwoPoints(ptCCRI, ptEdoras).getDistanceInMeters must beCloseTo(14301344.142, 0.01)
}
}
"New Mexico" should {
"be 2300000 meters from CCRI" in {
val ptCCRI = WKTUtils.read("POINT(-78.4953560 38.0752150)").asInstanceOf[Point]
val ptNM = VincentyModel.moveWithBearingAndDistance(ptCCRI, -90.0, 2300000)
ptNM.getX must beCloseTo(-104.060, 0.01)
ptNM.getY must beCloseTo(35.236, 0.01)
val dist = VincentyModel.getDistanceBetweenTwoPoints(ptCCRI, ptNM).getDistanceInMeters
dist must beCloseTo(2300000, 0.1)
}
}
}
| jwkessi/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/geohash/GeomDistanceTest.scala | Scala | apache-2.0 | 2,820 |
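// Project Euler #9: find the Pythagorean triplet (a, b, c) with a + b + c = 1000 and
// report the product a * b * c; `limit` is the upper bound used for b in the search below.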
val limit = (1 to 1000).find(n => n + math.sqrt(n) >= 1000).get
val rs = for (b <- 2 until limit; a <- 1 until b; c = 1000 - a - b
if a * a + b * b == c * c) yield a * b * c
val r = rs.head
assert(r == 31875000) // 32 ms
| pavelfatin/projecteuler | p09.scala | Scala | gpl-3.0 | 238 |
package com.filez.astyanax.multientity.play2
import javax.persistence.PersistenceException
import play.api._
import scala.collection.JavaConversions.asScalaSet
import impl._
class MultiEntityPlugin(app:Application) extends Plugin {
/** start of context configuration hierarchy */
val CONTEXTS = "multientity.context"
/** start of entity manager configuration hierarchy */
val MANAGERS = "multientity.manager"
/** plugin is enabled if configuration is found */
override
def enabled = {
if ( app.configuration.getObject(CONTEXTS).isEmpty ) {
Logger.warn("MultiEntity plugin disabled because there is no context configuration")
false
} else if ( app.configuration.getObject(MANAGERS).isEmpty ) {
Logger.warn("MultiEntity plugin disabled because there is no entity manager configuration")
false
} else
true
}
override
def onStart() {
Logger.info("multi entity plugin - on start hook")
val contextRoot = app.configuration.getObject(CONTEXTS).get.toConfig()
val managerRoot = app.configuration.getObject(MANAGERS).get.toConfig()
// dump context arguments
Logger.info("configuration context keys:")
for ( kv <- contextRoot.entrySet() ) {
Logger.info(s" config key ${kv.getKey()}")
}
Logger.info("Creating contexts:")
getPrimaryKeys( contextRoot.entrySet().map( kv => kv.getKey()))
.foreach { contextName =>
AstyanaxContextRegistry += Tuple2(contextName,
ContextBuilder(contextRoot.getObject(contextName).toConfig())
)
Logger.info(s" context $contextName created")
}
Logger.info("Creating Entity managers:")
getPrimaryKeys( managerRoot.entrySet().map( kv => kv.getKey()))
.foreach { managerName =>
val factory = FactoryBuilder(managerRoot.getObject(managerName).toConfig())
MultiEntityFactoryRegistry += Tuple2(managerName, factory)
Logger.info(s" entity manager $managerName created: ${factory.get()} ")
}
// create and/or truncate storage on startup
MultiEntityFactoryRegistry.foreach{ case (name,factory) =>
if (factory.createStorage)
factory.get().createStorage(None)
if (factory.truncateStorage)
factory.get().truncateStorage()
}
}
/** Extracts the set of primary keys. A primary key is the first part of a
    `primarykey.setting` configuration key.
*/
private def getPrimaryKeys(keys:Traversable[String]):Set[String] = {
var rc = Set[String]()
val rx = """^(\p{Alpha}\p{Alnum}*)\.\p{Alpha}+$""".r
keys.foreach { k =>
k match {
case rx(key) => rc = rc + key
}
}
rc
}
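// Illustrative example (the keys are hypothetical): getPrimaryKeys(Seq("main.hosts",
// "main.port", "backup.hosts")) returns Set("main", "backup").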
override
def onStop() {
Logger.info("multi entity plugin - on stop hook")
// delete storage
MultiEntityManagerCache.filter{
case (name, manager) => manager.factory.deleteStorage
}
.foreach{
case (name, manager) => manager.deleteStorage()
}
MultiEntityFactoryRegistry.clear()
AstyanaxContextRegistry.clear()
MultiEntityManagerCache.clear()
}
} | hsn10/multiplugin | src/main/scala/Plugin.scala | Scala | agpl-3.0 | 3,115 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.trees
import java.util.UUID
import scala.collection.{mutable, Map}
import scala.reflect.ClassTag
import org.apache.commons.lang3.ClassUtils
import org.json4s.JsonAST._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.ScalaReflection._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogTableType, FunctionResource}
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.JoinType
import org.apache.spark.sql.catalyst.plans.physical.{BroadcastMode, Partitioning}
import org.apache.spark.sql.catalyst.util.StringUtils.PlanStringConcat
import org.apache.spark.sql.catalyst.util.truncatedString
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
/** Used by [[TreeNode.getNodeNumbered]] when traversing the tree for a given number */
private class MutableInt(var i: Int)
case class Origin(
line: Option[Int] = None,
startPosition: Option[Int] = None)
/**
* Provides a location for TreeNodes to ask about the context of their origin. For example, which
* line of code is currently being parsed.
*/
object CurrentOrigin {
private val value = new ThreadLocal[Origin]() {
override def initialValue: Origin = Origin()
}
def get: Origin = value.get()
def set(o: Origin): Unit = value.set(o)
def reset(): Unit = value.set(Origin())
def setPosition(line: Int, start: Int): Unit = {
value.set(
value.get.copy(line = Some(line), startPosition = Some(start)))
}
def withOrigin[A](o: Origin)(f: => A): A = {
set(o)
val ret = try f finally { reset() }
ret
}
}
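// Illustrative usage of CurrentOrigin (a sketch, not part of this file): a parser can scope
// the position it is working at so that nodes constructed inside the block capture it:
//
//   CurrentOrigin.withOrigin(Origin(Some(10), Some(3))) {
//     // any TreeNode constructed here gets origin == Origin(Some(10), Some(3))
//     parseExpression(text) // hypothetical call
//   }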
// A tag of a `TreeNode`, which defines name and type
case class TreeNodeTag[T](name: String)
// scalastyle:off
abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
// scalastyle:on
self: BaseType =>
val origin: Origin = CurrentOrigin.get
/**
* A mutable map for holding auxiliary information of this tree node. It will be carried over
* when this node is copied via `makeCopy`, or transformed via `transformUp`/`transformDown`.
*/
private val tags: mutable.Map[TreeNodeTag[_], Any] = mutable.Map.empty
protected def copyTagsFrom(other: BaseType): Unit = {
tags ++= other.tags
}
def setTagValue[T](tag: TreeNodeTag[T], value: T): Unit = {
tags(tag) = value
}
def getTagValue[T](tag: TreeNodeTag[T]): Option[T] = {
tags.get(tag).map(_.asInstanceOf[T])
}
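  // Illustrative usage of node tags (the tag name and value are hypothetical):
  //   val cached = TreeNodeTag[Boolean]("cached")
  //   node.setTagValue(cached, true)
  //   node.getTagValue(cached) // Some(true)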
/**
* Returns a Seq of the children of this node.
* Children should not change. Immutability required for containsChild optimization
*/
def children: Seq[BaseType]
lazy val containsChild: Set[TreeNode[_]] = children.toSet
private lazy val _hashCode: Int = scala.util.hashing.MurmurHash3.productHash(this)
override def hashCode(): Int = _hashCode
/**
* Faster version of equality which short-circuits when two treeNodes are the same instance.
* We don't just override Object.equals, as doing so prevents the scala compiler from
* generating case class `equals` methods
*/
def fastEquals(other: TreeNode[_]): Boolean = {
this.eq(other) || this == other
}
/**
* Find the first [[TreeNode]] that satisfies the condition specified by `f`.
* The condition is recursively applied to this node and all of its children (pre-order).
*/
def find(f: BaseType => Boolean): Option[BaseType] = if (f(this)) {
Some(this)
} else {
children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
}
/**
* Runs the given function on this node and then recursively on [[children]].
* @param f the function to be applied to each node in the tree.
*/
def foreach(f: BaseType => Unit): Unit = {
f(this)
children.foreach(_.foreach(f))
}
/**
* Runs the given function recursively on [[children]] then on this node.
* @param f the function to be applied to each node in the tree.
*/
def foreachUp(f: BaseType => Unit): Unit = {
children.foreach(_.foreachUp(f))
f(this)
}
/**
* Returns a Seq containing the result of applying the given function to each
* node in this tree in a preorder traversal.
* @param f the function to be applied.
*/
def map[A](f: BaseType => A): Seq[A] = {
val ret = new collection.mutable.ArrayBuffer[A]()
foreach(ret += f(_))
ret
}
/**
* Returns a Seq by applying a function to all nodes in this tree and using the elements of the
* resulting collections.
*/
def flatMap[A](f: BaseType => TraversableOnce[A]): Seq[A] = {
val ret = new collection.mutable.ArrayBuffer[A]()
foreach(ret ++= f(_))
ret
}
/**
* Returns a Seq containing the result of applying a partial function to all elements in this
* tree on which the function is defined.
*/
def collect[B](pf: PartialFunction[BaseType, B]): Seq[B] = {
val ret = new collection.mutable.ArrayBuffer[B]()
val lifted = pf.lift
foreach(node => lifted(node).foreach(ret.+=))
ret
}
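  // Illustrative usage (assuming a catalyst expression tree containing Literal nodes):
  //   expr.collect { case l: Literal => l.value }
  // returns the values of all literals encountered in a pre-order traversal.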
/**
* Returns a Seq containing the leaves in this tree.
*/
def collectLeaves(): Seq[BaseType] = {
this.collect { case p if p.children.isEmpty => p }
}
/**
* Finds and returns the first [[TreeNode]] of the tree for which the given partial function
* is defined (pre-order), and applies the partial function to it.
*/
def collectFirst[B](pf: PartialFunction[BaseType, B]): Option[B] = {
val lifted = pf.lift
lifted(this).orElse {
children.foldLeft(Option.empty[B]) { (l, r) => l.orElse(r.collectFirst(pf)) }
}
}
/**
* Efficient alternative to `productIterator.map(f).toArray`.
*/
protected def mapProductIterator[B: ClassTag](f: Any => B): Array[B] = {
val arr = Array.ofDim[B](productArity)
var i = 0
while (i < arr.length) {
arr(i) = f(productElement(i))
i += 1
}
arr
}
/**
* Returns a copy of this node with the children replaced.
* TODO: Validate somewhere (in debug mode?) that children are ordered correctly.
*/
def withNewChildren(newChildren: Seq[BaseType]): BaseType = {
assert(newChildren.size == children.size, "Incorrect number of children")
var changed = false
val remainingNewChildren = newChildren.toBuffer
val remainingOldChildren = children.toBuffer
def mapTreeNode(node: TreeNode[_]): TreeNode[_] = {
val newChild = remainingNewChildren.remove(0)
val oldChild = remainingOldChildren.remove(0)
if (newChild fastEquals oldChild) {
oldChild
} else {
changed = true
newChild
}
}
def mapChild(child: Any): Any = child match {
case arg: TreeNode[_] if containsChild(arg) => mapTreeNode(arg)
// CaseWhen Case or any tuple type
case (left, right) => (mapChild(left), mapChild(right))
case nonChild: AnyRef => nonChild
case null => null
}
val newArgs = mapProductIterator {
case s: StructType => s // Don't convert struct types to some other type of Seq[StructField]
// Handle Seq[TreeNode] in TreeNode parameters.
case s: Stream[_] =>
// Stream is lazy so we need to force materialization
s.map(mapChild).force
case s: Seq[_] =>
s.map(mapChild)
case m: Map[_, _] =>
// `mapValues` is lazy and we need to force it to materialize
m.mapValues(mapChild).view.force
case arg: TreeNode[_] if containsChild(arg) => mapTreeNode(arg)
case Some(child) => Some(mapChild(child))
case nonChild: AnyRef => nonChild
case null => null
}
if (changed) makeCopy(newArgs) else this
}
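  // Illustrative behaviour (using a hypothetical binary node type):
  //   Add(a, b).withNewChildren(Seq(x, y)) yields Add(x, y)
  // If every new child fastEquals its old counterpart, the original node is returned unchanged.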
/**
* Returns a copy of this node where `rule` has been recursively applied to the tree.
* When `rule` does not apply to a given node it is left unchanged.
* Users should not expect a specific directionality. If a specific directionality is needed,
* transformDown or transformUp should be used.
*
* @param rule the function use to transform this nodes children
*/
def transform(rule: PartialFunction[BaseType, BaseType]): BaseType = {
transformDown(rule)
}
/**
* Returns a copy of this node where `rule` has been recursively applied to it and all of its
* children (pre-order). When `rule` does not apply to a given node it is left unchanged.
*
* @param rule the function used to transform this nodes children
*/
def transformDown(rule: PartialFunction[BaseType, BaseType]): BaseType = {
val afterRule = CurrentOrigin.withOrigin(origin) {
rule.applyOrElse(this, identity[BaseType])
}
// Check if unchanged and then possibly return old copy to avoid gc churn.
if (this fastEquals afterRule) {
mapChildren(_.transformDown(rule))
} else {
// If the transform function replaces this node with a new one, carry over the tags.
afterRule.tags ++= this.tags
afterRule.mapChildren(_.transformDown(rule))
}
}
/**
* Returns a copy of this node where `rule` has been recursively applied first to all of its
* children and then itself (post-order). When `rule` does not apply to a given node, it is left
* unchanged.
*
* @param rule the function use to transform this nodes children
*/
def transformUp(rule: PartialFunction[BaseType, BaseType]): BaseType = {
val afterRuleOnChildren = mapChildren(_.transformUp(rule))
val newNode = if (this fastEquals afterRuleOnChildren) {
CurrentOrigin.withOrigin(origin) {
rule.applyOrElse(this, identity[BaseType])
}
} else {
CurrentOrigin.withOrigin(origin) {
rule.applyOrElse(afterRuleOnChildren, identity[BaseType])
}
}
// If the transform function replaces this node with a new one, carry over the tags.
newNode.tags ++= this.tags
newNode
}
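  // Illustrative rule (a sketch; the node types are only examples): rewrite `x + 0` to `x` bottom-up:
  //   plan.transformUp {
  //     case Add(child, Literal(0, IntegerType)) => child
  //   }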
/**
* Returns a copy of this node where `f` has been applied to all the nodes in `children`.
*/
def mapChildren(f: BaseType => BaseType): BaseType = {
if (children.nonEmpty) {
mapChildren(f, forceCopy = false)
} else {
this
}
}
/**
* Returns a copy of this node where `f` has been applied to all the nodes in `children`.
* @param f The transform function to be applied on applicable `TreeNode` elements.
* @param forceCopy Whether to force making a copy of the nodes even if no child has been changed.
*/
private def mapChildren(
f: BaseType => BaseType,
forceCopy: Boolean): BaseType = {
var changed = false
def mapChild(child: Any): Any = child match {
case arg: TreeNode[_] if containsChild(arg) =>
val newChild = f(arg.asInstanceOf[BaseType])
if (forceCopy || !(newChild fastEquals arg)) {
changed = true
newChild
} else {
arg
}
case tuple @ (arg1: TreeNode[_], arg2: TreeNode[_]) =>
val newChild1 = if (containsChild(arg1)) {
f(arg1.asInstanceOf[BaseType])
} else {
arg1.asInstanceOf[BaseType]
}
val newChild2 = if (containsChild(arg2)) {
f(arg2.asInstanceOf[BaseType])
} else {
arg2.asInstanceOf[BaseType]
}
if (forceCopy || !(newChild1 fastEquals arg1) || !(newChild2 fastEquals arg2)) {
changed = true
(newChild1, newChild2)
} else {
tuple
}
case other => other
}
val newArgs = mapProductIterator {
case arg: TreeNode[_] if containsChild(arg) =>
val newChild = f(arg.asInstanceOf[BaseType])
if (forceCopy || !(newChild fastEquals arg)) {
changed = true
newChild
} else {
arg
}
case Some(arg: TreeNode[_]) if containsChild(arg) =>
val newChild = f(arg.asInstanceOf[BaseType])
if (forceCopy || !(newChild fastEquals arg)) {
changed = true
Some(newChild)
} else {
Some(arg)
}
case m: Map[_, _] => m.mapValues {
case arg: TreeNode[_] if containsChild(arg) =>
val newChild = f(arg.asInstanceOf[BaseType])
if (forceCopy || !(newChild fastEquals arg)) {
changed = true
newChild
} else {
arg
}
case other => other
}.view.force // `mapValues` is lazy and we need to force it to materialize
case d: DataType => d // Avoid unpacking Structs
case args: Stream[_] => args.map(mapChild).force // Force materialization on stream
case args: Iterable[_] => args.map(mapChild)
case nonChild: AnyRef => nonChild
case null => null
}
if (forceCopy || changed) makeCopy(newArgs, forceCopy) else this
}
/**
* Args to the constructor that should be copied, but not transformed.
* These are appended to the transformed args automatically by `makeCopy`.
*/
protected def otherCopyArgs: Seq[AnyRef] = Nil
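// Illustrative sketch (hypothetical subclass, not from this file): a node whose
// constructor takes an extra, non-product argument list would override this so that
// `makeCopy` can still rebuild it, e.g.
//
//   case class MyNode(child: BaseType)(val session: AnyRef) extends ... {
//     override protected def otherCopyArgs: Seq[AnyRef] = session :: Nil
//   }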
/**
* Creates a copy of this type of tree node after a transformation.
* Must be overridden by child classes that have constructor arguments
* that are not present in the productIterator.
* @param newArgs the new product arguments.
*/
def makeCopy(newArgs: Array[AnyRef]): BaseType = makeCopy(newArgs, allowEmptyArgs = false)
/**
* Creates a copy of this type of tree node after a transformation.
* Must be overridden by child classes that have constructor arguments
* that are not present in the productIterator.
* @param newArgs the new product arguments.
* @param allowEmptyArgs whether to allow argument list to be empty.
*/
private def makeCopy(
newArgs: Array[AnyRef],
allowEmptyArgs: Boolean): BaseType = attachTree(this, "makeCopy") {
// Skip no-arg constructors that are just there for kryo.
val ctors = getClass.getConstructors.filter(allowEmptyArgs || _.getParameterTypes.size != 0)
if (ctors.isEmpty) {
sys.error(s"No valid constructor for $nodeName")
}
val allArgs: Array[AnyRef] = if (otherCopyArgs.isEmpty) {
newArgs
} else {
newArgs ++ otherCopyArgs
}
val defaultCtor = ctors.find { ctor =>
if (ctor.getParameterTypes.length != allArgs.length) {
false
} else if (allArgs.contains(null)) {
// if there is a `null`, we can't figure out the class, therefore we should just fallback
// to older heuristic
false
} else {
val argsArray: Array[Class[_]] = allArgs.map(_.getClass)
ClassUtils.isAssignable(argsArray, ctor.getParameterTypes, true /* autoboxing */)
}
}.getOrElse(ctors.maxBy(_.getParameterTypes.length)) // fall back to older heuristic
try {
CurrentOrigin.withOrigin(origin) {
val res = defaultCtor.newInstance(allArgs.toArray: _*).asInstanceOf[BaseType]
res.copyTagsFrom(this)
res
}
} catch {
case e: java.lang.IllegalArgumentException =>
throw new TreeNodeException(
this,
s"""
|Failed to copy node.
|Is otherCopyArgs specified correctly for $nodeName?
|Exception message: ${e.getMessage}
|ctor: $defaultCtor
|types: ${newArgs.map(_.getClass).mkString(", ")}
|args: ${newArgs.mkString(", ")}
""".stripMargin)
}
}
override def clone(): BaseType = {
mapChildren(_.clone(), forceCopy = true)
}
/**
* Returns the name of this type of TreeNode. Defaults to the class name.
* Note that we remove the "Exec" suffix for physical operators here.
*/
def nodeName: String = getClass.getSimpleName.replaceAll("Exec$", "")
/**
* The arguments that should be included in the arg string. Defaults to the `productIterator`.
*/
protected def stringArgs: Iterator[Any] = productIterator
private lazy val allChildren: Set[TreeNode[_]] = (children ++ innerChildren).toSet[TreeNode[_]]
/** Returns a string representing the arguments to this node, minus any children */
def argString(maxFields: Int): String = stringArgs.flatMap {
case tn: TreeNode[_] if allChildren.contains(tn) => Nil
case Some(tn: TreeNode[_]) if allChildren.contains(tn) => Nil
case Some(tn: TreeNode[_]) => tn.simpleString(maxFields) :: Nil
case tn: TreeNode[_] => tn.simpleString(maxFields) :: Nil
case seq: Seq[Any] if seq.toSet.subsetOf(allChildren.asInstanceOf[Set[Any]]) => Nil
case iter: Iterable[_] if iter.isEmpty => Nil
case seq: Seq[_] => truncatedString(seq, "[", ", ", "]", maxFields) :: Nil
case set: Set[_] => truncatedString(set.toSeq, "{", ", ", "}", maxFields) :: Nil
case array: Array[_] if array.isEmpty => Nil
case array: Array[_] => truncatedString(array, "[", ", ", "]", maxFields) :: Nil
case null => Nil
case None => Nil
case Some(null) => Nil
case Some(any) => any :: Nil
case table: CatalogTable =>
table.storage.serde match {
case Some(serde) => table.identifier :: serde :: Nil
case _ => table.identifier :: Nil
}
case other => other :: Nil
}.mkString(", ")
/**
* ONE line description of this node.
* @param maxFields Maximum number of fields that will be converted to strings.
* Any elements beyond the limit will be dropped.
*/
def simpleString(maxFields: Int): String = {
s"$nodeName ${argString(maxFields)}".trim
}
/** ONE line description of this node with more information */
def verboseString(maxFields: Int): String
/** ONE line description of this node with some suffix information */
def verboseStringWithSuffix(maxFields: Int): String = verboseString(maxFields)
override def toString: String = treeString
/** Returns a string representation of the nodes in this tree */
final def treeString: String = treeString(verbose = true)
final def treeString(
verbose: Boolean,
addSuffix: Boolean = false,
maxFields: Int = SQLConf.get.maxToStringFields): String = {
val concat = new PlanStringConcat()
treeString(concat.append, verbose, addSuffix, maxFields)
concat.toString
}
def treeString(
append: String => Unit,
verbose: Boolean,
addSuffix: Boolean,
maxFields: Int): Unit = {
generateTreeString(0, Nil, append, verbose, "", addSuffix, maxFields)
}
/**
* Returns a string representation of the nodes in this tree, where each operator is numbered.
* The numbers can be used with [[TreeNode.apply]] to easily access specific subtrees.
*
* The numbers are based on depth-first traversal of the tree (with innerChildren traversed first
* before children).
*/
def numberedTreeString: String =
treeString.split("\\n").zipWithIndex.map { case (line, i) => f"$i%02d $line" }.mkString("\\n")
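// Illustrative sketch of the numbered output shape (operator names are hypothetical):
//
//   00 SomeParentNode ...
//   01 +- SomeChildNode ...
//   02    +- SomeLeafNode ...
//
// Given such a printout, `apply(2)` (or its typed variant `p(2)`) returns the node
// printed on line 02.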
/**
* Returns the tree node at the specified number, used primarily for interactive debugging.
* Numbers for each node can be found in the [[numberedTreeString]].
*
* Note that this cannot return BaseType because a logical plan node might return a
* physical plan for innerChildren, e.g. the in-memory relation logical plan node has a
* reference to the physical plan node it is referencing.
*/
def apply(number: Int): TreeNode[_] = getNodeNumbered(new MutableInt(number)).orNull
/**
* Returns the tree node at the specified number, used primarily for interactive debugging.
* Numbers for each node can be found in the [[numberedTreeString]].
*
* This is a variant of [[apply]] that returns the node as BaseType (if the type matches).
*/
def p(number: Int): BaseType = apply(number).asInstanceOf[BaseType]
private def getNodeNumbered(number: MutableInt): Option[TreeNode[_]] = {
if (number.i < 0) {
None
} else if (number.i == 0) {
Some(this)
} else {
number.i -= 1
// Note that this traversal order must be the same as numberedTreeString.
innerChildren.map(_.getNodeNumbered(number)).find(_ != None).getOrElse {
children.map(_.getNodeNumbered(number)).find(_ != None).flatten
}
}
}
/**
* All the nodes that should be shown as an inner nested tree of this node.
* For example, this can be used to show sub-queries.
*/
protected def innerChildren: Seq[TreeNode[_]] = Seq.empty
/**
* Appends the string representation of this node and its children to the given Writer.
*
* The `i`-th element in `lastChildren` indicates whether the ancestor of the current node at
* depth `i + 1` is the last child of its own parent node. The depth of the root node is 0, and
* `lastChildren` for the root node should be empty.
*
* Note that this traversal (numbering) order must be the same as [[getNodeNumbered]].
*/
def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
append: String => Unit,
verbose: Boolean,
prefix: String = "",
addSuffix: Boolean = false,
maxFields: Int): Unit = {
if (depth > 0) {
lastChildren.init.foreach { isLast =>
append(if (isLast) " " else ": ")
}
append(if (lastChildren.last) "+- " else ":- ")
}
val str = if (verbose) {
if (addSuffix) verboseStringWithSuffix(maxFields) else verboseString(maxFields)
} else {
simpleString(maxFields)
}
append(prefix)
append(str)
append("\\n")
if (innerChildren.nonEmpty) {
innerChildren.init.foreach(_.generateTreeString(
depth + 2, lastChildren :+ children.isEmpty :+ false, append, verbose,
addSuffix = addSuffix, maxFields = maxFields))
innerChildren.last.generateTreeString(
depth + 2, lastChildren :+ children.isEmpty :+ true, append, verbose,
addSuffix = addSuffix, maxFields = maxFields)
}
if (children.nonEmpty) {
children.init.foreach(_.generateTreeString(
depth + 1, lastChildren :+ false, append, verbose, prefix, addSuffix, maxFields))
children.last.generateTreeString(
depth + 1, lastChildren :+ true, append, verbose, prefix, addSuffix, maxFields)
}
}
/**
* Returns a 'scala code' representation of this `TreeNode` and its children. Intended for use
* when debugging where the prettier toString function is obfuscating the actual structure. In the
* case of 'pure' `TreeNodes` that only contain primitives and other TreeNodes, the result can be
* pasted in the REPL to build an equivalent Tree.
*/
def asCode: String = {
val args = productIterator.map {
case tn: TreeNode[_] => tn.asCode
case s: String => "\\"" + s + "\\""
case other => other.toString
}
s"$nodeName(${args.mkString(",")})"
}
def toJSON: String = compact(render(jsonValue))
def prettyJson: String = pretty(render(jsonValue))
private def jsonValue: JValue = {
val jsonValues = scala.collection.mutable.ArrayBuffer.empty[JValue]
def collectJsonValue(tn: BaseType): Unit = {
val jsonFields = ("class" -> JString(tn.getClass.getName)) ::
("num-children" -> JInt(tn.children.length)) :: tn.jsonFields
jsonValues += JObject(jsonFields)
tn.children.foreach(collectJsonValue)
}
collectJsonValue(this)
jsonValues
}
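// Illustrative sketch of the resulting JSON shape (class names are hypothetical):
//
//   [ {"class": "org.example.SomeNode", "num-children": 1, ...},
//     {"class": "org.example.ChildNode", "num-children": 0, ...} ]
//
// i.e. a flat array of nodes in depth-first order, where child-valued constructor
// fields are encoded as indices into `children` (see `jsonFields` below).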
protected def jsonFields: List[JField] = {
val fieldNames = getConstructorParameterNames(getClass)
val fieldValues = productIterator.toSeq ++ otherCopyArgs
assert(fieldNames.length == fieldValues.length, s"${getClass.getSimpleName} fields: " +
fieldNames.mkString(", ") + s", values: " + fieldValues.mkString(", "))
fieldNames.zip(fieldValues).map {
// If the field value is a child, then use an int to encode it, representing the index
// of this child in all children.
case (name, value: TreeNode[_]) if containsChild(value) =>
name -> JInt(children.indexOf(value))
case (name, value: Seq[BaseType]) if value.forall(containsChild) =>
name -> JArray(
value.map(v => JInt(children.indexOf(v.asInstanceOf[TreeNode[_]]))).toList
)
case (name, value) => name -> parseToJson(value)
}.toList
}
private def parseToJson(obj: Any): JValue = obj match {
case b: Boolean => JBool(b)
case b: Byte => JInt(b.toInt)
case s: Short => JInt(s.toInt)
case i: Int => JInt(i)
case l: Long => JInt(l)
case f: Float => JDouble(f)
case d: Double => JDouble(d)
case b: BigInt => JInt(b)
case null => JNull
case s: String => JString(s)
case u: UUID => JString(u.toString)
case dt: DataType => dt.jsonValue
// SPARK-17356: In usage of mllib, Metadata may store a huge vector of data, transforming
// it to JSON may trigger OutOfMemoryError.
case m: Metadata => Metadata.empty.jsonValue
case clazz: Class[_] => JString(clazz.getName)
case s: StorageLevel =>
("useDisk" -> s.useDisk) ~ ("useMemory" -> s.useMemory) ~ ("useOffHeap" -> s.useOffHeap) ~
("deserialized" -> s.deserialized) ~ ("replication" -> s.replication)
case n: TreeNode[_] => n.jsonValue
case o: Option[_] => o.map(parseToJson)
// Recursive scan Seq[TreeNode], Seq[Partitioning], Seq[DataType]
case t: Seq[_] if t.forall(_.isInstanceOf[TreeNode[_]]) ||
t.forall(_.isInstanceOf[Partitioning]) || t.forall(_.isInstanceOf[DataType]) =>
JArray(t.map(parseToJson).toList)
case t: Seq[_] if t.length > 0 && t.head.isInstanceOf[String] =>
JString(truncatedString(t, "[", ", ", "]", SQLConf.get.maxToStringFields))
case t: Seq[_] => JNull
case m: Map[_, _] => JNull
// if it's a scala object, we can simply keep the full class path.
// TODO: currently if the class name ends with "$", we think it's a scala object, there is
// probably a better way to check it.
case obj if obj.getClass.getName.endsWith("$") => "object" -> obj.getClass.getName
case p: Product if shouldConvertToJson(p) =>
try {
val fieldNames = getConstructorParameterNames(p.getClass)
val fieldValues = p.productIterator.toSeq
assert(fieldNames.length == fieldValues.length, s"${getClass.getSimpleName} fields: " +
fieldNames.mkString(", ") + s", values: " + fieldValues.mkString(", "))
("product-class" -> JString(p.getClass.getName)) :: fieldNames.zip(fieldValues).map {
case (name, value) => name -> parseToJson(value)
}.toList
} catch {
case _: RuntimeException => null
}
case _ => JNull
}
private def shouldConvertToJson(product: Product): Boolean = product match {
case exprId: ExprId => true
case field: StructField => true
case id: TableIdentifier => true
case join: JoinType => true
case id: FunctionIdentifier => true
case spec: BucketSpec => true
case catalog: CatalogTable => true
case partition: Partitioning => true
case resource: FunctionResource => true
case broadcast: BroadcastMode => true
case table: CatalogTableType => true
case storage: CatalogStorageFormat => true
case _ => false
}
}
| aosagie/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala | Scala | apache-2.0 | 28,022 |
package com.sksamuel.scrimage.filter
import org.scalatest.{ OneInstancePerTest, BeforeAndAfter, FunSuite }
import com.sksamuel.scrimage.Image
/** @author Stephen Samuel */
class HsbFilterTest extends FunSuite with BeforeAndAfter with OneInstancePerTest {
val original = Image(getClass.getResourceAsStream("/bird_small.png"))
test("filter output matches expected") {
val expected = Image(getClass.getResourceAsStream("/com/sksamuel/scrimage/filters/bird_small_hue.png"))
assert(original.filter(HSBFilter(0.5)) === expected)
}
}
| davenatx/scrimage | scrimage-filters/src/test/scala/com/sksamuel/scrimage/filter/HsbFilterTest.scala | Scala | apache-2.0 | 545 |
/**
* The MIT License
*
* Copyright (c) 2011 Benjamin Klum
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.helgoboss.commons_scala
/**
* Represents a result of a matching operation. Used by [[org.helgoboss.commons_scala.PathPattern]].
*/
sealed trait MatchResult
/**
* Contains all possible match results.
*/
object MatchResult {
/**
* Returned if neither the given path matches the pattern nor any sub path can ever match it.
*/
case object NeverMatches extends MatchResult
/**
* Returned if the given path matches the pattern.
*/
case object Matches extends MatchResult
/**
* Returned if the given path doesn't match the pattern but if a sub path might match it.
*/
case object CanMatchSubPath extends MatchResult
/**
* Returned if the given path matches the pattern and every sub path would match it as well.
*/
case object WillMatchEverySubPath extends MatchResult
}
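/*
 * Illustrative sketch (the `pathPattern.matches(path)` call below is hypothetical; the
 * actual PathPattern API is defined elsewhere): callers typically handle all four
 * results when walking a directory tree.
 *
 *   pathPattern.matches(path) match {
 *     case MatchResult.Matches               => // include this path
 *     case MatchResult.WillMatchEverySubPath => // include the entire subtree
 *     case MatchResult.CanMatchSubPath       => // keep descending
 *     case MatchResult.NeverMatches          => // prune this branch
 *   }
 */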
| helgoboss/commons-scala | src/main/scala/org/helgoboss/commons_scala/MatchResult.scala | Scala | mit | 1,966 |
import scala.tools.partest.ReplTest
object Test extends ReplTest {
def code = """
object x { def x={} }
import x._
x
x
"""
}
| folone/dotty | tests/pending/run/t5655.scala | Scala | bsd-3-clause | 130 |
package services
import com.github.jurajburian.mailer.{Content, Message}
import javax.mail.internet.InternetAddress
import model.User
class MessageTemplateService {
def createConfirmRegistrationMessage(user: User, appName: String, fromEmail: String, followLink: String): Message =
Message(
subject = "Confirm Email",
content = Content().html(s"""<p>Hi, ${user.username}!</p>
| <p>Welcome to $appName. Please click the following link to confirm your email:</p>
| <p><a href="$followLink">$followLink</a></p>""".stripMargin),
from = new InternetAddress(fromEmail),
to = Seq(new InternetAddress(user.email))
)
def createRecoverPasswordMessage(user: User, appName: String, fromEmail: String, followLink: String): Message =
Message(
subject = "Reset Password",
content = Content().html(s"""<p>Please click this link to reset your password:</p>
| <a href="$followLink">$followLink</a>""".stripMargin),
from = new InternetAddress(fromEmail),
to = Seq(new InternetAddress(user.email))
)
}
| sysgears/apollo-universal-starter-kit | modules/user/server-scala/src/main/scala/services/MessageTemplateService.scala | Scala | mit | 1,086 |
package views.html.b3.my
package object vertical {
import views.html.b3._
import play.twirl.api.Html
import play.api.mvc.Call
import play.api.i18n.MessagesProvider
import views.html.helper._
/**
* Declares the class for the Vertical FieldConstructor.
*/
class VerticalFieldConstructor(val withFeedbackIcons: Boolean = false) extends B3FieldConstructor {
/* Define the default class of the corresponding form */
val formClass = "form-my-vertical"
/* Renders the corresponding template of the field constructor */
def apply(fieldInfo: B3FieldInfo, inputHtml: Html)(implicit msgsProv: MessagesProvider) = bsFieldConstructor(fieldInfo, inputHtml)(this, msgsProv)
/* Renders the corresponding template of the form group */
def apply(contentHtml: Html, argsMap: Map[Symbol, Any])(implicit msgsProv: MessagesProvider) = bsFormGroup(contentHtml, argsMap)(msgsProv)
}
/**
* Creates a new VerticalFieldConstructor to use for specific forms or scopes (don't use it as a default one).
* If a default B3FieldConstructor and a specific VerticalFieldConstructor are within the same scope, the more
* specific will be chosen.
*/
val fieldConstructorSpecific: VerticalFieldConstructor = new VerticalFieldConstructor()
/**
* Returns it as a B3FieldConstructor to use it as default within a template
*/
val fieldConstructor: B3FieldConstructor = fieldConstructorSpecific
/**
* **********************************************************************************************************************************
* SHORTCUT HELPERS
* *********************************************************************************************************************************
*/
def form(action: Call, args: (Symbol, Any)*)(body: VerticalFieldConstructor => Html) =
views.html.b3.form(action, args: _*)(body(fieldConstructorSpecific))(fieldConstructorSpecific)
} | adrianhurt/play-bootstrap | play26-bootstrap3/sample/app/views/b3/my/vertical/package.scala | Scala | apache-2.0 | 1,925 |
package org.ensime.sexp.formats
import BigIntConvertor._
import org.scalacheck.{ Arbitrary, Gen }
import org.scalatest._
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import scala.collection.immutable.BitSet
class BigIntConvertorSpec extends FunSpec {
private val examples = List(
BitSet() -> BigInt(0),
BitSet(0) -> BigInt(1),
BitSet(1) -> BigInt(2),
BitSet(64) -> BigInt("18446744073709551616"),
BitSet(0, 64) -> BigInt("18446744073709551617"),
BitSet(1, 64) -> BigInt("18446744073709551618")
)
it("should convert basic BigSet to BitInt") {
examples foreach {
case (bitset, bigint) => assert(fromBitSet(bitset) === bigint)
}
}
it("should convert basic BigInt to BitSet") {
examples foreach {
case (bitset, bigint) => assert(toBitSet(bigint) === bitset)
}
}
}
class BigIntConvertorCheck extends FunSpec with GeneratorDrivenPropertyChecks {
def positiveIntStream: Arbitrary[Stream[Int]] = Arbitrary {
Gen.containerOf[Stream, Int](Gen.chooseNum(0, 2 * Short.MaxValue))
}
implicit def arbitraryBitSet: Arbitrary[BitSet] = Arbitrary {
for (seq <- positiveIntStream.arbitrary) yield BitSet(seq: _*)
}
it("should round-trip BigInt <=> BitSet") {
forAll { (bigint: BigInt) =>
whenever(bigint >= 0) {
// the exact rules for which negative numbers are allowed
// seem to be quite complex, but certainly it is sometimes
// valid.
assert(fromBitSet(toBitSet(bigint)) === bigint)
}
}
}
it("should round-trip BitSet <=> BigInt") {
forAll { (bitset: BitSet) =>
assert(toBitSet(fromBitSet(bitset)) === bitset)
}
}
}
| eddsteel/ensime | sexpress/src/test/scala/org/ensime/sexp/formats/BigIntConvertorSpec.scala | Scala | gpl-3.0 | 1,676 |
package com.sksamuel.elastic4s
package admin
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequestBuilder
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequestBuilder
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequestBuilder
/** @author Stephen Samuel
*
* DSL Syntax:
*
 * repository create <repo> type <type> settings <settings>
* snapshot create <name> in <repo>
* snapshot delete <name> in <repo>
* snapshot restore <name> from <repo>
*
*/
trait SnapshotDsl {
def repository = RepositoryPrefix
object RepositoryPrefix {
def create(name: String) = new CreateRepositoryExpectsType(name)
}
class CreateRepositoryExpectsType(name: String) {
def `type`(`type`: String) = new CreateRepositoryDefinition(name, `type`)
}
def snapshot = SnapshotPrefix
object SnapshotPrefix {
def create(name: String) = new CreateSnapshotExpectsIn(name)
def restore(name: String) = new RestoreSnapshotExpectsFrom(name)
def delete(name: String) = new DeleteSnapshotExpectsIn(name)
}
class CreateSnapshotExpectsIn(name: String) {
def in(repo: String) = new CreateSnapshotDefinition(name, repo)
}
class RestoreSnapshotExpectsFrom(name: String) {
def from(repo: String) = new RestoreSnapshotDefinition(name, repo)
}
class DeleteSnapshotExpectsIn(name: String) {
def in(repo: String) = new DeleteSnapshotDefinition(name, repo)
}
}
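/*
 * Illustrative sketch of the wiring above (assumes the caller mixes in SnapshotDsl;
 * these lines mirror the DSL syntax documented at the top of the file rather than a
 * verified test):
 *
 *   repository create "backups" `type` "fs" settings Map("location" -> "/tmp/backups")
 *   snapshot create "snap1" in "backups" waitForCompletion true
 *   snapshot restore "snap1" from "backups"
 *   snapshot delete "snap1" in "backups"
 */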
class CreateRepositoryDefinition(name: String, `type`: String) {
val request = new PutRepositoryRequestBuilder(ProxyClients.cluster, name).setType(`type`)
def build = request.request()
def settings(map: Map[String, AnyRef]): this.type = {
import scala.collection.JavaConverters._
request.setSettings(map.asJava)
this
}
}
class DeleteSnapshotDefinition(name: String, repo: String) {
val request = new DeleteSnapshotRequestBuilder(ProxyClients.cluster, repo, name)
def build = request.request()
}
class CreateSnapshotDefinition(name: String, repo: String) {
val request = new CreateSnapshotRequestBuilder(ProxyClients.cluster, repo, name)
def build = request.request()
def partial(p: Boolean): this.type = {
request.setPartial(p)
this
}
def includeGlobalState(global: Boolean): this.type = {
request.setIncludeGlobalState(global)
this
}
def waitForCompletion(waitForCompletion: Boolean): this.type = {
request.setWaitForCompletion(waitForCompletion)
this
}
def index(index: String): this.type = {
request.setIndices(index)
this
}
def indexes(indexes: String*): this.type = {
request.setIndices(indexes: _*)
this
}
def settings(map: Map[String, AnyRef]): this.type = {
import scala.collection.JavaConverters._
request.setSettings(map.asJava)
this
}
}
class RestoreSnapshotDefinition(name: String, repo: String) {
val request = new RestoreSnapshotRequestBuilder(ProxyClients.cluster, repo, name)
def build = request.request()
def restoreGlobalState(global: Boolean): this.type = {
request.setRestoreGlobalState(global)
this
}
def waitForCompletion(waitForCompletion: Boolean): this.type = {
request.setWaitForCompletion(waitForCompletion)
this
}
def index(index: String): this.type = {
request.setIndices(index)
this
}
def indexes(indexes: String*): this.type = {
request.setIndices(indexes: _*)
this
}
def settings(map: Map[String, AnyRef]): this.type = {
import scala.collection.JavaConverters._
request.setSettings(map.asJava)
this
}
} | maxcom/elastic4s | src/main/scala/com/sksamuel/elastic4s/admin/SnapshotDsl.scala | Scala | apache-2.0 | 3,688 |
package com.dataintuitive
package object luciuscore {
type Value = Double
type ValueVector = Array[Value]
/**
* A RankVector is just an array of Ranks, being Double values.
*
* Please note that we wrap signatures but not RankVectors because we can't afford
* the overhead when running in a distributed way.
*/
type Rank = Double
type RankVector = Array[Rank]
type Index = Int
/**
* A generic representation of a row in a datafile
*/
type Row = Array[Option[String]]
}
| data-intuitive/LuciusCore | src/main/scala/com/dataintuitive/luciuscore/package.scala | Scala | apache-2.0 | 511 |
object SCL5247 extends App {
var msg = "hello"
val msgUpdater = msg_= _ // This is red with type aware highlighting
/*start*/msgUpdater("bye")/*end*/
println(msg)
}
//Unit | ilinum/intellij-scala | testdata/typeInference/bugs5/SCL5247.scala | Scala | apache-2.0 | 184 |
package org.jetbrains.plugins.scala
package lang.refactoring.ui
import com.intellij.psi.PsiElement
import java.util
import com.intellij.refactoring.ui.AbstractMemberSelectionPanel
import java.awt.BorderLayout
import javax.swing.JScrollPane
import com.intellij.ui.{SeparatorFactory, ScrollPaneFactory}
import org.jetbrains.plugins.scala.lang.refactoring.ui.ScalaMemberSelectionTableBase
/**
* Nikolay.Tropin
* 8/20/13
*/
abstract class ScalaMemberSelectionPanelBase[M <: PsiElement, I <: ScalaMemberInfoBase[M]](title: String,
memberInfo: util.List[I],
abstractColumnHeader: String)
extends AbstractMemberSelectionPanel[M, I] {
setLayout(new BorderLayout)
private val myTable = createMemberSelectionTable(memberInfo, abstractColumnHeader)
val scrollPane: JScrollPane = ScrollPaneFactory.createScrollPane(myTable)
add(SeparatorFactory.createSeparator(title, myTable), BorderLayout.NORTH)
add(scrollPane, BorderLayout.CENTER)
def createMemberSelectionTable(memberInfos: util.List[I], abstractColumnHeader: String): ScalaMemberSelectionTableBase[M, I]
def getTable: ScalaMemberSelectionTableBase[M, I] = myTable
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/refactoring/ui/ScalaMemberSelectionPanelBase.scala | Scala | apache-2.0 | 1,323 |
package boatcraft.compatibility
import boatcraft.compatibility.ironchest.{IronChestsEventHandler, IronChestsGuiHandler}
import boatcraft.core.GUIHandler
import cpw.mods.fml.common.event.FMLPreInitializationEvent
import net.minecraftforge.common.MinecraftForge
import boatcraft.compatibility.ironchest.modifiers.blocks._
import boatcraft.api.modifiers.Block
import boatcraft.core.BoatCraft
import boatcraft.compatibility.ironchest.packets.IronChestSyncMessage
import cpw.mods.fml.relauncher.Side
import boatcraft.core.utilities.Helper
import cpw.mods.fml.common.Optional
object IronChests extends CompatModule {
@Optional.Method(modid = "IronChest")
override protected def doPreInit(e: FMLPreInitializationEvent) {
GUIHandler.handlerMap.put(code, IronChestsGuiHandler)
MinecraftForge.EVENT_BUS register IronChestsEventHandler
BoatCraft.channel.registerMessage(classOf[GenericIronChest.MessageHandler], classOf[IronChestSyncMessage],
Helper.Packet.getNextID, Side.CLIENT)
}
@Optional.Method(modid = "IronChest")
override protected def getBlocks = Array[Block](
Iron_Chest,
Gold_Chest,
Diamond_Chest,
Copper_Chest,
Silver_Chest,
Crystal_Chest,
Obsidian_Chest,
DirtChest9000)
} | Open-Code-Developers/BoatCraft | src/main/scala/boatcraft/compatibility/IronChests.scala | Scala | mit | 1,227 |
package nounou
//import breeze.linalg.DenseVector
import java.util.ServiceLoader
import com.google.gson.Gson
import nounou.elements.NNElement
import nounou.elements.data.NNData
import nounou.elements.data.filters.NNDataFilterMedianSubtract
import nounou.io.FileLoader
import nounou.elements.ranges._
import breeze.linalg.DenseVector
import nounou.util.{LoggingExt, NNGit}
/**A static class which encapsulates convenience functions for using nounou, with
* an emphasis on use from Mathematica/MatLab/Java
* @author ktakagaki
* //@date 2/17/14.
*/
object NN extends LoggingExt {
override final def toString(): String =
"Welcome to nounou, a Scala/Java adapter for neurophysiological data.\\n" +
NNGit.infoPrintout
def load(fileName: String): Array[NNElement] = FileLoader.load(fileName)
def load(fileNames: Array[String]): Array[NNElement] = FileLoader.load(fileNames)
// <editor-fold defaultstate="collapsed" desc=" options ">
def OptNull() = nounou.OptNull
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" frame ranges ">
final def SampleRange(start: Int, last: Int, step: Int, segment: Int) = new SampleRange(start, last, step, segment)
final def SampleRangeReal(start: Int, last: Int, step: Int, segment: Int) = new SampleRangeReal(start, last, step, segment)
final def SampleRangeValid(start: Int, last: Int, step: Int, segment: Int) = new SampleRangeValid(start, last, step, segment)
//The following are deprecated due to the ambiguity between step and segment variables
// final def SampleRange(start: Int, last: Int, step: Int) = new SampleRange(start, last, step, -1)
// final def SampleRange(start: Int, last: Int, segment: Int) = new SampleRange(start, last, -1, segment)
// final def SampleRange(start: Int, last: Int) = new SampleRange(start, last, -1, -1)
final def SampleRange( range: (Int, Int) ) = new SampleRange(range._1, range._2, -1, -1)
final def SampleRange( range: (Int, Int), segment: Int) = new SampleRange(range._1, range._2, -1, segment)
final def SampleRange( range: (Int, Int, Int) ) = new SampleRange(range._1, range._2, range._3, -1)
final def SampleRange( range: (Int, Int, Int), segment: Int ) = new SampleRange(range._1, range._2, range._3, segment)
final def SampleRange( range: Array[Int], segment: Int ): SampleRangeSpecifier =
nounou.elements.ranges.SampleRange.convertArrayToSampleRange(range, segment)
final def SampleRange( range: Array[Int] ): SampleRangeSpecifier = SampleRange( range, -1 )
final def SampleRangeAll(step: Int, segment: Int) = new SampleRangeAll(step, segment)
final def SampleRangeAll() = new SampleRangeAll(1, -1)
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" RangeTs ">
final def SampleRangeTs(startTs: Long, endTS: Long, stepTS: Long): SampleRangeTS =
new SampleRangeTS(startTs, endTS, stepTS)
final def FrameRangeTs(startTs: Long, endTS: Long): SampleRangeTS =
new SampleRangeTS(startTs, endTS, -1L)
// final def RangeTs(stamps: Array[Long], preTS: Long, postTS: Long): Array[ranges.RangeTs] =
// stamps.map( (s: Long) => ranges.RangeTs(s-preTS, s+postTS) )
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" filters ">
def filterMedianSubtract(data: NNData) = new NNDataFilterMedianSubtract(data)
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" toArray methods ">
def toArray(denseVector: DenseVector[Long]) = breeze.util.JavaArrayOps.dvToArray(denseVector)
// def toArray(xSpike: XSpike) = XSpike.toArray( xSpike )
// def toArray(xSpikes: Array[XSpike]) = XSpike.toArray( xSpikes )
// </editor-fold>
// def readSpikes(xData: XData, channels: Array[Int], xFrames: Array[Frame], length: Int, trigger: Int) =
// data.XSpike.readSpikes(xData, channels, xFrames, length, trigger)
// def readSpike(xData: XData, channels: Array[Int], xFrame: Frame, length: Int, trigger: Int) =
// data.XSpike.readSpike(xData, channels, xFrame, length, trigger)
//
// //final def XTrodes( trodeGroup: Array[Array[Int]] ): XTrodes = data.XTrodes( trodeGroup )
// final def XTrodeN( trodeGroup: Array[Int] ): NNTrodeN = new elements.NNTrodeN( trodeGroup.toVector )
}
//final def XSpikes(waveformLength: Int, xTrodes: XTrodes ) = new XSpikes(waveformLength, xTrodes)
// final def newNNData: NNData = new NNData
// // <editor-fold defaultstate="collapsed" desc=" RangeTsEvent ">
//
// def RangeTsEvent(eventTs: Long, preFrames: Int, postFrames: Int) =
// ranges.RangeTsEvent(eventTs, preFrames, postFrames)
//
// def RangeTsEvent(eventTs: Array[Long], preFrames: Int, postFrames: Int) =
// ranges.RangeTsEvent(eventTs, preFrames, postFrames)
//
// // </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" RangeMs ">
//// final def RangeMs(startMs: Double, lastMs: Double, stepMs: Double, optSegment: OptSegment) =
//// ranges.RangeMs(startMs, lastMs, stepMs, optSegment)
// final def RangeMs(startMs: Double, lastMs: Double, stepMs: Double) =
// ranges.RangeMs(startMs, lastMs, stepMs)
//// final def RangeMs(startMs: Double, lastMs: Double, optSegment: OptSegment) =
//// ranges.RangeMs(startMs, lastMs, optSegment)
// final def RangeMs(startMs: Double, lastMs: Double)=
// ranges.RangeMs(startMs, lastMs)
// </editor-fold>
// <editor-fold defaultstate="collapsed" desc=" RangeMsEvent ">
// final def RangeMsEvent(eventMs: Double, preMs: Double, postMs: Double, stepMs: Double, optSegment: OptSegment) =
// ranges.RangeMsEvent(eventMs, preMs, postMs, stepMs, optSegment)
// final def RangeMsEvent(eventMs: Double, preMs: Double, postMs: Double, optSegment: OptSegment) =
// ranges.RangeMsEvent(eventMs, preMs, postMs, optSegment)
// final def RangeMsEvent(eventMs: Double, preMs: Double, postMs: Double, stepMs: Double) =
// ranges.RangeMsEvent(eventMs, preMs, postMs, stepMs)
// final def RangeMsEvent(eventMs: Double, preMs: Double, postMs: Double) =
// ranges.RangeMsEvent(eventMs, preMs, postMs)
//// final def RangeMsEvent(eventMs: Array[Double], preMs: Double, postMs: Double, optSegment: OptSegment) =
//// ranges.RangeMsEvent(eventMs, preMs, postMs, optSegment)
// final def RangeMsEvent(eventMs: Array[Double], preMs: Double, postMs: Double) =
// ranges.RangeMsEvent(eventMs, preMs, postMs)
//// final def RangeMsEvent(eventMs: Array[Double], preMs: Double, postMs: Double, stepMs: Double, optSegment: OptSegment) =
//// ranges.RangeMsEvent(eventMs, preMs, postMs, stepMs, optSegment)
// final def RangeMsEvent(eventMs: Array[Double], preMs: Double, postMs: Double, stepMs: Double) =
// ranges.RangeMsEvent(eventMs, preMs, postMs, stepMs)
// </editor-fold>
| ktakagaki/nounou.rebooted150527 | src/main/scala/nounou/NN.scala | Scala | apache-2.0 | 6,836 |
package com.github.romangrebennikov.columnize.proxy
import java.nio.ByteBuffer
import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.io.Tcp.{PeerClosed, Received, Register, Write}
import akka.util.ByteString
/**
* Created by shutty on 10/5/15.
*/
case class Request(data:ByteBuffer)
case class Response(data:ByteBuffer)
class ServerConnection(proxy:ActorRef, socket:ActorRef) extends Actor with ActorLogging {
socket ! Register(self)
def receive = {
case Received(data) =>
//log.debug(s"received $data")
proxy ! Request(data.asByteBuffer)
case Response(data) =>
//log.debug(s"proxying response $data back to client")
socket ! Write(ByteString(data))
case PeerClosed =>
log.info("connection closed")
context stop self
}
}
| shuttie/columnize | src/main/scala/com/github/romangrebennikov/columnize/proxy/ServerConnection.scala | Scala | bsd-2-clause | 790 |
package edu.berkeley.nlp.entity.coref
import edu.berkeley.nlp.futile.util.Logger
import edu.berkeley.nlp.futile.fig.basic.Indexer
trait DocumentInferencer {
def getInitialWeightVector(featureIndexer: Indexer[String]): Array[Float];
def computeLikelihood(docGraph: DocumentGraph,
pairwiseScorer: PairwiseScorer,
lossFcn: (CorefDoc, Int, Int) => Float): Float;
def addUnregularizedStochasticGradient(docGraph: DocumentGraph,
pairwiseScorer: PairwiseScorer,
lossFcn: (CorefDoc, Int, Int) => Float,
gradient: Array[Float]);
def viterbiDecode(docGraph: DocumentGraph, pairwiseScorer: PairwiseScorer): Array[Int];
def finishPrintStats();
def viterbiDecodeFormClustering(docGraph: DocumentGraph, pairwiseScorer: PairwiseScorer): (Array[Int], OrderedClustering) = {
val predBackptrs = viterbiDecode(docGraph, pairwiseScorer);
(predBackptrs, OrderedClustering.createFromBackpointers(predBackptrs));
}
def viterbiDecodeAll(docGraphs: Seq[DocumentGraph], pairwiseScorer: PairwiseScorer): Array[Array[Int]] = {
val allPredBackptrs = new Array[Array[Int]](docGraphs.size);
for (i <- 0 until docGraphs.size) {
val docGraph = docGraphs(i);
Logger.logs("Decoding " + i);
val predBackptrs = viterbiDecode(docGraph, pairwiseScorer);
allPredBackptrs(i) = predBackptrs;
}
allPredBackptrs;
}
def viterbiDecodeAllFormClusterings(docGraphs: Seq[DocumentGraph], pairwiseScorer: PairwiseScorer): (Array[Array[Int]], Array[OrderedClustering]) = {
val allPredBackptrs = viterbiDecodeAll(docGraphs, pairwiseScorer);
val allPredClusteringsSeq = (0 until docGraphs.size).map(i => OrderedClustering.createFromBackpointers(allPredBackptrs(i)));
(allPredBackptrs, allPredClusteringsSeq.toArray)
}
}
| malcolmgreaves/berkeley-entity | src/main/java/edu/berkeley/nlp/entity/coref/DocumentInferencer.scala | Scala | gpl-3.0 | 1,944 |
package fpinscala.parallelism
import java.util.concurrent._
object Par {
type Par[A] = ExecutorService => Future[A]
def run[A](s: ExecutorService)(a: Par[A]): Future[A] = a(s)
/*
The implementation of `unit` does not use the `ExecutorService`; it simply returns a `Future` directly.
*/
def unit[A](a: A): Par[A] =
(es: ExecutorService) => UnitFuture(a)
/* Simple future for wrapping a constant value. */
case class UnitFuture[A](get: A) extends Future[A] {
def isDone = true
def get(timeout: Long, units: TimeUnit) = get
def isCancelled = false
def cancel(evenIfRunning: Boolean): Boolean = false
}
/*
Notice this implementation does not evaluate the call to `f` in a separate logical thread. This is in keeping with our design choice of having `fork` be the sole function in the API for controlling parallelism. We can always do `fork(map2(a,b)(f))` if we want the evaluation of `f` to occur in a separate thread.
This implementation does _not_ respect timeouts. In order to respect timeouts, we need a new `Future` implementation that records the amount of time spent evaluating `af`, then subtracts that time from the available time allocated for evaluating `bf`.
*/
def map2_simple[A,B,C](a: Par[A], b: Par[B])(f: (A,B) => C): Par[C] =
(es: ExecutorService) => {
val af = a(es)
val bf = b(es)
UnitFuture(f(af.get, bf.get))
}
/* This version respects timeouts. See `Map2Future` below. */
def map2[A,B,C](a: Par[A], b: Par[B])(f: (A,B) => C): Par[C] =
es => {
val (af, bf) = (a(es), b(es))
Map2Future(af, bf, f)
}
/*
This is the simplest, most natural implementation, but there are some problems with it--for one, the outer `Callable` will block waiting for the 'inner' task to complete. Since this blocked thread occupies a thread in our thread pool or whatever resource backs the `ExecutorService`, this implies we're losing out on some potential parallelism (essentially, we are using two threads when one should do). This is a symptom of a more serious problem with the implementation that we'll discuss later in the chapter.
*/
def fork_simple[A](a: => Par[A]): Par[A] =
es => es.submit(new Callable[A] {
def call = a(es).get
})
/*
Note: this implementation will not prevent repeated evaluation if multiple threads call `get` in parallel. We could prevent this using synchronization, but it isn't needed for our purposes here (also, repeated evaluation of pure values won't affect results).
*/
case class Map2Future[A,B,C](a: Future[A], b: Future[B],
f: (A,B) => C) extends Future[C] {
var cache: Option[C] = None
def isDone = cache.isDefined
def isCancelled = a.isCancelled || b.isCancelled
def cancel(evenIfRunning: Boolean) =
a.cancel(evenIfRunning) || b.cancel(evenIfRunning)
def get = compute(Long.MaxValue)
def get(timeout: Long, units: TimeUnit): C =
compute(TimeUnit.MILLISECONDS.convert(timeout, units))
private def compute(timeoutMs: Long): C = cache match {
case Some(c) => c
case None =>
val start = System.currentTimeMillis
val ar = a.get(timeoutMs, TimeUnit.MILLISECONDS)
val stop = System.currentTimeMillis; val at = stop-start
val br = b.get(timeoutMs - at, TimeUnit.MILLISECONDS)
cache = Some(f(ar, br))
cache.get
}
}
def asyncF[A,B](f: A => B): A => Par[B] =
a => fork(unit(f(a)))
def map[A,B](fa: Par[A])(f: A => B): Par[B] =
map2(fa, unit(()))((a,_) => f(a))
def sortPar(l: Par[List[Int]]) = map(l)(_.sorted)
/*
This implementation does not preserve timeouts, and you can probably see how it would be rather finicky to do this correctly. This is an argument in favor of defining combinators like `parMap` in terms of simpler combinators.
*/
def parMap[A,B](l: List[A])(f: A => B): Par[List[B]] =
es => {
val fs: List[Future[B]] = l map (a => asyncF(f)(a)(es))
UnitFuture(fs.map(_.get))
}
def sequence[A](l: List[Par[A]]): Par[List[A]] =
l.foldRight[Par[List[A]]](unit(List()))((h,t) => map2(h,t)(_ :: _))
def parFilter[A](l: List[A])(f: A => Boolean): Par[List[A]] = {
val pars: List[Par[List[A]]] =
l map (asyncF((a: A) => if (f(a)) List(a) else List()))
map(sequence(pars))(_.flatten) // convenience method on `List` for concatenating a list of lists
}
def equal[A](e: ExecutorService)(p: Par[A], p2: Par[A]): Boolean =
p(e).get == p2(e).get
def delay[A](fa: => Par[A]): Par[A] =
es => fa(es)
/*
The correctness of this implementation requires only that the `ExecutorService` begins executing tasks in the order they are submitted. This enables us to safely call `innerF.get`. (You may want to try proving to yourself that this cannot deadlock)
*/
def fork[A](p: => Par[A]): Par[A] = {
es => {
val latch = new CountDownLatch(1)
var result: Option[A] = None
var innerF: Future[A] = null
var resultF: Future[_] = null
val f = es.submit(new Runnable {
def run = {
innerF = p(es)
resultF = es.submit(new Runnable {
def run = { result = Some(innerF.get); latch.countDown }
})
}
})
new Future[A] {
def get = { latch.await; result.get }
def get(timeout: Long, units: TimeUnit) = {
latch.await(timeout, units)
result.get
}
def isDone = latch.getCount == 0
def cancel(b: Boolean) = {
isCancelled =
isCancelled ||
f.cancel(b) ||
(innerF != null && innerF.cancel(b)) ||
(resultF != null && resultF.cancel(b))
isCancelled
}
var isCancelled = false
}
}
}
/* This implementation uses `get` directly and does not propagate timeouts. */
def choice[A](a: Par[Boolean])(ifTrue: Par[A], ifFalse: Par[A]): Par[A] =
es => if (a(es).get) ifTrue(es) else ifFalse(es)
def choiceN[A](a: Par[Int])(choices: List[Par[A]]): Par[A] =
es => choices(a(es).get)(es)
def choiceViaChoiceN[A](a: Par[Boolean])(ifTrue: Par[A], ifFalse: Par[A]): Par[A] =
choiceN(map(a)(b => if (b) 1 else 0))(List(ifTrue, ifFalse))
def chooser[A,B](a: Par[A])(choices: A => Par[B]): Par[B] =
es => choices(a(es).get)(es)
/* `chooser` is usually called `flatMap` or `bind`. */
def flatMap[A,B](a: Par[A])(choices: A => Par[B]): Par[B] =
es => choices(a(es).get)(es)
def choiceViaFlatMap[A](p: Par[Boolean])(ifTrue: Par[A], ifFalse: Par[A]): Par[A] =
flatMap(p)(b => if (b) ifTrue else ifFalse)
def choiceNViaFlatMap[A](p: Par[Int])(choices: List[Par[A]]): Par[A] =
flatMap(p)(i => choices(i))
/*
This implementation is not safe for execution on bounded thread pools, and it also does not preserve timeouts. Can you see why? You may wish to try implementing a nonblocking version as was done for `fork`.
*/
def join[A](a: Par[Par[A]]): Par[A] =
es => a(es).get.apply(es)
def joinViaFlatMap[A](a: Par[Par[A]]): Par[A] =
flatMap(a)(a => a)
def flatMapViaJoin[A,B](p: Par[A])(f: A => Par[B]): Par[B] =
join(map(p)(f))
/* Gives us infix syntax for `Par`. */
implicit def toParOps[A](p: Par[A]): ParOps[A] = new ParOps(p)
class ParOps[A](p: Par[A]) {
}
}
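/*
Illustrative sketch (not part of the original exercises): composing and running a
parallel computation against an explicit thread pool. `Executors.newFixedThreadPool`
comes from java.util.concurrent; everything else is defined above.

  val es = Executors.newFixedThreadPool(4)
  val computation = Par.parMap(List(1, 2, 3, 4))(_ * 2)
  val result = Par.run(es)(computation).get   // List(2, 4, 6, 8)
  es.shutdown()
*/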
object Examples {
def sum(as: IndexedSeq[Int]): Int =
if (as.size <= 1) as.headOption getOrElse 0 // Hints and standalone answers
else {
val (l,r) = as.splitAt(as.length/2)
sum(l) + sum(r)
}
} | ShokuninSan/fpinscala | answers/src/main/scala/fpinscala/parallelism/Par.scala | Scala | mit | 7,652 |
/*
* Copyright © 2015-2019 the contributors (see Contributors.md).
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.responders.v2
import org.knora.webapi.CoreSpec
import org.knora.webapi.responders.v2.ResourcesResponseCheckerV2.compareReadResourcesSequenceV2Response
import org.knora.webapi.util.StringFormatter
class ResourcesResponseCheckerV2Spec extends CoreSpec() {
private implicit val stringFormatter: StringFormatter = StringFormatter.getGeneralInstance
private val resourcesResponderV2SpecFullData = new ResourcesResponderV2SpecFullData
private val resourcesResponderCheckerV2SpecFullData = new ResourcesResponseCheckerV2SpecFullData
"The ResourcesResponseCheckerV2" should {
"not throw an exception if received and expected resource responses are the same" in {
compareReadResourcesSequenceV2Response(expected = resourcesResponderV2SpecFullData.expectedFullResourceResponseForZeitgloecklein, received = resourcesResponderV2SpecFullData.expectedFullResourceResponseForZeitgloecklein)
}
"throw an exception if received and expected resource responses are different" in {
assertThrows[AssertionError] {
compareReadResourcesSequenceV2Response(expected = resourcesResponderV2SpecFullData.expectedFullResourceResponseForZeitgloecklein, received = resourcesResponderV2SpecFullData.expectedFullResourceResponseForReise)
}
}
"throw an exception when comparing a full response to a preview response of the same resource" in {
assertThrows[AssertionError] {
compareReadResourcesSequenceV2Response(expected = resourcesResponderV2SpecFullData.expectedFullResourceResponseForZeitgloecklein, received = resourcesResponderV2SpecFullData.expectedPreviewResourceResponseForZeitgloecklein)
}
}
"throw an exception when comparing a full response to a full response with a different number of values for a property" in {
assertThrows[AssertionError] {
compareReadResourcesSequenceV2Response(expected = resourcesResponderV2SpecFullData.expectedFullResourceResponseForReise, received = resourcesResponderCheckerV2SpecFullData.expectedFullResourceResponseForReiseWrong)
}
}
}
} | musicEnfanthen/Knora | webapi/src/test/scala/org/knora/webapi/responders/v2/ResourcesResponseCheckerV2Spec.scala | Scala | agpl-3.0 | 2,954 |
package test;
trait T {
abstract class Foo;
private object FOO_0 extends Foo {
Console.println("FOO_0 initialized")
}
trait X {
def foo : Foo = FOO_0;
}
}
object Test extends App {
val t = new T{}
val x = new t.X{}
Console.println(x.foo)
}
| yusuke2255/dotty | tests/untried/pos/t675.scala | Scala | bsd-3-clause | 266 |
package com.eharmony.aloha.models.h2o
import com.eharmony.aloha
import com.eharmony.aloha.annotate.CLI
import com.eharmony.aloha.id.ModelId
import com.eharmony.aloha.io.vfs.VfsType.VfsType
import com.eharmony.aloha.io.vfs.{Vfs, VfsType}
/**
* Created by rdeak on 11/17/15.
*/
@CLI(flag = "--h2o")
object Cli {
private[this] val CommandName = "h2o"
/**
* '''NOTE''' null default values is only OK because both parameters are required
* @param spec
* @param model
* @param id
* @param name
* @param externalModel
* @param numMissingThreshold
* @param notes
* @param vfsType
*/
case class Config(spec: String = null,
model: String = null,
id: Long = 0,
name: String = "",
externalModel: Boolean = false,
numMissingThreshold: Option[Int] = None,
notes: Vector[String] = Vector.empty,
vfsType: VfsType = VfsType.vfs2)
def main(args: Array[String]) {
cliParser.parse(args, Config()) match {
case Some(Config(spec, model, id, name, externalModel, numMissingThresh, notes, vfsType)) =>
val specVfs = Vfs.fromVfsType(vfsType)(spec)
val modelVfs = Vfs.fromVfsType(vfsType)(model)
val json = H2oModel.json(specVfs, modelVfs, ModelId(id, name), None, externalModel, numMissingThresh, Option(notes))
println(json.compactPrint)
case None => // Will be taken care of by scopt.
}
}
private[this] def cliParser = {
new scopt.OptionParser[Config](CommandName) {
head(CommandName, aloha.version)
opt[String]('s', "spec") action { (x, c) =>
c.copy(spec = x)
} text "spec is an Apache VFS URL to an aloha spec file." required()
opt[String]('m', "model") action { (x, c) =>
c.copy(model = x)
} text "model is an Apache VFS URL to a VW binary model." required()
opt[String]("fs-type") action { (x, c) =>
c.copy(vfsType = VfsType.withName(x))
} text "file system type: vfs1, vfs2, file. default = vfs2." optional()
opt[String]('n', "name") action { (x, c) =>
c.copy(name = x)
} text "name of the model." optional()
opt[Long]('i', "id") action { (x, c) =>
c.copy(id = x)
} text "numeric id of the model." optional()
opt[Unit]("external") action { (x, c) =>
c.copy(externalModel = true)
} text "link to a binary VW model rather than embedding it inline in the aloha model." optional()
opt[Int]("num-missing-thresh") action { (x, c) =>
c.copy(numMissingThreshold = Option(x))
} text "number of missing features to allow before returning a 'no-prediction'." optional()
opt[String]("note") action { (x, c) =>
c.copy(notes = c.notes :+ x)
} text "notes to add to the model. Can provide this many parameter times." unbounded() optional()
checkConfig { c =>
success
}
}
}
}
| eHarmony/aloha | aloha-h2o/src/main/scala/com/eharmony/aloha/models/h2o/Cli.scala | Scala | mit | 2,993 |
package redstone
import org.scalatest._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import scala.collection.mutable.HashSet
import redstone._
import redstone.piece._
import redstone.solver._
import redstone.solver.util._
@RunWith(classOf[JUnitRunner])
class PriorityQueueSuite extends FunSuite {
test("contains") {
val movesAlreadyExplored: HashSet[Board] = HashSet()
var pieces1: List[BoardPiece] = List()
val boardPiece1 = new SmallSquare(0, 0, 0)
pieces1 = pieces1 :+ boardPiece1
val board1 = new Board(pieces1)
movesAlreadyExplored.add(board1)
assert(movesAlreadyExplored.contains(board1) === true)
var pieces2: List[BoardPiece] = List()
val boardPiece2 = new SmallSquare(0, 0, 0)
pieces2 = pieces2 :+ boardPiece2
val board2 = new Board(pieces2)
assert(movesAlreadyExplored.contains(board2) === true)
}
}
| skumargithub/redstone-solver | src/test/scala/redstone/PriorityQueueSuite.scala | Scala | gpl-2.0 | 919 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.cassandra.sink
import com.datamountaineer.streamreactor.common.config.Helpers
import com.datamountaineer.streamreactor.common.utils.JarManifest
import java.util
import com.datamountaineer.streamreactor.connect.cassandra.config.{CassandraConfigConstants, CassandraConfigSink}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.sink.SinkConnector
import scala.collection.JavaConverters._
import scala.util.{Failure, Try}
/**
* <h1>CassandraSinkConnector</h1>
* Kafka connect Cassandra Sink connector
*
* Sets up CassandraSinkTask and configurations for the tasks.
**/
class CassandraSinkConnector extends SinkConnector with StrictLogging {
private var configProps: util.Map[String, String] = _
private val configDef = CassandraConfigSink.sinkConfig
private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
/**
* States which SinkTask class to use
**/
override def taskClass(): Class[_ <: Task] = classOf[CassandraSinkTask]
/**
* Set the configuration for each work and determine the split
*
* @param maxTasks The max number of task workers be can spawn
* @return a List of configuration properties per worker
**/
override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
logger.info(s"Setting task configurations for $maxTasks workers.")
(1 to maxTasks).map(_ => configProps).toList.asJava
}
/**
* Start the sink and set to configuration
*
* @param props A map of properties for the connector and worker
**/
override def start(props: util.Map[String, String]): Unit = {
//check input topics
Helpers.checkInputTopics(CassandraConfigConstants.KCQL, props.asScala.toMap)
configProps = props
Try(new CassandraConfigSink(props)) match {
case Failure(f) =>
throw new ConnectException(s"Couldn't start Cassandra sink due to configuration error: ${f.getMessage}", f)
case _ =>
}
}
override def stop(): Unit = {}
override def version(): String = manifest.version()
override def config(): ConfigDef = configDef
}
| datamountaineer/stream-reactor | kafka-connect-cassandra/src/main/scala/com/datamountaineer/streamreactor/connect/cassandra/sink/CassandraSinkConnector.scala | Scala | apache-2.0 | 2,924 |
package com.stefansavev.randomprojections.serialization.core
import java.io._
import com.stefansavev.randomprojections.serialization.core.PrimitiveTypeSerializers.TypedIntSerializer
//approach is loosely based on "Scala for generic programmers"
trait TypedSerializer[T]{
type SerializerType = T
def toBinary(outputStream: OutputStream, input: T): Unit
def fromBinary(inputStream: InputStream): T
}
object Core{
/* moved to TupleSerializers via code autogen, left here as an example only
class Tuple2Serializer[A, B](serA: TypedSerializer[A], serB: TypedSerializer[B]) extends TypedSerializer[(A, B)] {
def toBinary(outputStream: OutputStream, input: (A, B)): Unit = {
serA.toBinary(outputStream, input._1)
serB.toBinary(outputStream, input._2)
}
def fromBinary(inputStream: InputStream): (A, B) = {
val a = serA.fromBinary(inputStream)
val b = serB.fromBinary(inputStream)
(a, b)
}
}
implicit def tuple2Serializer[A, B](implicit serA: TypedSerializer[A], serB: TypedSerializer[B]): Tuple2Serializer[A, B] = {
new Tuple2Serializer[A, B](serA, serB)
}
*/
abstract class TypeTag[A](implicit mf: Manifest[A]){
def tag: Int
def manifest: Manifest[A] = mf
}
class Subtype1Serializer[BaseType, SubType1 <: BaseType](tag1: TypeTag[SubType1], subTypeSer1 : TypedSerializer[SubType1]) extends TypedSerializer[BaseType] {
def toBinary(outputStream: OutputStream, input: BaseType): Unit = {
if (tag1.manifest.runtimeClass.equals(input.getClass)){
TypedIntSerializer.toBinary(outputStream, tag1.tag)
subTypeSer1.toBinary(outputStream, input.asInstanceOf[SubType1])
}
else{
throw new IllegalStateException("Unsupported subtype in serialization")
}
}
def fromBinary(inputStream: InputStream): BaseType = {
val inputTag = TypedIntSerializer.fromBinary(inputStream)
if (inputTag == tag1.tag) {
subTypeSer1.fromBinary(inputStream)
}
else{
throw new IllegalStateException("Unknown tag in deserialization")
}
}
}
class Subtype2Serializer[BaseType, SubType1 <: BaseType, SubType2 <: BaseType](
tag1: TypeTag[SubType1],
tag2: TypeTag[SubType2],
subTypeSer1 : TypedSerializer[SubType1],
subTypeSer2 : TypedSerializer[SubType2]) extends TypedSerializer[BaseType] {
if (tag1.tag == tag2.tag){
throw new IllegalStateException("Subtypes should have different tags")
}
if (tag1.manifest.runtimeClass.equals(tag2.manifest.runtimeClass)){
throw new IllegalStateException("Subtypes should be of different classes")
}
def toBinary(outputStream: OutputStream, input: BaseType): Unit = {
if (tag1.manifest.runtimeClass.equals(input.getClass)){
TypedIntSerializer.toBinary(outputStream, tag1.tag)
subTypeSer1.toBinary(outputStream, input.asInstanceOf[SubType1])
}
else if (tag2.manifest.runtimeClass.equals(input.getClass)){
TypedIntSerializer.toBinary(outputStream, tag2.tag)
subTypeSer2.toBinary(outputStream, input.asInstanceOf[SubType2])
}
else{
throw new IllegalStateException("Unsupported subtype in serialization")
}
}
def fromBinary(inputStream: InputStream): BaseType = {
val inputTag = TypedIntSerializer.fromBinary(inputStream)
if (inputTag == tag1.tag) {
subTypeSer1.fromBinary(inputStream)
}
else if (inputTag == tag2.tag){
subTypeSer2.fromBinary(inputStream)
}
else{
throw new IllegalStateException("Unknown tag in deserialization")
}
}
}
implicit def subtype1Serializer[BaseType, SubType1 <: BaseType](implicit typeTag1: TypeTag[SubType1], subTypeSer1 : TypedSerializer[SubType1]): Subtype1Serializer[BaseType, SubType1] = {
new Subtype1Serializer[BaseType, SubType1](typeTag1, subTypeSer1)
}
implicit def subtype2Serializer[BaseType, SubType1 <: BaseType, SubType2 <: BaseType](
implicit
typeTag1: TypeTag[SubType1],
typeTag2: TypeTag[SubType2],
subTypeSer1 : TypedSerializer[SubType1],
subTypeSer2 : TypedSerializer[SubType2]): Subtype2Serializer[BaseType, SubType1, SubType2] = {
new Subtype2Serializer[BaseType, SubType1, SubType2](typeTag1, typeTag2, subTypeSer1, subTypeSer2)
}
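  // Illustrative sketch (editor addition, not part of the original code): how the tagged subtype
  // serializers above are typically wired up for a small sealed hierarchy. The Shape/Circle/Square
  // names are hypothetical, and serializers for the concrete subtypes are assumed to exist already.
  /*
  sealed trait Shape
  case class Circle(radius: Int) extends Shape
  case class Square(side: Int) extends Shape

  implicit object CircleTag extends TypeTag[Circle] { def tag: Int = 1 }
  implicit object SquareTag extends TypeTag[Square] { def tag: Int = 2 }

  // Given circleSerializer: TypedSerializer[Circle] and squareSerializer: TypedSerializer[Square],
  // a serializer for the base type dispatches on the runtime class when writing and on the tag
  // when reading:
  // val shapeSerializer: TypedSerializer[Shape] =
  //   subtype2Serializer[Shape, Circle, Square](CircleTag, SquareTag, circleSerializer, squareSerializer)
  */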
  // Iso means isomorphism: just a mapping from A to B, and back.
trait Iso[A, B]{
type Input = A
type Output = B
def from(input: A): B
def to(output: B): A
}
def toFile[A](serializer: TypedSerializer[A], outputFile: String, input: A): Unit = {
toFile(serializer, new File(outputFile), input)
}
def toFile[A](serializer: TypedSerializer[A], outputFile: File, input: A): Unit = {
val outputStream = new BufferedOutputStream(new FileOutputStream(outputFile))
serializer.toBinary(outputStream, input)
outputStream.close()
}
def fromFile[A](serializer: TypedSerializer[A], inputFile: File): A = {
val inputStream = new BufferedInputStream(new FileInputStream(inputFile))
val output = serializer.fromBinary(inputStream)
inputStream.close()
output
}
def fromFile[A](serializer: TypedSerializer[A], inputFile: String): A = {
fromFile(serializer, new File(inputFile))
}
class IsoSerializer[A, B](iso: Iso[A, B], serB: TypedSerializer[B]) extends TypedSerializer[A]{
def toBinary(outputStream: OutputStream, input: A): Unit = {
serB.toBinary(outputStream, iso.from(input))
}
def fromBinary(inputStream: InputStream): A = {
iso.to(serB.fromBinary(inputStream))
}
}
implicit def isoSerializer[A, B](implicit iso: Iso[A, B], serB: TypedSerializer[B]): IsoSerializer[A, B] = {
new IsoSerializer[A, B](iso, serB)
}
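  // Illustrative sketch (editor addition, not part of the original code): Iso lets an existing
  // serializer be reused for any type that maps cleanly onto an already-serializable one.
  // ExamplePoint is a hypothetical type; the (Int, Int) serializer is assumed to come from the
  // autogenerated TupleSerializers mentioned at the top of this file.
  /*
  case class ExamplePoint(x: Int, y: Int)

  implicit object ExamplePointIso extends Iso[ExamplePoint, (Int, Int)] {
    def from(input: ExamplePoint): (Int, Int) = (input.x, input.y)
    def to(output: (Int, Int)): ExamplePoint = ExamplePoint(output._1, output._2)
  }

  // Given some pairSerializer: TypedSerializer[(Int, Int)]:
  // val pointSerializer: TypedSerializer[ExamplePoint] = isoSerializer(ExamplePointIso, pairSerializer)
  // toFile(pointSerializer, "points.bin", ExamplePoint(1, 2))
  // val restored: ExamplePoint = fromFile(pointSerializer, "points.bin")
  */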
}
| codeaudit/random-projections-at-berlinbuzzwords | src/main/scala/com/stefansavev/randomprojections/serialization/core/CoreSerialization.scala | Scala | apache-2.0 | 5,760 |
package org.jetbrains.plugins.scala
package codeInsight
package intention
package booleans
import com.intellij.testFramework.EditorTestUtil
/**
* Nikolay.Tropin
* 4/29/13
*/
class SimplifyBooleanExprWithLiteralTest extends intentions.ScalaIntentionTestBase {
import EditorTestUtil.{CARET_TAG => CARET}
override def familyName: String = ScalaCodeInsightBundle.message("family.name.simplify.boolean.expression.with.a.literal")
def test_NotTrue(): Unit = {
val text = s"$CARET!true"
val result = s"false"
doTest(text, result)
}
def test_TrueEqualsA(): Unit = {
val text =
s"""val a = true
|${CARET}true == a""".stripMargin
val result =
s"""val a = true
|a""".stripMargin
doTest(text, result)
}
def test_TrueAndA(): Unit = {
val text =
s"""val a = true
|true $CARET&& a""".stripMargin
val result =
s"""val a = true
|a""".stripMargin
doTest(text, result)
}
def test_AOrFalse(): Unit = {
val text = s"val a: Boolean = false; a $CARET| false"
val result = s"val a: Boolean = false; a"
doTest(text, result)
}
def test_TwoExpressions(): Unit = {
val text =
s"""val a = true
|${CARET}true && (a || false)""".stripMargin
val result =
s"""val a = true
|a""".stripMargin
doTest(text, result)
}
def test_TrueNotEqualsA(): Unit = {
val text =
s"""val a = true
|val flag: Boolean = ${CARET}true != a""".stripMargin
val result =
s"""val a = true
|val flag: Boolean = !a""".stripMargin
doTest(text, result)
}
def test_SimplifyInParentheses(): Unit = {
val text =
s"""val a = true
|!(${CARET}true != a)""".stripMargin
val result =
s"""val a = true
|!(!a)""".stripMargin
doTest(text, result)
}
def test_TrueAsAny(): Unit = {
val text =
s"""def trueAsAny: Any = {
| true
|}
|if (trueAsAny =$CARET= true) {
| println("true")
|} else {
| println("false")
|}""".stripMargin
checkIntentionIsNotAvailable(text)
}
} | JetBrains/intellij-scala | scala/codeInsight/test/org/jetbrains/plugins/scala/codeInsight/intention/booleans/SimplifyBooleanExprWithLiteralTest.scala | Scala | apache-2.0 | 2,157 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.testdata.candidate.assessmentcentre
import com.google.inject.name.Named
import javax.inject.{ Inject, Singleton }
import model.UniqueIdentifier
import model.assessmentscores._
import model.exchange.testdata.CreateCandidateResponse.CreateCandidateResponse
import model.testdata.candidate.CreateCandidateData.CreateCandidateData
import org.joda.time.{ DateTime, DateTimeZone }
import play.api.mvc.RequestHeader
import services.assessmentscores.AssessmentScoresService
import services.testdata.candidate.ConstructiveGenerator
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
@Singleton
class AssessmentCentreScoresEnteredStatusGenerator @Inject() (val previousStatusGenerator: AssessmentCentreAllocationConfirmedStatusGenerator,
@Named("AssessorAssessmentScoresService")
assessorAssessmentScoresService: AssessmentScoresService
) extends ConstructiveGenerator {
val updatedBy = UniqueIdentifier.randomUniqueIdentifier
// The scores awarded to the candidate by assessor/reviewer
def analysisExerciseSample(assessorOrReviewer: String) = AssessmentScoresExercise(
attended = true,
makingEffectiveDecisionsAverage = Some(5.0),
communicatingAndInfluencingAverage = Some(4.0),
seeingTheBigPictureAverage = Some(4.0),
updatedBy = updatedBy,
seeingTheBigPictureScores = Some(SeeingTheBigPictureScores(
Some(1.0), Some(1.0), Some(1.0), Some(1.0), Some(1.0)
)),
makingEffectiveDecisionsScores = Some(MakingEffectiveDecisionsScores(
Some(1.0), Some(1.0), Some(1.0), Some(1.0)
)),
communicatingAndInfluencingScores = Some(CommunicatingAndInfluencingScores(
Some(1.0), Some(1.0), Some(1.0), Some(1.0), Some(1.0)
)),
seeingTheBigPictureFeedback = Some("Strategic approach feedback " + assessorOrReviewer),
makingEffectiveDecisionsFeedback = Some("Analysis and Decision feedback" + assessorOrReviewer),
communicatingAndInfluencingFeedback = Some("Leading and communicating feedback" + assessorOrReviewer)
)
def groupExerciseSample(assessorOrReviewer: String) = AssessmentScoresExercise(
attended = true,
makingEffectiveDecisionsAverage = Some(5.0),
workingTogetherDevelopingSelfAndOthersAverage = Some(2.0),
communicatingAndInfluencingAverage = Some(4.0),
updatedBy = updatedBy,
makingEffectiveDecisionsScores = Some(MakingEffectiveDecisionsScores(
Some(1.0), Some(1.0), Some(1.0), Some(1.0)
)),
workingTogetherDevelopingSelfAndOthersScores = Some(WorkingTogetherDevelopingSelfAndOtherScores(
Some(1.0), Some(1.0), Some(1.0), Some(1.0), Some(1.0)
)),
communicatingAndInfluencingScores = Some(CommunicatingAndInfluencingScores(
Some(1.0), Some(1.0), Some(1.0), Some(1.0), Some(1.0)
)),
makingEffectiveDecisionsFeedback = Some("Analysis and Decision feedback" + assessorOrReviewer),
workingTogetherDevelopingSelfAndOthersFeedback = Some("Building Productive feedback" + assessorOrReviewer),
communicatingAndInfluencingFeedback = Some("Leading and communicating feedback" + assessorOrReviewer)
)
def leadershipExerciseSample(assessorOrReviewer: String) = AssessmentScoresExercise(
attended = true,
workingTogetherDevelopingSelfAndOthersAverage = Some(4.0),
communicatingAndInfluencingAverage = Some(4.0),
seeingTheBigPictureAverage = Some(4.0),
updatedBy = updatedBy,
workingTogetherDevelopingSelfAndOthersScores = Some(WorkingTogetherDevelopingSelfAndOtherScores(
Some(1.0), Some(1.0), Some(1.0), Some(1.0), Some(1.0)
)),
communicatingAndInfluencingScores = Some(CommunicatingAndInfluencingScores(
Some(1.0), Some(1.0), Some(1.0), Some(1.0), Some(1.0)
)),
seeingTheBigPictureScores = Some(SeeingTheBigPictureScores(
Some(1.0), Some(1.0), Some(1.0), Some(1.0), Some(1.0)
)),
workingTogetherDevelopingSelfAndOthersFeedback = Some("Building Productive feedback " + assessorOrReviewer),
communicatingAndInfluencingFeedback = Some("Leading and communicating feedback " + assessorOrReviewer),
seeingTheBigPictureFeedback = Some("Strategic approach feedback " + assessorOrReviewer)
)
def finalFeedbackSample(assessorOrReviewer: String) = AssessmentScoresFinalFeedback(
"final feedback for " + assessorOrReviewer, updatedBy, DateTime.now(DateTimeZone.UTC)
)
def generate(generationId: Int, generatorConfig: CreateCandidateData)
(implicit hc: HeaderCarrier, rh: RequestHeader): Future[CreateCandidateResponse] = {
import model.command.AssessmentScoresCommands.AssessmentScoresSectionType._
for {
candidateInPreviousStatus <- previousStatusGenerator.generate(generationId, generatorConfig)
appId = UniqueIdentifier(candidateInPreviousStatus.applicationId.getOrElse(sys.error("Missed application id for candidate")))
assessorOrReviewer = "assessor"
_ <- assessorAssessmentScoresService.submitExercise(appId, writtenExercise, analysisExerciseSample(assessorOrReviewer))
_ <- assessorAssessmentScoresService.submitExercise(appId, teamExercise, groupExerciseSample(assessorOrReviewer))
_ <- assessorAssessmentScoresService.submitExercise(appId, leadershipExercise, leadershipExerciseSample(assessorOrReviewer))
_ <- assessorAssessmentScoresService.submitFinalFeedback(appId, finalFeedbackSample(assessorOrReviewer))
} yield {
candidateInPreviousStatus
}
}
}
| hmrc/fset-faststream | app/services/testdata/candidate/assessmentcentre/AssessmentCentreScoresEnteredStatusGenerator.scala | Scala | apache-2.0 | 6,247 |
package recfun
import common._
import scala.annotation.tailrec
object Main {
def main(args: Array[String]) {
println("Pascal's Triangle")
for (row <- 0 to 10) {
for (col <- 0 to row)
print(pascal(col, row) + " ")
println()
}
}
/**
* Exercise 1
*/
def pascal(c: Int, r: Int): Int = {
if(r == 0 || c == 0 || c == r )
1
else
pascal(c - 1, r - 1) + pascal(c , r - 1)
}
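  // Worked example (editor addition): the recurrence reproduces the triangle printed in main, with
  // pascal(c, r) indexed by column c inside row r:
  // row 0: 1
  // row 1: 1 1
  // row 2: 1 2 1
  // row 3: 1 3 3 1, e.g. pascal(1, 3) == pascal(0, 2) + pascal(1, 2) == 1 + 2 == 3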
/**
* Exercise 2
*/
def balance(chars: List[Char]): Boolean = {
@tailrec
    // state < 0 means there are unmatched '(' still open; a ')' with nothing open returns the sentinel -1
    def findBalance(chars : List[Char], state : Int) : Int = {
if (chars.isEmpty) state
else {
if(chars.head == '(') findBalance(chars.tail, state - 1)
else if(chars.head == ')' && state < 0) findBalance(chars.tail, state + 1)
else if(chars.head == ')' ) -1 // no starting '('
else findBalance(chars.tail, state)
}
}
findBalance(chars, 0) == 0
}
/**
* Exercise 3
*/
def countChange(money: Int, coins: List[Int]): Int = {
if (money < 0 || coins.isEmpty)
0
else if (money == 0)
1
else
countChange(money - coins.head, coins) + countChange(money, coins.tail)
}
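  // Worked example (editor addition): countChange(4, List(1, 2)) == 3. The recursion splits every
  // call into "spend the first coin once more" vs "drop that coin entirely", which enumerates
  // 1+1+1+1, 1+1+2 and 2+2 exactly once each.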
}
| alvindaiyan/scalalearning | alvindaiyan-coursera-scala-recfun-18bbfb6a3fd5/src/main/scala/recfun/Main.scala | Scala | apache-2.0 | 1,190 |
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jc.incremental.jawa
import _root_.java.io._
import java.net.InetAddress
import com.intellij.openapi.diagnostic.{Logger => JpsLogger}
import org.argus.jc.incremental.jawa.data.{CompilationData, CompilerData}
import org.argus.jc.incremental.jawa.local.LocalServer
import org.argus.jc.incremental.jawa.model.ProjectSettings
import org.argus.jc.incremental.jawa.remote.RemoteServer
import org.jetbrains.jps.ModuleChunk
import org.jetbrains.jps.builders.java.JavaBuilderUtil
import org.jetbrains.jps.incremental._
import org.jetbrains.jps.incremental.messages.ProgressMessage
import org.jetbrains.jps.model.module.JpsModule
import _root_.scala.collection.JavaConverters._
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
*/
object JawaBuilder {
def compile(context: CompileContext,
chunk: ModuleChunk,
sources: Seq[File],
modules: Set[JpsModule],
client: Client): Either[String, ModuleLevelBuilder.ExitCode] = {
context.processMessage(new ProgressMessage("Reading compilation settings..."))
for {
compilerData <- CompilerData.from(context, chunk)
compilationData <- CompilationData.from(sources, context, chunk)
} yield {
val server = getServer(context)
server.compile(compilerData, compilationData, client)
}
}
def hasBuildModules(chunk: ModuleChunk): Boolean = {
    chunk.getModules.asScala.exists(_.getName.endsWith("-build")) // gen-idea doesn't use the SBT module type
}
def projectSettings(context: CompileContext): ProjectSettings = SettingsManager.getProjectSettings(context.getProjectDescriptor.getProject)
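  // Editor note: a build counts as a whole-project make only when Java is compiled incrementally
  // and every target of every chunk is affected by the current compile scope; a single unaffected
  // target turns it into a partial build and the method below returns false.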
def isMakeProject(context: CompileContext): Boolean = JavaBuilderUtil.isCompileJavaIncrementally(context) && {
for {
chunk <- context.getProjectDescriptor.getBuildTargetIndex.getSortedTargetChunks(context).asScala
target <- chunk.getTargets.asScala
} {
if (!context.getScope.isAffected(target)) return false
}
true
}
val Log: JpsLogger = JpsLogger.getInstance(JawaBuilder.getClass.getName)
// Cached local localServer
private var cachedServer: Option[Server] = None
private val lock = new Object()
def localServer: Server = {
lock.synchronized {
val server = cachedServer.getOrElse(new LocalServer())
cachedServer = Some(server)
server
}
}
private def cleanLocalServerCache() {
lock.synchronized {
cachedServer = None
}
}
private def getServer(context: CompileContext): Server = {
val settings = SettingsManager.getGlobalSettings(context.getProjectDescriptor.getModel.getGlobal)
if (settings.isCompileServerEnabled && JavaBuilderUtil.CONSTANT_SEARCH_SERVICE.get(context) != null) {
cleanLocalServerCache()
new RemoteServer(InetAddress.getByName(null), settings.getCompileServerPort)
} else {
localServer
}
}
}
| arguslab/argus-cit-intellij | jc-plugin/src/main/scala/org/argus/jc/incremental/jawa/JawaBuilder.scala | Scala | epl-1.0 | 3,336 |
package taczombie.client.util
object RegexHelper {
  /** Returns true if `input` is a decimal port number in the range 0-65535. */
  def checkPort(input: String): Boolean = {
    // The alternatives cover 0-9999, 10000-59999, 60000-64999, 65000-65499, 65500-65529 and 65530-65535.
    val portPatternNumber = """^([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$""".r
    portPatternNumber.findFirstIn(input).isDefined
  }
} | mahieke/TacZombie | gui/src/main/scala/taczombie/client/util/RegexHelper.scala | Scala | gpl-2.0 | 339 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Locale, ServiceConfigurationError, ServiceLoader}
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.util.{Failure, Success, Try}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkException
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogUtils}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.connector.catalog.TableProvider
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.command.DataWritingCommand
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.datasources.v2.FileDataSourceV2
import org.apache.spark.sql.execution.datasources.v2.orc.OrcDataSourceV2
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.{RateStreamProvider, TextSocketSourceProvider}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{CalendarIntervalType, StructField, StructType}
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.util.{ThreadUtils, Utils}
/**
* The main class responsible for representing a pluggable Data Source in Spark SQL. In addition to
* acting as the canonical set of parameters that can describe a Data Source, this class is used to
* resolve a description to a concrete implementation that can be used in a query plan
* (either batch or streaming) or to write out data using an external library.
*
* From an end user's perspective a DataSource description can be created explicitly using
* [[org.apache.spark.sql.DataFrameReader]] or CREATE TABLE USING DDL. Additionally, this class is
* used when resolving a description from a metastore to a concrete implementation.
*
* Many of the arguments to this class are optional, though depending on the specific API being used
* these optional arguments might be filled in during resolution using either inference or external
* metadata. For example, when reading a partitioned table from a file system, partition columns
* will be inferred from the directory layout even if they are not specified.
*
 * @param paths A list of file system paths that hold data. These will be globbed and qualified
 *              before use. This option only works when reading from a [[FileFormat]].
* @param userSpecifiedSchema An optional specification of the schema of the data. When present
* we skip attempting to infer the schema.
* @param partitionColumns A list of column names that the relation is partitioned by. This list is
* generally empty during the read path, unless this DataSource is managed
* by Hive. In these cases, during `resolveRelation`, we will call
* `getOrInferFileFormatSchema` for file based DataSources to infer the
* partitioning. In other cases, if this list is empty, then this table
* is unpartitioned.
* @param bucketSpec An optional specification for bucketing (hash-partitioning) of the data.
* @param catalogTable Optional catalog table reference that can be used to push down operations
* over the datasource to the catalog service.
*/
case class DataSource(
sparkSession: SparkSession,
className: String,
paths: Seq[String] = Nil,
userSpecifiedSchema: Option[StructType] = None,
partitionColumns: Seq[String] = Seq.empty,
bucketSpec: Option[BucketSpec] = None,
options: Map[String, String] = Map.empty,
catalogTable: Option[CatalogTable] = None) extends Logging {
case class SourceInfo(name: String, schema: StructType, partitionColumns: Seq[String])
lazy val providingClass: Class[_] = {
val cls = DataSource.lookupDataSource(className, sparkSession.sessionState.conf)
// `providingClass` is used for resolving data source relation for catalog tables.
// As now catalog for data source V2 is under development, here we fall back all the
// [[FileDataSourceV2]] to [[FileFormat]] to guarantee the current catalog works.
// [[FileDataSourceV2]] will still be used if we call the load()/save() method in
// [[DataFrameReader]]/[[DataFrameWriter]], since they use method `lookupDataSource`
// instead of `providingClass`.
cls.newInstance() match {
case f: FileDataSourceV2 => f.fallbackFileFormat
case _ => cls
}
}
private def providingInstance() = providingClass.getConstructor().newInstance()
private def newHadoopConfiguration(): Configuration =
sparkSession.sessionState.newHadoopConfWithOptions(options)
lazy val sourceInfo: SourceInfo = sourceSchema()
private val caseInsensitiveOptions = CaseInsensitiveMap(options)
private val equality = sparkSession.sessionState.conf.resolver
bucketSpec.map { bucket =>
SchemaUtils.checkColumnNameDuplication(
bucket.bucketColumnNames, "in the bucket definition", equality)
SchemaUtils.checkColumnNameDuplication(
bucket.sortColumnNames, "in the sort definition", equality)
}
/**
* Get the schema of the given FileFormat, if provided by `userSpecifiedSchema`, or try to infer
   * it. In the read path, only tables managed by Hive provide the partition columns properly when
* initializing this class. All other file based data sources will try to infer the partitioning,
* and then cast the inferred types to user specified dataTypes if the partition columns exist
* inside `userSpecifiedSchema`, otherwise we can hit data corruption bugs like SPARK-18510.
* This method will try to skip file scanning whether `userSpecifiedSchema` and
* `partitionColumns` are provided. Here are some code paths that use this method:
* 1. `spark.read` (no schema): Most amount of work. Infer both schema and partitioning columns
* 2. `spark.read.schema(userSpecifiedSchema)`: Parse partitioning columns, cast them to the
* dataTypes provided in `userSpecifiedSchema` if they exist or fallback to inferred
* dataType if they don't.
* 3. `spark.readStream.schema(userSpecifiedSchema)`: For streaming use cases, users have to
* provide the schema. Here, we also perform partition inference like 2, and try to use
* dataTypes in `userSpecifiedSchema`. All subsequent triggers for this stream will re-use
* this information, therefore calls to this method should be very cheap, i.e. there won't
* be any further inference in any triggers.
*
* @param format the file format object for this DataSource
* @param getFileIndex [[InMemoryFileIndex]] for getting partition schema and file list
* @return A pair of the data schema (excluding partition columns) and the schema of the partition
* columns.
*/
private def getOrInferFileFormatSchema(
format: FileFormat,
getFileIndex: () => InMemoryFileIndex): (StructType, StructType) = {
lazy val tempFileIndex = getFileIndex()
val partitionSchema = if (partitionColumns.isEmpty) {
// Try to infer partitioning, because no DataSource in the read path provides the partitioning
// columns properly unless it is a Hive DataSource
tempFileIndex.partitionSchema
} else {
      // Maintain the old behavior before SPARK-18510: if userSpecifiedSchema is empty, use the
      // inferred partitioning.
if (userSpecifiedSchema.isEmpty) {
val inferredPartitions = tempFileIndex.partitionSchema
inferredPartitions
} else {
val partitionFields = partitionColumns.map { partitionColumn =>
userSpecifiedSchema.flatMap(_.find(c => equality(c.name, partitionColumn))).orElse {
val inferredPartitions = tempFileIndex.partitionSchema
val inferredOpt = inferredPartitions.find(p => equality(p.name, partitionColumn))
if (inferredOpt.isDefined) {
logDebug(
s"""Type of partition column: $partitionColumn not found in specified schema
|for $format.
|User Specified Schema
|=====================
|${userSpecifiedSchema.orNull}
|
|Falling back to inferred dataType if it exists.
""".stripMargin)
}
inferredOpt
}.getOrElse {
throw new AnalysisException(s"Failed to resolve the schema for $format for " +
s"the partition column: $partitionColumn. It must be specified manually.")
}
}
StructType(partitionFields)
}
}
val dataSchema = userSpecifiedSchema.map { schema =>
StructType(schema.filterNot(f => partitionSchema.exists(p => equality(p.name, f.name))))
}.orElse {
format.inferSchema(
sparkSession,
caseInsensitiveOptions,
tempFileIndex.allFiles())
}.getOrElse {
throw new AnalysisException(
s"Unable to infer schema for $format. It must be specified manually.")
}
    // We just print a warning message if the data schema and partition schema have duplicate
    // columns. This is because we allowed users to do so in previous Spark releases and
// we have the existing tests for the cases (e.g., `ParquetHadoopFsRelationSuite`).
// See SPARK-18108 and SPARK-21144 for related discussions.
try {
SchemaUtils.checkColumnNameDuplication(
(dataSchema ++ partitionSchema).map(_.name),
"in the data schema and the partition schema",
equality)
} catch {
case e: AnalysisException => logWarning(e.getMessage)
}
(dataSchema, partitionSchema)
}
/** Returns the name and schema of the source that can be used to continually read data. */
private def sourceSchema(): SourceInfo = {
providingInstance() match {
case s: StreamSourceProvider =>
val (name, schema) = s.sourceSchema(
sparkSession.sqlContext, userSpecifiedSchema, className, caseInsensitiveOptions)
SourceInfo(name, schema, Nil)
case format: FileFormat =>
val path = caseInsensitiveOptions.getOrElse("path", {
throw new IllegalArgumentException("'path' is not specified")
})
// Check whether the path exists if it is not a glob pattern.
// For glob pattern, we do not check it because the glob pattern might only make sense
// once the streaming job starts and some upstream source starts dropping data.
val hdfsPath = new Path(path)
if (!SparkHadoopUtil.get.isGlobPath(hdfsPath)) {
val fs = hdfsPath.getFileSystem(newHadoopConfiguration())
if (!fs.exists(hdfsPath)) {
throw new AnalysisException(s"Path does not exist: $path")
}
}
val isSchemaInferenceEnabled = sparkSession.sessionState.conf.streamingSchemaInference
val isTextSource = providingClass == classOf[text.TextFileFormat]
// If the schema inference is disabled, only text sources require schema to be specified
if (!isSchemaInferenceEnabled && !isTextSource && userSpecifiedSchema.isEmpty) {
throw new IllegalArgumentException(
"Schema must be specified when creating a streaming source DataFrame. " +
"If some files already exist in the directory, then depending on the file format " +
"you may be able to create a static DataFrame on that directory with " +
"'spark.read.load(directory)' and infer schema from it.")
}
val (dataSchema, partitionSchema) = getOrInferFileFormatSchema(format, () => {
// The operations below are expensive therefore try not to do them if we don't need to,
// e.g., in streaming mode, we have already inferred and registered partition columns,
// we will never have to materialize the lazy val below
val globbedPaths =
checkAndGlobPathIfNecessary(checkEmptyGlobPath = false, checkFilesExist = false)
createInMemoryFileIndex(globbedPaths)
})
val forceNullable =
sparkSession.sessionState.conf.getConf(SQLConf.FILE_SOURCE_SCHEMA_FORCE_NULLABLE)
val sourceDataSchema = if (forceNullable) dataSchema.asNullable else dataSchema
SourceInfo(
s"FileSource[$path]",
StructType(sourceDataSchema ++ partitionSchema),
partitionSchema.fieldNames)
case _ =>
throw new UnsupportedOperationException(
s"Data source $className does not support streamed reading")
}
}
/** Returns a source that can be used to continually read data. */
def createSource(metadataPath: String): Source = {
providingInstance() match {
case s: StreamSourceProvider =>
s.createSource(
sparkSession.sqlContext,
metadataPath,
userSpecifiedSchema,
className,
caseInsensitiveOptions)
case format: FileFormat =>
val path = caseInsensitiveOptions.getOrElse("path", {
throw new IllegalArgumentException("'path' is not specified")
})
new FileStreamSource(
sparkSession = sparkSession,
path = path,
fileFormatClassName = className,
schema = sourceInfo.schema,
partitionColumns = sourceInfo.partitionColumns,
metadataPath = metadataPath,
options = caseInsensitiveOptions)
case _ =>
throw new UnsupportedOperationException(
s"Data source $className does not support streamed reading")
}
}
/** Returns a sink that can be used to continually write data. */
def createSink(outputMode: OutputMode): Sink = {
providingInstance() match {
case s: StreamSinkProvider =>
s.createSink(sparkSession.sqlContext, caseInsensitiveOptions, partitionColumns, outputMode)
case fileFormat: FileFormat =>
val path = caseInsensitiveOptions.getOrElse("path", {
throw new IllegalArgumentException("'path' is not specified")
})
if (outputMode != OutputMode.Append) {
throw new AnalysisException(
s"Data source $className does not support $outputMode output mode")
}
new FileStreamSink(sparkSession, path, fileFormat, partitionColumns, caseInsensitiveOptions)
case _ =>
throw new UnsupportedOperationException(
s"Data source $className does not support streamed writing")
}
}
/**
* Create a resolved [[BaseRelation]] that can be used to read data from or write data into this
* [[DataSource]]
*
* @param checkFilesExist Whether to confirm that the files exist when generating the
* non-streaming file based datasource. StructuredStreaming jobs already
* list file existence, and when generating incremental jobs, the batch
* is considered as a non-streaming file based data source. Since we know
* that files already exist, we don't need to check them again.
*/
def resolveRelation(checkFilesExist: Boolean = true): BaseRelation = {
val relation = (providingInstance(), userSpecifiedSchema) match {
// TODO: Throw when too much is given.
case (dataSource: SchemaRelationProvider, Some(schema)) =>
dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions, schema)
case (dataSource: RelationProvider, None) =>
dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
case (_: SchemaRelationProvider, None) =>
throw new AnalysisException(s"A schema needs to be specified when using $className.")
case (dataSource: RelationProvider, Some(schema)) =>
val baseRelation =
dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
if (baseRelation.schema != schema) {
throw new AnalysisException(
"The user-specified schema doesn't match the actual schema: " +
s"user-specified: ${schema.toDDL}, actual: ${baseRelation.schema.toDDL}. If " +
"you're using DataFrameReader.schema API or creating a table, please do not " +
"specify the schema. Or if you're scanning an existed table, please drop " +
"it and re-create it.")
}
baseRelation
// We are reading from the results of a streaming query. Load files from the metadata log
// instead of listing them using HDFS APIs.
case (format: FileFormat, _)
if FileStreamSink.hasMetadata(
caseInsensitiveOptions.get("path").toSeq ++ paths,
newHadoopConfiguration(),
sparkSession.sessionState.conf) =>
val basePath = new Path((caseInsensitiveOptions.get("path").toSeq ++ paths).head)
val fileCatalog = new MetadataLogFileIndex(sparkSession, basePath,
caseInsensitiveOptions, userSpecifiedSchema)
val dataSchema = userSpecifiedSchema.orElse {
format.inferSchema(
sparkSession,
caseInsensitiveOptions,
fileCatalog.allFiles())
}.getOrElse {
throw new AnalysisException(
s"Unable to infer schema for $format at ${fileCatalog.allFiles().mkString(",")}. " +
"It must be specified manually")
}
HadoopFsRelation(
fileCatalog,
partitionSchema = fileCatalog.partitionSchema,
dataSchema = dataSchema,
bucketSpec = None,
format,
caseInsensitiveOptions)(sparkSession)
// This is a non-streaming file based datasource.
case (format: FileFormat, _) =>
val useCatalogFileIndex = sparkSession.sqlContext.conf.manageFilesourcePartitions &&
catalogTable.isDefined && catalogTable.get.tracksPartitionsInCatalog &&
catalogTable.get.partitionColumnNames.nonEmpty
val (fileCatalog, dataSchema, partitionSchema) = if (useCatalogFileIndex) {
val defaultTableSize = sparkSession.sessionState.conf.defaultSizeInBytes
val index = new CatalogFileIndex(
sparkSession,
catalogTable.get,
catalogTable.get.stats.map(_.sizeInBytes.toLong).getOrElse(defaultTableSize))
(index, catalogTable.get.dataSchema, catalogTable.get.partitionSchema)
} else {
val globbedPaths = checkAndGlobPathIfNecessary(
checkEmptyGlobPath = true, checkFilesExist = checkFilesExist)
val index = createInMemoryFileIndex(globbedPaths)
val (resultDataSchema, resultPartitionSchema) =
getOrInferFileFormatSchema(format, () => index)
(index, resultDataSchema, resultPartitionSchema)
}
HadoopFsRelation(
fileCatalog,
partitionSchema = partitionSchema,
dataSchema = dataSchema.asNullable,
bucketSpec = bucketSpec,
format,
caseInsensitiveOptions)(sparkSession)
case _ =>
throw new AnalysisException(
s"$className is not a valid Spark SQL Data Source.")
}
relation match {
case hs: HadoopFsRelation =>
SchemaUtils.checkColumnNameDuplication(
hs.dataSchema.map(_.name),
"in the data schema",
equality)
SchemaUtils.checkColumnNameDuplication(
hs.partitionSchema.map(_.name),
"in the partition schema",
equality)
DataSourceUtils.verifySchema(hs.fileFormat, hs.dataSchema)
case _ =>
SchemaUtils.checkColumnNameDuplication(
relation.schema.map(_.name),
"in the data schema",
equality)
}
relation
}
/**
* Creates a command node to write the given [[LogicalPlan]] out to the given [[FileFormat]].
   * The returned command is unresolved and needs to be analyzed.
*/
private def planForWritingFileFormat(
format: FileFormat, mode: SaveMode, data: LogicalPlan): InsertIntoHadoopFsRelationCommand = {
// Don't glob path for the write path. The contracts here are:
// 1. Only one output path can be specified on the write path;
// 2. Output path must be a legal HDFS style file system path;
// 3. It's OK that the output path doesn't exist yet;
val allPaths = paths ++ caseInsensitiveOptions.get("path")
val outputPath = if (allPaths.length == 1) {
val path = new Path(allPaths.head)
val fs = path.getFileSystem(newHadoopConfiguration())
path.makeQualified(fs.getUri, fs.getWorkingDirectory)
} else {
throw new IllegalArgumentException("Expected exactly one path to be specified, but " +
s"got: ${allPaths.mkString(", ")}")
}
val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
PartitioningUtils.validatePartitionColumn(data.schema, partitionColumns, caseSensitive)
val fileIndex = catalogTable.map(_.identifier).map { tableIdent =>
sparkSession.table(tableIdent).queryExecution.analyzed.collect {
case LogicalRelation(t: HadoopFsRelation, _, _, _) => t.location
}.head
}
// For partitioned relation r, r.schema's column ordering can be different from the column
// ordering of data.logicalPlan (partition columns are all moved after data column). This
// will be adjusted within InsertIntoHadoopFsRelation.
InsertIntoHadoopFsRelationCommand(
outputPath = outputPath,
staticPartitions = Map.empty,
ifPartitionNotExists = false,
partitionColumns = partitionColumns.map(UnresolvedAttribute.quoted),
bucketSpec = bucketSpec,
fileFormat = format,
options = options,
query = data,
mode = mode,
catalogTable = catalogTable,
fileIndex = fileIndex,
outputColumnNames = data.output.map(_.name))
}
/**
* Writes the given [[LogicalPlan]] out to this [[DataSource]] and returns a [[BaseRelation]] for
* the following reading.
*
* @param mode The save mode for this writing.
* @param data The input query plan that produces the data to be written. Note that this plan
* is analyzed and optimized.
* @param outputColumnNames The original output column names of the input query plan. The
* optimizer may not preserve the output column's names' case, so we need
* this parameter instead of `data.output`.
* @param physicalPlan The physical plan of the input query plan. We should run the writing
* command with this physical plan instead of creating a new physical plan,
* so that the metrics can be correctly linked to the given physical plan and
* shown in the web UI.
*/
def writeAndRead(
mode: SaveMode,
data: LogicalPlan,
outputColumnNames: Seq[String],
physicalPlan: SparkPlan): BaseRelation = {
val outputColumns = DataWritingCommand.logicalPlanOutputWithNames(data, outputColumnNames)
if (outputColumns.map(_.dataType).exists(_.isInstanceOf[CalendarIntervalType])) {
throw new AnalysisException("Cannot save interval data type into external storage.")
}
providingInstance() match {
case dataSource: CreatableRelationProvider =>
dataSource.createRelation(
sparkSession.sqlContext, mode, caseInsensitiveOptions, Dataset.ofRows(sparkSession, data))
case format: FileFormat =>
val cmd = planForWritingFileFormat(format, mode, data)
val resolvedPartCols = cmd.partitionColumns.map { col =>
// The partition columns created in `planForWritingFileFormat` should always be
// `UnresolvedAttribute` with a single name part.
assert(col.isInstanceOf[UnresolvedAttribute])
val unresolved = col.asInstanceOf[UnresolvedAttribute]
assert(unresolved.nameParts.length == 1)
val name = unresolved.nameParts.head
outputColumns.find(a => equality(a.name, name)).getOrElse {
throw new AnalysisException(
s"Unable to resolve $name given [${data.output.map(_.name).mkString(", ")}]")
}
}
val resolved = cmd.copy(
partitionColumns = resolvedPartCols,
outputColumnNames = outputColumnNames)
resolved.run(sparkSession, physicalPlan)
// Replace the schema with that of the DataFrame we just wrote out to avoid re-inferring
copy(userSpecifiedSchema = Some(outputColumns.toStructType.asNullable)).resolveRelation()
case _ =>
sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
}
}
/**
* Returns a logical plan to write the given [[LogicalPlan]] out to this [[DataSource]].
*/
def planForWriting(mode: SaveMode, data: LogicalPlan): LogicalPlan = {
if (data.schema.map(_.dataType).exists(_.isInstanceOf[CalendarIntervalType])) {
throw new AnalysisException("Cannot save interval data type into external storage.")
}
providingInstance() match {
case dataSource: CreatableRelationProvider =>
SaveIntoDataSourceCommand(data, dataSource, caseInsensitiveOptions, mode)
case format: FileFormat =>
DataSource.validateSchema(data.schema)
planForWritingFileFormat(format, mode, data)
case _ =>
sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
}
}
/** Returns an [[InMemoryFileIndex]] that can be used to get partition schema and file list. */
private def createInMemoryFileIndex(globbedPaths: Seq[Path]): InMemoryFileIndex = {
val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
new InMemoryFileIndex(
sparkSession, globbedPaths, options, userSpecifiedSchema, fileStatusCache)
}
/**
* Checks and returns files in all the paths.
*/
private def checkAndGlobPathIfNecessary(
checkEmptyGlobPath: Boolean,
checkFilesExist: Boolean): Seq[Path] = {
val allPaths = caseInsensitiveOptions.get("path") ++ paths
DataSource.checkAndGlobPathIfNecessary(allPaths.toSeq, newHadoopConfiguration(),
checkEmptyGlobPath, checkFilesExist)
}
}
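// Illustrative sketch (editor addition, not part of Spark): the typical end-user entry points that
// eventually construct and resolve a DataSource, as described in the class ScalaDoc above. It
// assumes a SparkSession named `spark`; the "examples/users.parquet" path is a placeholder.
//
//   val df = spark.read                  // DataFrameReader assembles a DataSource for "parquet"
//     .format("parquet")
//     .load("examples/users.parquet")    // resolveRelation() infers schema and partitions from files
//
//   df.write
//     .partitionBy("country")            // becomes DataSource.partitionColumns on the write path
//     .mode("overwrite")
//     .save("examples/users_by_country") // planForWritingFileFormat() builds the insert command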
object DataSource extends Logging {
/** A map to maintain backward compatibility in case we move data sources around. */
private val backwardCompatibilityMap: Map[String, String] = {
val jdbc = classOf[JdbcRelationProvider].getCanonicalName
val json = classOf[JsonFileFormat].getCanonicalName
val parquet = classOf[ParquetFileFormat].getCanonicalName
val csv = classOf[CSVFileFormat].getCanonicalName
val libsvm = "org.apache.spark.ml.source.libsvm.LibSVMFileFormat"
val orc = "org.apache.spark.sql.hive.orc.OrcFileFormat"
val nativeOrc = classOf[OrcFileFormat].getCanonicalName
val socket = classOf[TextSocketSourceProvider].getCanonicalName
val rate = classOf[RateStreamProvider].getCanonicalName
Map(
"org.apache.spark.sql.jdbc" -> jdbc,
"org.apache.spark.sql.jdbc.DefaultSource" -> jdbc,
"org.apache.spark.sql.execution.datasources.jdbc.DefaultSource" -> jdbc,
"org.apache.spark.sql.execution.datasources.jdbc" -> jdbc,
"org.apache.spark.sql.json" -> json,
"org.apache.spark.sql.json.DefaultSource" -> json,
"org.apache.spark.sql.execution.datasources.json" -> json,
"org.apache.spark.sql.execution.datasources.json.DefaultSource" -> json,
"org.apache.spark.sql.parquet" -> parquet,
"org.apache.spark.sql.parquet.DefaultSource" -> parquet,
"org.apache.spark.sql.execution.datasources.parquet" -> parquet,
"org.apache.spark.sql.execution.datasources.parquet.DefaultSource" -> parquet,
"org.apache.spark.sql.hive.orc.DefaultSource" -> orc,
"org.apache.spark.sql.hive.orc" -> orc,
"org.apache.spark.sql.execution.datasources.orc.DefaultSource" -> nativeOrc,
"org.apache.spark.sql.execution.datasources.orc" -> nativeOrc,
"org.apache.spark.ml.source.libsvm.DefaultSource" -> libsvm,
"org.apache.spark.ml.source.libsvm" -> libsvm,
"com.databricks.spark.csv" -> csv,
"org.apache.spark.sql.execution.streaming.TextSocketSourceProvider" -> socket,
"org.apache.spark.sql.execution.streaming.RateSourceProvider" -> rate
)
}
/**
   * Classes that were removed in Spark 2.0. Used to detect libraries that are incompatible with Spark 2.0.
*/
private val spark2RemovedClasses = Set(
"org.apache.spark.sql.DataFrame",
"org.apache.spark.sql.sources.HadoopFsRelationProvider",
"org.apache.spark.Logging")
/** Given a provider name, look up the data source class definition. */
def lookupDataSource(provider: String, conf: SQLConf): Class[_] = {
val provider1 = backwardCompatibilityMap.getOrElse(provider, provider) match {
case name if name.equalsIgnoreCase("orc") &&
conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "native" =>
classOf[OrcDataSourceV2].getCanonicalName
case name if name.equalsIgnoreCase("orc") &&
conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "hive" =>
"org.apache.spark.sql.hive.orc.OrcFileFormat"
case "com.databricks.spark.avro" if conf.replaceDatabricksSparkAvroEnabled =>
"org.apache.spark.sql.avro.AvroFileFormat"
case name => name
}
val provider2 = s"$provider1.DefaultSource"
val loader = Utils.getContextOrSparkClassLoader
val serviceLoader = ServiceLoader.load(classOf[DataSourceRegister], loader)
try {
serviceLoader.asScala.filter(_.shortName().equalsIgnoreCase(provider1)).toList match {
// the provider format did not match any given registered aliases
case Nil =>
try {
Try(loader.loadClass(provider1)).orElse(Try(loader.loadClass(provider2))) match {
case Success(dataSource) =>
// Found the data source using fully qualified path
dataSource
case Failure(error) =>
if (provider1.startsWith("org.apache.spark.sql.hive.orc")) {
throw new AnalysisException(
"Hive built-in ORC data source must be used with Hive support enabled. " +
"Please use the native ORC data source by setting 'spark.sql.orc.impl' to " +
"'native'")
} else if (provider1.toLowerCase(Locale.ROOT) == "avro" ||
provider1 == "com.databricks.spark.avro" ||
provider1 == "org.apache.spark.sql.avro") {
throw new AnalysisException(
s"Failed to find data source: $provider1. Avro is built-in but external data " +
"source module since Spark 2.4. Please deploy the application as per " +
"the deployment section of \\"Apache Avro Data Source Guide\\".")
} else if (provider1.toLowerCase(Locale.ROOT) == "kafka") {
throw new AnalysisException(
s"Failed to find data source: $provider1. Please deploy the application as " +
"per the deployment section of " +
"\\"Structured Streaming + Kafka Integration Guide\\".")
} else {
throw new ClassNotFoundException(
s"Failed to find data source: $provider1. Please find packages at " +
"http://spark.apache.org/third-party-projects.html",
error)
}
}
} catch {
case e: NoClassDefFoundError => // This one won't be caught by Scala NonFatal
// NoClassDefFoundError's class name uses "/" rather than "." for packages
val className = e.getMessage.replaceAll("/", ".")
if (spark2RemovedClasses.contains(className)) {
throw new ClassNotFoundException(s"$className was removed in Spark 2.0. " +
"Please check if your library is compatible with Spark 2.0", e)
} else {
throw e
}
}
case head :: Nil =>
// there is exactly one registered alias
head.getClass
case sources =>
          // There are multiple registered aliases for the input. If there is a single datasource
          // whose class name starts with "org.apache.spark", we use it, considering it an
          // internal datasource within Spark.
val sourceNames = sources.map(_.getClass.getName)
val internalSources = sources.filter(_.getClass.getName.startsWith("org.apache.spark"))
if (internalSources.size == 1) {
logWarning(s"Multiple sources found for $provider1 (${sourceNames.mkString(", ")}), " +
s"defaulting to the internal datasource (${internalSources.head.getClass.getName}).")
internalSources.head.getClass
} else {
throw new AnalysisException(s"Multiple sources found for $provider1 " +
s"(${sourceNames.mkString(", ")}), please specify the fully qualified class name.")
}
}
} catch {
case e: ServiceConfigurationError if e.getCause.isInstanceOf[NoClassDefFoundError] =>
// NoClassDefFoundError's class name uses "/" rather than "." for packages
val className = e.getCause.getMessage.replaceAll("/", ".")
if (spark2RemovedClasses.contains(className)) {
throw new ClassNotFoundException(s"Detected an incompatible DataSourceRegister. " +
"Please remove the incompatible library from classpath or upgrade it. " +
s"Error: ${e.getMessage}", e)
} else {
throw e
}
}
}
/**
* Returns an optional [[TableProvider]] instance for the given provider. It returns None if
* there is no corresponding Data Source V2 implementation, or the provider is configured to
   * fall back to the Data Source V1 code path.
*/
def lookupDataSourceV2(provider: String, conf: SQLConf): Option[TableProvider] = {
val useV1Sources = conf.getConf(SQLConf.USE_V1_SOURCE_LIST).toLowerCase(Locale.ROOT)
.split(",").map(_.trim)
val cls = lookupDataSource(provider, conf)
cls.newInstance() match {
case d: DataSourceRegister if useV1Sources.contains(d.shortName()) => None
case t: TableProvider
if !useV1Sources.contains(cls.getCanonicalName.toLowerCase(Locale.ROOT)) =>
Some(t)
case _ => None
}
}
/**
* Checks and returns files in all the paths.
*/
private[sql] def checkAndGlobPathIfNecessary(
pathStrings: Seq[String],
hadoopConf: Configuration,
checkEmptyGlobPath: Boolean,
checkFilesExist: Boolean,
numThreads: Integer = 40): Seq[Path] = {
val qualifiedPaths = pathStrings.map { pathString =>
val path = new Path(pathString)
val fs = path.getFileSystem(hadoopConf)
path.makeQualified(fs.getUri, fs.getWorkingDirectory)
}
// Split the paths into glob and non glob paths, because we don't need to do an existence check
// for globbed paths.
val (globPaths, nonGlobPaths) = qualifiedPaths.partition(SparkHadoopUtil.get.isGlobPath)
val globbedPaths =
try {
ThreadUtils.parmap(globPaths, "globPath", numThreads) { globPath =>
val fs = globPath.getFileSystem(hadoopConf)
val globResult = SparkHadoopUtil.get.globPath(fs, globPath)
if (checkEmptyGlobPath && globResult.isEmpty) {
throw new AnalysisException(s"Path does not exist: $globPath")
}
globResult
}.flatten
} catch {
case e: SparkException => throw e.getCause
}
if (checkFilesExist) {
try {
ThreadUtils.parmap(nonGlobPaths, "checkPathsExist", numThreads) { path =>
val fs = path.getFileSystem(hadoopConf)
if (!fs.exists(path)) {
throw new AnalysisException(s"Path does not exist: $path")
}
}
} catch {
case e: SparkException => throw e.getCause
}
}
val allPaths = globbedPaths ++ nonGlobPaths
if (checkFilesExist) {
val (filteredOut, filteredIn) = allPaths.partition { path =>
InMemoryFileIndex.shouldFilterOut(path.getName)
}
if (filteredIn.isEmpty) {
logWarning(
s"All paths were ignored:\\n ${filteredOut.mkString("\\n ")}")
} else {
logDebug(
s"Some paths were ignored:\\n ${filteredOut.mkString("\\n ")}")
}
}
allPaths.toSeq
}
/**
* When creating a data source table, the `path` option has a special meaning: the table location.
   * This method extracts the `path` option and treats it as the table location to build a
   * [[CatalogStorageFormat]]. Note that the `path` option is removed from the options after this.
*/
def buildStorageFormatFromOptions(options: Map[String, String]): CatalogStorageFormat = {
val path = CaseInsensitiveMap(options).get("path")
val optionsWithoutPath = options.filterKeys(_.toLowerCase(Locale.ROOT) != "path")
CatalogStorageFormat.empty.copy(
locationUri = path.map(CatalogUtils.stringToURI), properties = optionsWithoutPath.toMap)
}
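  // Worked example (editor addition): buildStorageFormatFromOptions(
  //   Map("path" -> "/data/t1", "compression" -> "snappy"))
  // yields a CatalogStorageFormat whose locationUri is /data/t1 and whose properties contain only
  // ("compression" -> "snappy"); the "path" entry is consumed as the table location.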
/**
* Called before writing into a FileFormat based data source to make sure the
* supplied schema is not empty.
   * @param schema the schema of the data to be written
*/
def validateSchema(schema: StructType): Unit = {
def hasEmptySchema(schema: StructType): Boolean = {
schema.size == 0 || schema.find {
case StructField(_, b: StructType, _, _) => hasEmptySchema(b)
case _ => false
}.isDefined
}
if (hasEmptySchema(schema)) {
throw new AnalysisException(
s"""
|Datasource does not support writing empty or nested empty schemas.
|Please make sure the data schema has at least one or more column(s).
""".stripMargin)
}
}
}
| dbtsai/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala | Scala | apache-2.0 | 39,483 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.junit.Test
import org.scalastyle.file.CheckerTest
import org.scalatest.junit.AssertionsForJUnit
// scalastyle:off magic.number multiple.string.literals
class EmptyClassCheckerTest extends AssertionsForJUnit with CheckerTest {
val key = "empty.class"
val classUnderTest = classOf[EmptyClassChecker]
@Test def testKO(): Unit = {
val source = """
package foobar
class Foobar1 {}
class Foobar2 { /* foobar */ }
class Foobar3 {
// foobar
}
class Foobar4 { }
class Foobar5 {
}
class Foobar6 {
def foobar() = 4
}
class Foobar7
"""
assertErrors(List(columnError(4, 6), columnError(5, 6), columnError(6, 6), columnError(9, 6), columnError(10, 6)), source)
}
@Test def testInnerClass(): Unit = {
val source = """
package foobar
class Outer {
class Foobar1
class Foobar2 {}
trait Barbar {}
}
"""
assertErrors(List(columnError(6, 8), columnError(7, 8)), source)
}
}
class ClassTypeParameterCheckerTest extends AssertionsForJUnit with CheckerTest {
val key = "class.type.parameter.name"
val classUnderTest = classOf[ClassTypeParameterChecker]
@Test def testClass(): Unit = {
val source = """
package foobar
class Foobar1
class Foobar2[T]
class Foobar3[Foo] {
def foo = 4
}
class Foobar4[Foo[T]] {
def foo = 4
}
class Foobar5[+T]
class Foobar6[T <: Any]
class Foobar7[List[T], List[Foo], List[T]]
class Foobar8[List[T], List[T], List[Foo]]
class Foobar9[Foo <: Any]
class Foobar0[+Foo]
"""
assertErrors(List(columnError(6, 6), columnError(14, 6), columnError(15, 6), columnError(16, 6), columnError(17, 6)), source, Map("regex" -> "^[A-Z]$"))
}
@Test def testTrait(): Unit = {
val source = """
package foobar
trait Foobar1
trait Foobar2[T]
trait Foobar3[Foo] {
def foo = 4
}
trait Foobar4[Foo[T]] {
def foo = 4
}
trait Foobar5[+T]
trait Foobar6[T <: Any]
trait Foobar7[List[T], List[Foo], List[T]]
trait Foobar8[List[T], List[T], List[Foo]]
trait Foobar9[Foo <: Any]
trait Foobar0[+Foo]
"""
assertErrors(List(columnError(6, 6), columnError(14, 6), columnError(15, 6), columnError(16, 6), columnError(17, 6)), source, Map("regex" -> "^[A-Z]$"))
}
}
| scalastyle/scalastyle | src/test/scala/org/scalastyle/scalariform/ClassCheckerTest.scala | Scala | apache-2.0 | 2,902 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.api.dag.Transformation
import org.apache.flink.configuration.MemorySize
import org.apache.flink.runtime.operators.DamBehavior
import org.apache.flink.streaming.api.operators.{OneInputStreamOperator, SimpleOperatorFactory}
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.dataformat.BaseRow
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.codegen.sort.SortCodeGenerator
import org.apache.flink.table.planner.delegation.BatchPlanner
import org.apache.flink.table.planner.plan.cost.{FlinkCost, FlinkCostFactory}
import org.apache.flink.table.planner.plan.nodes.exec.{BatchExecNode, ExecNode}
import org.apache.flink.table.planner.plan.utils.{FlinkRelMdUtil, RelExplainUtil, SortUtil}
import org.apache.flink.table.runtime.operators.sort.SortOperator
import org.apache.flink.table.runtime.typeutils.BaseRowTypeInfo
import org.apache.calcite.plan.{RelOptCluster, RelOptCost, RelOptPlanner, RelTraitSet}
import org.apache.calcite.rel.core.Sort
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelCollation, RelNode, RelWriter}
import org.apache.calcite.rex.RexNode
import java.util
import scala.collection.JavaConversions._
/**
* Batch physical RelNode for [[Sort]].
*
* This node will output all data rather than `limit` records.
*/
class BatchExecSort(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
sortCollation: RelCollation)
extends Sort(cluster, traitSet, inputRel, sortCollation)
with BatchPhysicalRel
with BatchExecNode[BaseRow] {
require(sortCollation.getFieldCollations.size() > 0)
private val (keys, orders, nullsIsLast) = SortUtil.getKeysAndOrders(
sortCollation.getFieldCollations)
override def copy(
traitSet: RelTraitSet,
newInput: RelNode,
newCollation: RelCollation,
offset: RexNode,
fetch: RexNode): Sort = {
new BatchExecSort(cluster, traitSet, newInput, newCollation)
}
override def explainTerms(pw: RelWriter): RelWriter = {
pw.input("input", getInput)
.item("orderBy", RelExplainUtil.collationToString(sortCollation, getRowType))
}
override def estimateRowCount(mq: RelMetadataQuery): Double = mq.getRowCount(getInput)
override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
val rowCount = mq.getRowCount(getInput)
if (rowCount == null) {
return null
}
val numOfSortKeys = sortCollation.getFieldCollations.size()
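    // Editor note: the CPU estimate below models a comparison-based sort as roughly
    // rowCount * log(rowCount) comparisons, each touching every sort key; max(log(rowCount), 1.0)
    // keeps the estimate positive for very small inputs.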
val cpuCost = FlinkCost.COMPARE_CPU_COST * numOfSortKeys *
rowCount * Math.max(Math.log(rowCount), 1.0)
val memCost = FlinkRelMdUtil.computeSortMemory(mq, getInput)
val costFactory = planner.getCostFactory.asInstanceOf[FlinkCostFactory]
costFactory.makeCost(rowCount, cpuCost, 0, 0, memCost)
}
//~ ExecNode methods -----------------------------------------------------------
override def getDamBehavior = DamBehavior.FULL_DAM
override def getInputNodes: util.List[ExecNode[BatchPlanner, _]] =
List(getInput.asInstanceOf[ExecNode[BatchPlanner, _]])
override def replaceInputNode(
ordinalInParent: Int,
newInputNode: ExecNode[BatchPlanner, _]): Unit = {
replaceInput(ordinalInParent, newInputNode.asInstanceOf[RelNode])
}
override protected def translateToPlanInternal(
planner: BatchPlanner): Transformation[BaseRow] = {
val input = getInputNodes.get(0).translateToPlan(planner)
.asInstanceOf[Transformation[BaseRow]]
val conf = planner.getTableConfig
val inputType = FlinkTypeFactory.toLogicalRowType(getInput.getRowType)
val outputType = FlinkTypeFactory.toLogicalRowType(getRowType)
// sort code gen
val keyTypes = keys.map(inputType.getTypeAt)
val codeGen = new SortCodeGenerator(conf, keys, keyTypes, orders, nullsIsLast)
val operator = new SortOperator(
codeGen.generateNormalizedKeyComputer("BatchExecSortComputer"),
codeGen.generateRecordComparator("BatchExecSortComparator"))
val sortMemory = MemorySize.parse(conf.getConfiguration.getString(
ExecutionConfigOptions.TABLE_EXEC_RESOURCE_SORT_MEMORY)).getBytes
ExecNode.createOneInputTransformation(
input,
getRelDetailedDescription,
SimpleOperatorFactory.of(operator.asInstanceOf[OneInputStreamOperator[BaseRow, BaseRow]]),
BaseRowTypeInfo.of(outputType),
input.getParallelism,
sortMemory)
}
}
| bowenli86/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchExecSort.scala | Scala | apache-2.0 | 5,367 |
package com.programmaticallyspeaking.ncd.infra
import com.programmaticallyspeaking.ncd.testing.UnitTest
class TraversableExtensionsTest extends UnitTest {
import TraversableExtensions._
"distinctBy" - {
"ignores irrelevant properties" in {
val list = Seq(Subject("a", 42), Subject("a", 43), Subject("c", 43))
val result = list.distinctBy(_.b)
result should be (Seq(Subject("a", 42), Subject("a", 43)))
}
}
case class Subject(a: String, b: Int)
}
| provegard/ncdbg | src/test/scala/com/programmaticallyspeaking/ncd/infra/TraversableExtensionsTest.scala | Scala | bsd-3-clause | 484 |
package resolver.parser.document
import scala.collection.mutable.HashMap
/**
 * Pronoun sets and canonical-form tables taken from Berkeley's coreference resolution system.
*/
class BerkeleyPronounSet {
val firstPersonPronouns = Set("i", "me", "myself", "mine", "my", "we", "us", "ourself", "ourselves", "ours", "our");
val secondPersonPronouns = Set("you", "yourself", "yours", "your", "yourselves");
val thirdPersonPronouns = Set("he", "him", "himself", "his", "she", "her", "herself", "hers", "her", "it", "itself", "its", "one", "oneself", "one's", "they", "them", "themself", "themselves", "theirs", "their", "they", "them", "'em", "themselves");
val otherPronouns = Set("who", "whom", "whose", "where", "when","which");
// Borrowed from Stanford
val singularPronouns = Set("i", "me", "myself", "mine", "my", "yourself", "he", "him", "himself", "his", "she", "her", "herself", "hers", "her", "it", "itself", "its", "one", "oneself", "one's");
val pluralPronouns = Set("we", "us", "ourself", "ourselves", "ours", "our", "yourself", "yourselves", "they", "them", "themself", "themselves", "theirs", "their");
val malePronouns = Set("he", "him", "himself", "his");
val femalePronouns = Set("her", "hers", "herself", "she");
val neutralPronouns = Set("it", "its", "itself", "where", "here", "there", "which");
val allPronouns = firstPersonPronouns ++ secondPersonPronouns ++ thirdPersonPronouns ++ otherPronouns;
// Constructed based on Stanford's Dictionaries class
val pronounsToCanonicalPronouns = new HashMap[String,String]();
pronounsToCanonicalPronouns.put("i", "i");
pronounsToCanonicalPronouns.put("me", "i");
pronounsToCanonicalPronouns.put("my", "i");
pronounsToCanonicalPronouns.put("myself", "i");
pronounsToCanonicalPronouns.put("mine", "i");
pronounsToCanonicalPronouns.put("you", "you");
pronounsToCanonicalPronouns.put("your", "you");
pronounsToCanonicalPronouns.put("yourself", "you");
pronounsToCanonicalPronouns.put("yourselves", "you");
pronounsToCanonicalPronouns.put("yours", "you");
pronounsToCanonicalPronouns.put("he", "he");
pronounsToCanonicalPronouns.put("him", "he");
pronounsToCanonicalPronouns.put("his", "he");
pronounsToCanonicalPronouns.put("himself", "he");
pronounsToCanonicalPronouns.put("she", "she");
pronounsToCanonicalPronouns.put("her", "she");
pronounsToCanonicalPronouns.put("herself", "she");
pronounsToCanonicalPronouns.put("hers", "she");
pronounsToCanonicalPronouns.put("we", "we");
pronounsToCanonicalPronouns.put("us", "we");
pronounsToCanonicalPronouns.put("our", "we");
pronounsToCanonicalPronouns.put("ourself", "we");
pronounsToCanonicalPronouns.put("ourselves", "we");
pronounsToCanonicalPronouns.put("ours", "we");
pronounsToCanonicalPronouns.put("they", "they");
pronounsToCanonicalPronouns.put("them", "they");
pronounsToCanonicalPronouns.put("their", "they");
pronounsToCanonicalPronouns.put("themself", "they");
pronounsToCanonicalPronouns.put("themselves", "they");
pronounsToCanonicalPronouns.put("theirs", "they");
pronounsToCanonicalPronouns.put("'em", "they");
pronounsToCanonicalPronouns.put("it", "it");
pronounsToCanonicalPronouns.put("itself", "it");
pronounsToCanonicalPronouns.put("its", "it");
pronounsToCanonicalPronouns.put("one", "one");
pronounsToCanonicalPronouns.put("oneself", "one");
pronounsToCanonicalPronouns.put("one's", "one");
pronounsToCanonicalPronouns.put("that", "that");
pronounsToCanonicalPronouns.put("which", "which");
pronounsToCanonicalPronouns.put("who", "who");
pronounsToCanonicalPronouns.put("whom", "who");
// pronounsToCanonicalPronouns.put("where", "where");
// pronounsToCanonicalPronouns.put("whose", "whose");
// This entry is here just to make results consistent with earlier ones
// on our very small dev set
pronounsToCanonicalPronouns.put("thy", "thy");
pronounsToCanonicalPronouns.put("y'all", "you");
pronounsToCanonicalPronouns.put("you're", "you");
pronounsToCanonicalPronouns.put("you'll", "you");
pronounsToCanonicalPronouns.put("'s", "'s");
def isPronLc(str: String): Boolean = {
allPronouns.contains(str.toLowerCase());
}
def getCanonicalPronLc(str: String): String = {
if (!pronounsToCanonicalPronouns.contains(str.toLowerCase())) {
"";
} else {
pronounsToCanonicalPronouns(str.toLowerCase());
}
}
}
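// Hedged usage sketch (not part of the original file): exercises the two lookup
// methods above; the demo object name is hypothetical.
object BerkeleyPronounSetDemo extends App {
  val pronouns = new BerkeleyPronounSet
  println(pronouns.isPronLc("Her"))             // true (case-insensitive membership test)
  println(pronouns.getCanonicalPronLc("Them"))  // "they"
  println(pronouns.getCanonicalPronLc("table")) // "" (not a known pronoun)
}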
| danielryan2430/WordEmbeddingCorefResolver | src/main/scala/resolver/parser/document/BerkeleyPronounSet.scala | Scala | apache-2.0 | 4,326 |
package com.wavesplatform.api.http
case class ApiException(apiError: ApiError) extends Exception(apiError.message)
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/api/http/ApiException.scala | Scala | mit | 116 |
/* sbt -- Simple Build Tool
* Copyright 2011 Mark Harrah
*/
package sbt
import java.io.File
import java.net.URI
import java.util.Locale
import Project.{ Initialize => _, Setting => _, _ }
import Keys.{ appConfiguration, stateBuildStructure, commands, configuration, historyPath, projectCommand, sessionSettings, shellPrompt, thisProject, thisProjectRef, watch }
import Scope.{ GlobalScope, ThisScope }
import Def.{ Flattened, Initialize, ScopedKey, Setting }
import Types.{ const, idFun }
import complete.DefaultParsers
import language.experimental.macros
sealed trait ProjectDefinition[PR <: ProjectReference] {
/**
* The project ID is used to uniquely identify a project within a build.
* It is used to refer to a project from the command line and in the scope of keys.
*/
def id: String
/** The base directory for the project.*/
def base: File
/**
* The configurations for this project. These are groups of related tasks and the main reason
* to list them here is when one configuration extends another. In this case, a setting lookup
* in one configuration will fall back to the configurations it extends configuration if the setting doesn't exist.
*/
def configurations: Seq[Configuration]
/**
* The explicitly defined sequence of settings that configure this project.
* These do not include the automatically appended settings as configured by `auto`.
*/
def settings: Seq[Setting[_]]
/**
* The references to projects that are aggregated by this project.
* When a task is run on this project, it will also be run on aggregated projects.
*/
def aggregate: Seq[PR]
@deprecated("Delegation between projects should be replaced by directly sharing settings.", "0.13.0")
def delegates: Seq[PR]
/** The references to projects that are classpath dependencies of this project. */
def dependencies: Seq[ClasspathDep[PR]]
/** The references to projects that are aggregate and classpath dependencies of this project. */
def uses: Seq[PR] = aggregate ++ dependencies.map(_.project)
def referenced: Seq[PR] = delegates ++ uses
/** Configures the sources of automatically appended settings.*/
def auto: AddSettings
/**
* The defined [[Plugins]] associated with this project.
* A [[AutoPlugin]] is a common label that is used by plugins to determine what settings, if any, to add to a project.
*/
def plugins: Plugins
/** The [[AutoPlugin]]s enabled for this project. This value is only available on a loaded Project. */
private[sbt] def autoPlugins: Seq[AutoPlugin]
override final def hashCode: Int = id.hashCode ^ base.hashCode ^ getClass.hashCode
override final def equals(o: Any) = o match {
case p: ProjectDefinition[_] => p.getClass == this.getClass && p.id == id && p.base == base
case _ => false
}
override def toString =
{
val agg = ifNonEmpty("aggregate", aggregate)
val dep = ifNonEmpty("dependencies", dependencies)
val conf = ifNonEmpty("configurations", configurations)
val autos = ifNonEmpty("autoPlugins", autoPlugins.map(_.label))
val fields = s"id $id" :: s"base: $base" :: agg ::: dep ::: conf ::: (s"plugins: List($plugins)" :: autos)
s"Project(${fields.mkString(", ")})"
}
private[this] def ifNonEmpty[T](label: String, ts: Iterable[T]): List[String] = if (ts.isEmpty) Nil else s"$label: $ts" :: Nil
}
sealed trait Project extends ProjectDefinition[ProjectReference] {
// TODO: add parameters for plugins in 0.14.0 (not reasonable to do in a binary compatible way in 0.13)
def copy(id: String = id, base: File = base, aggregate: => Seq[ProjectReference] = aggregate, dependencies: => Seq[ClasspathDep[ProjectReference]] = dependencies,
delegates: => Seq[ProjectReference] = delegates, settings: => Seq[Setting[_]] = settings, configurations: Seq[Configuration] = configurations,
auto: AddSettings = auto): Project =
unresolved(id, base, aggregate = aggregate, dependencies = dependencies, delegates = delegates, settings, configurations, auto, plugins, autoPlugins)
def resolve(resolveRef: ProjectReference => ProjectRef): ResolvedProject =
{
def resolveRefs(prs: Seq[ProjectReference]) = prs map resolveRef
def resolveDeps(ds: Seq[ClasspathDep[ProjectReference]]) = ds map resolveDep
def resolveDep(d: ClasspathDep[ProjectReference]) = ResolvedClasspathDependency(resolveRef(d.project), d.configuration)
resolved(id, base, aggregate = resolveRefs(aggregate), dependencies = resolveDeps(dependencies), delegates = resolveRefs(delegates),
settings, configurations, auto, plugins, autoPlugins)
}
def resolveBuild(resolveRef: ProjectReference => ProjectReference): Project =
{
def resolveRefs(prs: Seq[ProjectReference]) = prs map resolveRef
def resolveDeps(ds: Seq[ClasspathDep[ProjectReference]]) = ds map resolveDep
def resolveDep(d: ClasspathDep[ProjectReference]) = ClasspathDependency(resolveRef(d.project), d.configuration)
unresolved(id, base, aggregate = resolveRefs(aggregate), dependencies = resolveDeps(dependencies), delegates = resolveRefs(delegates),
settings, configurations, auto, plugins, autoPlugins)
}
/**
* Applies the given functions to this Project.
* The second function is applied to the result of applying the first to this Project and so on.
* The intended use is a convenience for applying default configuration provided by a plugin.
*/
def configure(transforms: (Project => Project)*): Project = Function.chain(transforms)(this)
/** Sets the base directory for this project.*/
def in(dir: File): Project = copy(base = dir)
/** Adds configurations to this project. Added configurations replace existing configurations with the same name.*/
def overrideConfigs(cs: Configuration*): Project = copy(configurations = Defaults.overrideConfigs(cs: _*)(configurations))
/**
* Adds configuration at the *start* of the configuration list for this project. Previous configurations replace this prefix
* list with the same name.
*/
private[sbt] def prefixConfigs(cs: Configuration*): Project = copy(configurations = Defaults.overrideConfigs(configurations: _*)(cs))
/** Adds new configurations directly to this project. To override an existing configuration, use `overrideConfigs`. */
def configs(cs: Configuration*): Project = copy(configurations = configurations ++ cs)
/** Adds classpath dependencies on internal or external projects. */
def dependsOn(deps: ClasspathDep[ProjectReference]*): Project = copy(dependencies = dependencies ++ deps)
@deprecated("Delegation between projects should be replaced by directly sharing settings.", "0.13.0")
def delegateTo(from: ProjectReference*): Project = copy(delegates = delegates ++ from)
/**
* Adds projects to be aggregated. When a user requests a task to run on this project from the command line,
* the task will also be run in aggregated projects.
*/
def aggregate(refs: ProjectReference*): Project = copy(aggregate = (aggregate: Seq[ProjectReference]) ++ refs)
/** Appends settings to the current settings sequence for this project. */
def settings(ss: Def.SettingsDefinition*): Project = copy(settings = (settings: Seq[Def.Setting[_]]) ++ Def.settings(ss: _*))
@deprecated("Use settingSets method.", "0.13.5")
def autoSettings(select: AddSettings*): Project = settingSets(select.toSeq: _*)
/** Configures how settings from other sources, such as .sbt files, are appended to the explicitly specified settings for this project. */
def settingSets(select: AddSettings*): Project = copy(auto = AddSettings.seq(select: _*))
/**
* Adds a list of .sbt files whose settings will be appended to the settings of this project.
* They will be appended after the explicit settings and already defined automatic settings sources.
*/
def addSbtFiles(files: File*): Project = copy(auto = AddSettings.append(auto, AddSettings.sbtFiles(files: _*)))
/**
* Sets the list of .sbt files to parse for settings to be appended to this project's settings.
* Any configured .sbt files are removed from this project's list.
*/
def setSbtFiles(files: File*): Project = copy(auto = AddSettings.append(AddSettings.clearSbtFiles(auto), AddSettings.sbtFiles(files: _*)))
/**
* Sets the [[AutoPlugin]]s of this project.
* A [[AutoPlugin]] is a common label that is used by plugins to determine what settings, if any, to enable on a project.
*/
def enablePlugins(ns: Plugins*): Project = setPlugins(ns.foldLeft(plugins)(Plugins.and))
/** Disable the given plugins on this project. */
def disablePlugins(ps: AutoPlugin*): Project =
setPlugins(Plugins.and(plugins, Plugins.And(ps.map(p => Plugins.Exclude(p)).toList)))
private[this] def setPlugins(ns: Plugins): Project = {
// TODO: for 0.14.0, use copy when it has the additional `plugins` parameter
unresolved(id, base, aggregate = aggregate, dependencies = dependencies, delegates = delegates, settings, configurations, auto, ns, autoPlugins)
}
/** Definitively set the [[AutoPlugin]]s for this project. */
private[sbt] def setAutoPlugins(autos: Seq[AutoPlugin]): Project = {
// TODO: for 0.14.0, use copy when it has the additional `autoPlugins` parameter
unresolved(id, base, aggregate = aggregate, dependencies = dependencies, delegates = delegates, settings, configurations, auto, plugins, autos)
}
}
sealed trait ResolvedProject extends ProjectDefinition[ProjectRef] {
/** The [[AutoPlugin]]s enabled for this project as computed from [[plugins]].*/
def autoPlugins: Seq[AutoPlugin]
}
sealed trait ClasspathDep[PR <: ProjectReference] { def project: PR; def configuration: Option[String] }
final case class ResolvedClasspathDependency(project: ProjectRef, configuration: Option[String]) extends ClasspathDep[ProjectRef]
final case class ClasspathDependency(project: ProjectReference, configuration: Option[String]) extends ClasspathDep[ProjectReference]
object Project extends ProjectExtra {
@deprecated("Use Def.Setting", "0.13.0")
type Setting[T] = Def.Setting[T]
@deprecated("Use Def.Setting", "0.13.0")
type SettingsDefinition = Def.SettingsDefinition
@deprecated("Use Def.Setting", "0.13.0")
val SettingsDefinition = Def.SettingsDefinition
@deprecated("Use Def.Initialize", "0.13.0")
type Initialize[T] = Def.Initialize[T]
def showContextKey(state: State): Show[ScopedKey[_]] =
showContextKey(state, None)
def showContextKey(state: State, keyNameColor: Option[String]): Show[ScopedKey[_]] =
if (isProjectLoaded(state)) showContextKey(session(state), structure(state), keyNameColor) else Def.showFullKey
def showContextKey(session: SessionSettings, structure: BuildStructure, keyNameColor: Option[String] = None): Show[ScopedKey[_]] =
Def.showRelativeKey(session.current, structure.allProjects.size > 1, keyNameColor)
def showLoadingKey(loaded: LoadedBuild, keyNameColor: Option[String] = None): Show[ScopedKey[_]] =
Def.showRelativeKey(ProjectRef(loaded.root, loaded.units(loaded.root).rootProjects.head), loaded.allProjectRefs.size > 1, keyNameColor)
private abstract class ProjectDef[PR <: ProjectReference](val id: String, val base: File, aggregate0: => Seq[PR], dependencies0: => Seq[ClasspathDep[PR]],
delegates0: => Seq[PR], settings0: => Seq[Def.Setting[_]], val configurations: Seq[Configuration], val auto: AddSettings,
val plugins: Plugins, val autoPlugins: Seq[AutoPlugin]) extends ProjectDefinition[PR] {
lazy val aggregate = aggregate0
lazy val dependencies = dependencies0
lazy val delegates = delegates0
lazy val settings = settings0
Dag.topologicalSort(configurations)(_.extendsConfigs) // checks for cyclic references here instead of having to do it in Scope.delegates
}
// TODO: add parameter for plugins in 0.14.0
// TODO: Modify default settings to be the core settings, and automatically add the IvyModule + JvmPlugins.
def apply(id: String, base: File, aggregate: => Seq[ProjectReference] = Nil, dependencies: => Seq[ClasspathDep[ProjectReference]] = Nil,
delegates: => Seq[ProjectReference] = Nil, settings: => Seq[Def.Setting[_]] = Nil, configurations: Seq[Configuration] = Nil,
auto: AddSettings = AddSettings.allDefaults): Project =
unresolved(id, base, aggregate, dependencies, delegates, settings, configurations, auto, Plugins.empty, Nil) // Note: JvmModule/IvyModule auto included...
/** This is a variation of def apply that mixes in GeneratedRootProject. */
private[sbt] def mkGeneratedRoot(id: String, base: File, aggregate: => Seq[ProjectReference]): Project =
{
validProjectID(id).foreach(errMsg => sys.error("Invalid project ID: " + errMsg))
new ProjectDef[ProjectReference](id, base, aggregate, Nil, Nil, Nil, Nil, AddSettings.allDefaults, Plugins.empty, Nil) with Project with GeneratedRootProject
}
/** Returns None if `id` is a valid Project ID or Some containing the parser error message if it is not.*/
def validProjectID(id: String): Option[String] = DefaultParsers.parse(id, DefaultParsers.ID).left.toOption
private[this] def validProjectIDStart(id: String): Boolean = DefaultParsers.parse(id, DefaultParsers.IDStart).isRight
/** Constructs a valid Project ID based on `id` and returns it in Right or returns the error message in Left if one cannot be constructed.*/
def normalizeProjectID(id: String): Either[String, String] =
{
val attempt = normalizeBase(id)
val refined =
if (attempt.length < 1) "root"
else if (!validProjectIDStart(attempt.substring(0, 1))) "root-" + attempt
else attempt
validProjectID(refined).toLeft(refined)
}
  private[this] def normalizeBase(s: String) = s.toLowerCase(Locale.ENGLISH).replaceAll("""\W+""", "-")
/**
* Normalize a String so that it is suitable for use as a dependency management module identifier.
* This is a best effort implementation, since valid characters are not documented or consistent.
*/
def normalizeModuleID(id: String): String = normalizeBase(id)
@deprecated("Will be removed.", "0.13.2")
def resolved(id: String, base: File, aggregate: => Seq[ProjectRef], dependencies: => Seq[ResolvedClasspathDependency], delegates: => Seq[ProjectRef],
settings: Seq[Def.Setting[_]], configurations: Seq[Configuration], auto: AddSettings): ResolvedProject =
resolved(id, base, aggregate, dependencies, delegates, settings, configurations, auto, Plugins.empty, Nil)
private def resolved(id: String, base: File, aggregate: => Seq[ProjectRef], dependencies: => Seq[ClasspathDep[ProjectRef]],
delegates: => Seq[ProjectRef], settings: Seq[Def.Setting[_]], configurations: Seq[Configuration], auto: AddSettings,
plugins: Plugins, autoPlugins: Seq[AutoPlugin]): ResolvedProject =
new ProjectDef[ProjectRef](id, base, aggregate, dependencies, delegates, settings, configurations, auto, plugins, autoPlugins) with ResolvedProject
private def unresolved(id: String, base: File, aggregate: => Seq[ProjectReference], dependencies: => Seq[ClasspathDep[ProjectReference]],
delegates: => Seq[ProjectReference], settings: => Seq[Def.Setting[_]], configurations: Seq[Configuration], auto: AddSettings,
plugins: Plugins, autoPlugins: Seq[AutoPlugin]): Project =
{
validProjectID(id).foreach(errMsg => sys.error("Invalid project ID: " + errMsg))
new ProjectDef[ProjectReference](id, base, aggregate, dependencies, delegates, settings, configurations, auto, plugins, autoPlugins) with Project
}
@deprecated("Use Defaults.coreDefaultSettings instead, combined with AutoPlugins.", "0.13.2")
def defaultSettings: Seq[Def.Setting[_]] = Defaults.defaultSettings
final class Constructor(p: ProjectReference) {
def %(conf: Configuration): ClasspathDependency = %(conf.name)
def %(conf: String): ClasspathDependency = new ClasspathDependency(p, Some(conf))
}
def getOrError[T](state: State, key: AttributeKey[T], msg: String): T = state get key getOrElse sys.error(msg)
def structure(state: State): BuildStructure = getOrError(state, stateBuildStructure, "No build loaded.")
def session(state: State): SessionSettings = getOrError(state, sessionSettings, "Session not initialized.")
def isProjectLoaded(state: State): Boolean = (state has sessionSettings) && (state has stateBuildStructure)
def extract(state: State): Extracted = extract(session(state), structure(state))
def extract(se: SessionSettings, st: BuildStructure): Extracted = Extracted(st, se, se.current)(showContextKey(se, st))
def getProjectForReference(ref: Reference, structure: BuildStructure): Option[ResolvedProject] =
ref match { case pr: ProjectRef => getProject(pr, structure); case _ => None }
def getProject(ref: ProjectRef, structure: BuildStructure): Option[ResolvedProject] = getProject(ref, structure.units)
def getProject(ref: ProjectRef, structure: LoadedBuild): Option[ResolvedProject] = getProject(ref, structure.units)
def getProject(ref: ProjectRef, units: Map[URI, LoadedBuildUnit]): Option[ResolvedProject] =
(units get ref.build).flatMap(_.defined get ref.project)
def runUnloadHooks(s: State): State =
{
val previousOnUnload = orIdentity(s get Keys.onUnload.key)
previousOnUnload(s.runExitHooks())
}
def setProject(session: SessionSettings, structure: BuildStructure, s: State): State =
{
val unloaded = runUnloadHooks(s)
val (onLoad, onUnload) = getHooks(structure.data)
val newAttrs = unloaded.attributes.put(stateBuildStructure, structure).put(sessionSettings, session).put(Keys.onUnload.key, onUnload)
val newState = unloaded.copy(attributes = newAttrs)
onLoad(LogManager.setGlobalLogLevels(updateCurrent(newState), structure.data))
}
def orIdentity[T](opt: Option[T => T]): T => T = opt getOrElse idFun
def getHook[T](key: SettingKey[T => T], data: Settings[Scope]): T => T = orIdentity(key in GlobalScope get data)
def getHooks(data: Settings[Scope]): (State => State, State => State) = (getHook(Keys.onLoad, data), getHook(Keys.onUnload, data))
def current(state: State): ProjectRef = session(state).current
def updateCurrent(s: State): State =
{
val structure = Project.structure(s)
val ref = Project.current(s)
val project = Load.getProject(structure.units, ref.build, ref.project)
val msg = Keys.onLoadMessage in ref get structure.data getOrElse ""
if (!msg.isEmpty) s.log.info(msg)
def get[T](k: SettingKey[T]): Option[T] = k in ref get structure.data
def commandsIn(axis: ResolvedReference) = commands in axis get structure.data toList;
val allCommands = commandsIn(ref) ++ commandsIn(BuildRef(ref.build)) ++ (commands in Global get structure.data toList)
val history = get(historyPath) flatMap idFun
val prompt = get(shellPrompt)
val watched = get(watch)
val commandDefs = allCommands.distinct.flatten[Command].map(_ tag (projectCommand, true))
val newDefinedCommands = commandDefs ++ BasicCommands.removeTagged(s.definedCommands, projectCommand)
val newAttrs = setCond(Watched.Configuration, watched, s.attributes).put(historyPath.key, history)
s.copy(attributes = setCond(shellPrompt.key, prompt, newAttrs), definedCommands = newDefinedCommands)
}
def setCond[T](key: AttributeKey[T], vopt: Option[T], attributes: AttributeMap): AttributeMap =
vopt match { case Some(v) => attributes.put(key, v); case None => attributes.remove(key) }
@deprecated("Use Def.make", "0.13.0")
def makeSettings(settings: Seq[Def.Setting[_]], delegates: Scope => Seq[Scope], scopeLocal: ScopedKey[_] => Seq[Def.Setting[_]])(implicit display: Show[ScopedKey[_]]) =
Def.make(settings)(delegates, scopeLocal, display)
private[sbt] def checkTargets(data: Settings[Scope]): Option[String] =
{
val dups = overlappingTargets(allTargets(data))
if (dups.isEmpty)
None
else {
val dupStrs = dups map {
case (dir, scopes) =>
s"${dir.getAbsolutePath}:\\n\\t${scopes.mkString("\\n\\t")}"
}
Some(s"Overlapping output directories:${dupStrs.mkString}")
}
}
private[this] def overlappingTargets(targets: Seq[(ProjectRef, File)]): Map[File, Seq[ProjectRef]] =
targets.groupBy(_._2).filter(_._2.size > 1).mapValues(_.map(_._1))
private[this] def allTargets(data: Settings[Scope]): Seq[(ProjectRef, File)] =
{
import ScopeFilter._
val allProjects = ScopeFilter(Make.inAnyProject)
val targetAndRef = Def.setting { (Keys.thisProjectRef.value, Keys.target.value) }
new SettingKeyAll(Def.optional(targetAndRef)(idFun)).all(allProjects).evaluate(data).flatMap(x => x)
}
def equal(a: ScopedKey[_], b: ScopedKey[_], mask: ScopeMask): Boolean =
a.key == b.key && Scope.equal(a.scope, b.scope, mask)
def fillTaskAxis(scoped: ScopedKey[_]): ScopedKey[_] =
ScopedKey(Scope.fillTaskAxis(scoped.scope, scoped.key), scoped.key)
def mapScope(f: Scope => Scope) = new (ScopedKey ~> ScopedKey) {
def apply[T](key: ScopedKey[T]) =
ScopedKey(f(key.scope), key.key)
}
def transform(g: Scope => Scope, ss: Seq[Def.Setting[_]]): Seq[Def.Setting[_]] = {
val f = mapScope(g)
ss.map(_ mapKey f mapReferenced f)
}
def transformRef(g: Scope => Scope, ss: Seq[Def.Setting[_]]): Seq[Def.Setting[_]] = {
val f = mapScope(g)
ss.map(_ mapReferenced f)
}
def delegates(structure: BuildStructure, scope: Scope, key: AttributeKey[_]): Seq[ScopedKey[_]] =
structure.delegates(scope).map(d => ScopedKey(d, key))
def scopedKeyData(structure: BuildStructure, scope: Scope, key: AttributeKey[_]): Option[ScopedKeyData[_]] =
structure.data.get(scope, key) map { v => ScopedKeyData(ScopedKey(scope, key), v) }
def details(structure: BuildStructure, actual: Boolean, scope: Scope, key: AttributeKey[_])(implicit display: Show[ScopedKey[_]]): String =
{
val scoped = ScopedKey(scope, key)
val data = scopedKeyData(structure, scope, key) map { _.description } getOrElse { "No entry for key." }
      val description = key.description match { case Some(desc) => "Description:\n\t" + desc + "\n"; case None => "" }
val definingScope = structure.data.definingScope(scope, key)
val providedBy = definingScope match {
        case Some(sc) => "Provided by:\n\t" + Scope.display(sc, key.label) + "\n"
case None => ""
}
val definingScoped = definingScope match { case Some(sc) => ScopedKey(sc, key); case None => scoped }
val comp = Def.compiled(structure.settings, actual)(structure.delegates, structure.scopeLocal, display)
val definedAt = comp get definingScoped map { c => Def.definedAtString(c.settings).capitalize } getOrElse ""
val cMap = Def.flattenLocals(comp)
val related = cMap.keys.filter(k => k.key == key && k.scope != scope)
def derivedDependencies(c: ScopedKey[_]): List[ScopedKey[_]] =
comp.get(c).map(_.settings.flatMap(s => if (s.isDerived) s.dependencies else Nil)).toList.flatten
val depends = cMap.get(scoped) match { case Some(c) => c.dependencies.toSet; case None => Set.empty }
val derivedDepends: Set[ScopedKey[_]] = derivedDependencies(definingScoped).toSet
val reverse = reverseDependencies(cMap, scoped)
val derivedReverse = reverse.filter(r => derivedDependencies(r).contains(definingScoped)).toSet
def printDepScopes(baseLabel: String, derivedLabel: String, scopes: Iterable[ScopedKey[_]], derived: Set[ScopedKey[_]]): String =
{
val label = s"$baseLabel${if (derived.isEmpty) "" else s" (D=$derivedLabel)"}"
val prefix: ScopedKey[_] => String = if (derived.isEmpty) const("") else sk => if (derived(sk)) "D " else " "
printScopes(label, scopes, prefix = prefix)
}
def printScopes(label: String, scopes: Iterable[ScopedKey[_]], max: Int = Int.MaxValue, prefix: ScopedKey[_] => String = const("")) =
if (scopes.isEmpty) ""
else {
          val (limited, more) = if (scopes.size <= max) (scopes, "\n") else (scopes.take(max), "\n...\n")
          limited.map(sk => prefix(sk) + display(sk)).mkString(label + ":\n\t", "\n\t", more)
}
data + "\\n" +
description +
providedBy +
definedAt +
printDepScopes("Dependencies", "derived from", depends, derivedDepends) +
printDepScopes("Reverse dependencies", "derives", reverse, derivedReverse) +
printScopes("Delegates", delegates(structure, scope, key)) +
printScopes("Related", related, 10)
}
def settingGraph(structure: BuildStructure, basedir: File, scoped: ScopedKey[_])(implicit display: Show[ScopedKey[_]]): SettingGraph =
SettingGraph(structure, basedir, scoped, 0)
def graphSettings(structure: BuildStructure, basedir: File)(implicit display: Show[ScopedKey[_]]): Unit = {
def graph(actual: Boolean, name: String) = graphSettings(structure, actual, name, new File(basedir, name + ".dot"))
graph(true, "actual_dependencies")
graph(false, "declared_dependencies")
}
def graphSettings(structure: BuildStructure, actual: Boolean, graphName: String, file: File)(implicit display: Show[ScopedKey[_]]): Unit = {
val rel = relation(structure, actual)
val keyToString = display.apply _
DotGraph.generateGraph(file, graphName, rel, keyToString, keyToString)
}
def relation(structure: BuildStructure, actual: Boolean)(implicit display: Show[ScopedKey[_]]): Relation[ScopedKey[_], ScopedKey[_]] =
relation(structure.settings, actual)(structure.delegates, structure.scopeLocal, display)
private[sbt] def relation(settings: Seq[Def.Setting[_]], actual: Boolean)(implicit delegates: Scope => Seq[Scope], scopeLocal: Def.ScopeLocal, display: Show[ScopedKey[_]]): Relation[ScopedKey[_], ScopedKey[_]] =
{
type Rel = Relation[ScopedKey[_], ScopedKey[_]]
val cMap = Def.flattenLocals(Def.compiled(settings, actual))
((Relation.empty: Rel) /: cMap) {
case (r, (key, value)) =>
r + (key, value.dependencies)
}
}
def showDefinitions(key: AttributeKey[_], defs: Seq[Scope])(implicit display: Show[ScopedKey[_]]): String =
showKeys(defs.map(scope => ScopedKey(scope, key)))
def showUses(defs: Seq[ScopedKey[_]])(implicit display: Show[ScopedKey[_]]): String =
showKeys(defs)
private[this] def showKeys(s: Seq[ScopedKey[_]])(implicit display: Show[ScopedKey[_]]): String =
    s.map(display.apply).sorted.mkString("\n\t", "\n\t", "\n\n")
def definitions(structure: BuildStructure, actual: Boolean, key: AttributeKey[_])(implicit display: Show[ScopedKey[_]]): Seq[Scope] =
relation(structure, actual)(display)._1s.toSeq flatMap { sk => if (sk.key == key) sk.scope :: Nil else Nil }
def usedBy(structure: BuildStructure, actual: Boolean, key: AttributeKey[_])(implicit display: Show[ScopedKey[_]]): Seq[ScopedKey[_]] =
relation(structure, actual)(display).all.toSeq flatMap { case (a, b) => if (b.key == key) List[ScopedKey[_]](a) else Nil }
def reverseDependencies(cMap: Map[ScopedKey[_], Flattened], scoped: ScopedKey[_]): Iterable[ScopedKey[_]] =
for ((key, compiled) <- cMap; dep <- compiled.dependencies if dep == scoped) yield key
//@deprecated("Use SettingCompletions.setAll when available.", "0.13.0")
def setAll(extracted: Extracted, settings: Seq[Def.Setting[_]]): SessionSettings =
SettingCompletions.setAll(extracted, settings).session
val ExtraBuilds = AttributeKey[List[URI]]("extra-builds", "Extra build URIs to load in addition to the ones defined by the project.")
def extraBuilds(s: State): List[URI] = getOrNil(s, ExtraBuilds)
def getOrNil[T](s: State, key: AttributeKey[List[T]]): List[T] = s get key getOrElse Nil
def setExtraBuilds(s: State, extra: List[URI]): State = s.put(ExtraBuilds, extra)
def addExtraBuilds(s: State, extra: List[URI]): State = setExtraBuilds(s, extra ::: extraBuilds(s))
def removeExtraBuilds(s: State, remove: List[URI]): State = updateExtraBuilds(s, _.filterNot(remove.toSet))
def updateExtraBuilds(s: State, f: List[URI] => List[URI]): State = setExtraBuilds(s, f(extraBuilds(s)))
object LoadAction extends Enumeration {
val Return, Current, Plugins = Value
}
import LoadAction._
import DefaultParsers._
val loadActionParser = token(Space ~> ("plugins" ^^^ Plugins | "return" ^^^ Return)) ?? Current
val ProjectReturn = AttributeKey[List[File]]("project-return", "Maintains a stack of builds visited using reload.")
def projectReturn(s: State): List[File] = getOrNil(s, ProjectReturn)
def inPluginProject(s: State): Boolean = projectReturn(s).length > 1
def setProjectReturn(s: State, pr: List[File]): State = s.copy(attributes = s.attributes.put(ProjectReturn, pr))
def loadAction(s: State, action: LoadAction.Value) = action match {
case Return =>
projectReturn(s) match {
case current :: returnTo :: rest => (setProjectReturn(s, returnTo :: rest), returnTo)
case _ => sys.error("Not currently in a plugin definition")
}
case Current =>
val base = s.configuration.baseDirectory
projectReturn(s) match { case Nil => (setProjectReturn(s, base :: Nil), base); case x :: xs => (s, x) }
case Plugins =>
val (newBase, oldStack) = if (Project.isProjectLoaded(s))
(Project.extract(s).currentUnit.unit.plugins.base, projectReturn(s))
else // support changing to the definition project if it fails to load
(BuildPaths.projectStandard(s.baseDir), s.baseDir :: Nil)
val newS = setProjectReturn(s, newBase :: oldStack)
(newS, newBase)
}
@deprecated("This method does not apply state changes requested during task execution. Use 'runTask' instead, which does.", "0.11.1")
def evaluateTask[T](taskKey: ScopedKey[Task[T]], state: State, checkCycles: Boolean = false, maxWorkers: Int = EvaluateTask.SystemProcessors): Option[Result[T]] =
runTask(taskKey, state, EvaluateConfig(true, EvaluateTask.defaultRestrictions(maxWorkers), checkCycles)).map(_._2)
def runTask[T](taskKey: ScopedKey[Task[T]], state: State, checkCycles: Boolean = false): Option[(State, Result[T])] =
{
val extracted = Project.extract(state)
val ch = EvaluateTask.cancelStrategy(extracted, extracted.structure, state)
val p = EvaluateTask.executeProgress(extracted, extracted.structure, state)
val r = EvaluateTask.restrictions(state)
val fgc = EvaluateTask.forcegc(extracted, extracted.structure)
runTask(taskKey, state, EvaluateTaskConfig(r, checkCycles, p, ch, fgc))
}
@deprecated("Use EvaluateTaskConfig option instead.", "0.13.5")
def runTask[T](taskKey: ScopedKey[Task[T]], state: State, config: EvaluateConfig): Option[(State, Result[T])] =
{
val extracted = Project.extract(state)
EvaluateTask(extracted.structure, taskKey, state, extracted.currentRef, config)
}
def runTask[T](taskKey: ScopedKey[Task[T]], state: State, config: EvaluateTaskConfig): Option[(State, Result[T])] = {
val extracted = Project.extract(state)
EvaluateTask(extracted.structure, taskKey, state, extracted.currentRef, config)
}
implicit def projectToRef(p: Project): ProjectReference = LocalProject(p.id)
final class RichTaskSessionVar[S](i: Def.Initialize[Task[S]]) {
import SessionVar.{ persistAndSet, resolveContext, set, transform => tx }
def updateState(f: (State, S) => State): Def.Initialize[Task[S]] = i(t => tx(t, f))
def storeAs(key: TaskKey[S])(implicit f: sbinary.Format[S]): Def.Initialize[Task[S]] = (Keys.resolvedScoped, i) { (scoped, task) =>
tx(task, (state, value) => persistAndSet(resolveContext(key, scoped.scope, state), state, value)(f))
}
def keepAs(key: TaskKey[S]): Def.Initialize[Task[S]] =
(i, Keys.resolvedScoped)((t, scoped) => tx(t, (state, value) => set(resolveContext(key, scoped.scope, state), state, value)))
}
import scala.reflect._
import reflect.macros._
def projectMacroImpl(c: Context): c.Expr[Project] =
{
import c.universe._
val enclosingValName = std.KeyMacro.definingValName(c, methodName => s"""$methodName must be directly assigned to a val, such as `val x = $methodName`.""")
val name = c.Expr[String](Literal(Constant(enclosingValName)))
reify { Project(name.splice, new File(name.splice)) }
}
}
private[sbt] trait GeneratedRootProject
trait ProjectExtra {
implicit def configDependencyConstructor[T <% ProjectReference](p: T): Constructor = new Constructor(p)
implicit def classpathDependency[T <% ProjectReference](p: T): ClasspathDependency = new ClasspathDependency(p, None)
// These used to be in Project so that they didn't need to get imported (due to Initialize being nested in Project).
// Moving Initialize and other settings types to Def and decoupling Project, Def, and Structure means these go here for now
implicit def richInitializeTask[T](init: Initialize[Task[T]]): Scoped.RichInitializeTask[T] = new Scoped.RichInitializeTask(init)
implicit def richInitializeInputTask[T](init: Initialize[InputTask[T]]): Scoped.RichInitializeInputTask[T] = new Scoped.RichInitializeInputTask(init)
implicit def richInitialize[T](i: Initialize[T]): Scoped.RichInitialize[T] = new Scoped.RichInitialize[T](i)
implicit def richTaskSessionVar[T](init: Initialize[Task[T]]): Project.RichTaskSessionVar[T] = new Project.RichTaskSessionVar(init)
def inThisBuild(ss: Seq[Setting[_]]): Seq[Setting[_]] =
inScope(ThisScope.copy(project = Select(ThisBuild)))(ss)
def inConfig(conf: Configuration)(ss: Seq[Setting[_]]): Seq[Setting[_]] =
inScope(ThisScope.copy(config = Select(conf)))((configuration :== conf) +: ss)
def inTask(t: Scoped)(ss: Seq[Setting[_]]): Seq[Setting[_]] =
inScope(ThisScope.copy(task = Select(t.key)))(ss)
def inScope(scope: Scope)(ss: Seq[Setting[_]]): Seq[Setting[_]] =
Project.transform(Scope.replaceThis(scope), ss)
private[sbt] def inThisBuild[T](i: Initialize[T]): Initialize[T] =
inScope(ThisScope.copy(project = Select(ThisBuild)), i)
private[sbt] def inConfig[T](conf: Configuration, i: Initialize[T]): Initialize[T] =
inScope(ThisScope.copy(config = Select(conf)), i)
private[sbt] def inTask[T](t: Scoped, i: Initialize[T]): Initialize[T] =
inScope(ThisScope.copy(task = Select(t.key)), i)
private[sbt] def inScope[T](scope: Scope, i: Initialize[T]): Initialize[T] =
i mapReferenced Project.mapScope(Scope.replaceThis(scope))
/**
* Creates a new Project. This is a macro that expects to be assigned directly to a val.
* The name of the val is used as the project ID and the name of the base directory of the project.
*/
def project: Project = macro Project.projectMacroImpl
}
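// Hedged usage sketch (not part of the original sources): illustrates how the Project
// construction and dependency API above is typically combined in a build definition;
// the `util` and `core` project IDs and directories are hypothetical.
private[sbt] object ProjectUsageSketch {
  import java.io.File
  import Project._
  lazy val util = Project("util", new File("util"))
  lazy val core = Project("core", new File("core"))
    .dependsOn(util % "test") // classpath dependency, restricted to the test configuration
    .aggregate(util)          // tasks run on core also run on util
}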
| jasonchaffee/sbt | main/src/main/scala/sbt/Project.scala | Scala | bsd-3-clause | 34,859 |
import spray.json._
object Lang extends Enumeration {
type Lang = Value
val scala, java, css, html, md, xml, sql, javascript = Value
implicit object LangFormat extends RootJsonFormat[Lang.Value] {
override def read(json: JsValue): Lang.Value = json match {
case JsString(v) => Lang.withName(v)
case _ => throw new DeserializationException("Programming Language not found")
}
override def write(obj: Lang.Value): JsValue = JsString(obj.toString)
}
}
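// Hedged usage sketch (not part of the original file): round-trips one enum value
// through the implicit LangFormat defined above; the demo object name is hypothetical.
object LangFormatDemo extends App {
  import Lang.LangFormat
  val js: JsValue = Lang.md.toJson        // JsString("md")
  val back: Lang.Value = js.convertTo[Lang.Value]
  println(s"$js -> $back")                // "md" -> md
}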
| hermanbanken/play-metrics | src/main/scala/Lang.scala | Scala | mit | 483 |
package controllers
import javax.inject._
import models.film.{Film, FilmRepository, Isbn}
import play.api.Logger
import play.api.libs.json._
import play.api.mvc._
import scala.concurrent.{ExecutionContext, Future}
class FilmController @Inject()(filmRepository: FilmRepository)(implicit exec: ExecutionContext) extends Controller {
def createFilm = Action.async(parse.json) { implicit request =>
request.body.validate[Film].map { film =>
Logger.debug(s"Creating film using isbn: ${film.isbn}")
filmRepository.create(film)
.map { f =>
Logger.info("Successfully created a film")
Created(Json.toJson(f))
}
.recover {
case e: Throwable =>
Logger.error("Error creating film", e)
InternalServerError(Json.obj("error" -> "Error creating film"))
}
}.recoverTotal { e =>
Logger.warn(s"Unable to create a film: ${JsError.toJson(e)}")
      Future.successful(BadRequest(Json.obj("error" -> s"Unable to create film. Cause: ${JsError.toJson(e)}")))
}
}
def findFilmBy(isbn: Isbn) = Action.async { implicit request =>
filmRepository.get(isbn).map { maybeFilm =>
maybeFilm.fold(NotFound(Json.obj("error" -> s"${isbn.value} not found")))(f => Ok(Json.toJson(f)))
}.recover {
case e: Throwable =>
Logger.error(s"Error finding film using isbn: $isbn", e)
InternalServerError(Json.obj("error" -> "Error finding films"))
}
}
}
| fagossa/play-reactive-couchbase | app/controllers/FilmController.scala | Scala | mit | 1,460 |
package pl.gosub.akka.online.follow.the.leader
class FollowTheLeaderLogic(val hypotheses: Seq[Double => Double], lossFunction: ((Double, Double) => Double), windowSize: Int) {
  private var past: Seq[(Double, Double)] = Seq.empty
private var pastX = 0.0
def predict(x: Double, y: Double): Double = {
past = past :+ (pastX, y)
    past = past.takeRight(windowSize) // cap the history at the sliding-window size
val leader = if(past isEmpty) {
hypotheses.head
} else {
hypotheses
.map(hypothesis => (hypothesis, past.map{ case (x, y) => lossFunction(hypothesis(x), y)}.reduce(_+_)))
.sortBy(_._2)
.map(_._1)
.head
}
val prediction = leader(x)
pastX = x
prediction
}
}
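// Hedged usage sketch (not part of the original file): the hypothesis pool, the squared
// loss and the window size are illustrative assumptions, not part of the class's API.
object FollowTheLeaderDemo extends App {
  val hypotheses: Seq[Double => Double] = Seq(x => x, x => 2 * x, x => x * x)
  val squaredLoss: (Double, Double) => Double = (p, y) => (p - y) * (p - y)
  val learner = new FollowTheLeaderLogic(hypotheses, squaredLoss, windowSize = 10)
  // Observations follow y = 2 * x, so the leader should settle on the second hypothesis.
  (1 to 5).foreach { i =>
    val x = i.toDouble
    println(s"x = $x, prediction = ${learner.predict(x, 2 * x)}")
  }
}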
| gosubpl/akka-online | src/main/scala/pl/gosub/akka/online/follow/the/leader/FollowTheLeaderLogic.scala | Scala | apache-2.0 | 720 |
/*     ___ ____ ___   __   ___   ___
**    / _// __// _ | / /  / _ | / _ \    Scala classfile decoder
**  __\ \/ /__/ __ |/ /__/ __ |/ ___/    (c) 2003-2010, LAMP/EPFL
** /____/\___/_/ |_/____/_/ |_/_/        http://scala-lang.org/
**
*/
package scala.tools.scalap
import scala.collection.mutable._
object Arguments {
case class Parser(optionPrefix: Char) {
val options: Set[String] = new HashSet
val prefixes: Set[String] = new HashSet
val optionalArgs: Set[String] = new HashSet
val prefixedBindings: Map[String, Char] = new HashMap
val optionalBindings: Map[String, Char] = new HashMap
def error(message: String): Unit = Console.println(message)
def withOption(option: String): Parser = {
options += option
this
}
def withOptionalArg(option: String): Parser = {
optionalArgs += option
this
}
def withOptionalBinding(option: String, separator: Char): Parser = {
optionalBindings(option) = separator
this
}
def withPrefixedArg(prefix: String): Parser = {
prefixes += prefix
this
}
def withPrefixedBinding(prefix: String, separator: Char): Parser = {
prefixedBindings(prefix) = separator
this
}
def parseBinding(str: String, separator: Char): (String, String) = {
val eqls = str.indexOf(separator)
if (eqls < 0) {
error("missing '" + separator + "' in binding '" + str + "'")
Pair("", "")
} else
Pair(str.substring(0, eqls).trim(),
str.substring(eqls + 1).trim())
}
def parse(args: Array[String]): Arguments = {
val res = new Arguments
parse(args, res)
res
}
def parse(args: Array[String], res: Arguments) {
if (args != null) {
var i = 0
while (i < args.length)
if ((args(i) == null) || (args(i).length() == 0))
i += 1
else if (args(i).charAt(0) != optionPrefix) {
res.addOther(args(i))
i += 1
} else if (options contains args(i)) {
res.addOption(args(i))
i += 1
} else if (optionalArgs contains args(i)) {
if ((i + 1) == args.length) {
error("missing argument for '" + args(i) + "'")
i += 1
} else {
res.addArgument(args(i), args(i + 1))
i += 2
}
} else if (optionalBindings contains args(i)) {
if ((i + 1) == args.length) {
error("missing argument for '" + args(i) + "'")
i += 1
} else {
res.addBinding(args(i),
parseBinding(args(i + 1), optionalBindings(args(i))));
i += 2
}
} else {
var iter = prefixes.iterator
val j = i
while ((i == j) && iter.hasNext) {
val prefix = iter.next
if (args(i) startsWith prefix) {
res.addPrefixed(prefix, args(i).substring(prefix.length()).trim());
i += 1
}
}
if (i == j) {
val iter = prefixedBindings.keysIterator;
while ((i == j) && iter.hasNext) {
val prefix = iter.next
if (args(i) startsWith prefix) {
val arg = args(i).substring(prefix.length()).trim()
i = i + 1
res.addBinding(prefix,
parseBinding(arg, prefixedBindings(prefix)));
}
}
if (i == j) {
error("unknown option '" + args(i) + "'")
i = i + 1
}
}
}
}
}
}
def parse(options: String*)(args: Array[String]): Arguments = {
val parser = new Parser('-')
val iter = options.iterator
while (iter.hasNext)
parser withOption iter.next
parser.parse(args)
}
}
class Arguments {
private val options: Set[String] = new HashSet
private val arguments: Map[String, String] = new HashMap
private val prefixes: Map[String, Set[String]] = new HashMap
private val bindings: Map[String, Map[String, String]] = new HashMap
private val others: Buffer[String] = new ListBuffer
def addOption(option: String): Unit = options += option
def addArgument(option: String, arg: String) {
arguments(option) = arg
}
def addPrefixed(prefix: String, arg: String): Unit =
if (prefixes isDefinedAt prefix)
prefixes(prefix) += arg
else {
prefixes(prefix) = new HashSet
prefixes(prefix) += arg
}
def addBinding(tag: String, key: String, value: String): Unit =
if (key.length() > 0) {
if (bindings isDefinedAt tag)
bindings(tag)(key) = value
else {
bindings(tag) = new HashMap
bindings(tag)(key) = value
}
}
def addBinding(tag: String, binding: Pair[String, String]) {
addBinding(tag, binding._1, binding._2)
}
def addOther(arg: String): Unit = others += arg
def contains(option: String): Boolean = options contains option
def getArgument(option: String): Option[String] = arguments get option
def getSuffixes(prefix: String): Set[String] =
prefixes.get(prefix) match {
case None => new HashSet
case Some(set) => set
}
def containsSuffix(prefix: String, suffix: String): Boolean =
prefixes.get(prefix) match {
case None => false
case Some(set) => set contains suffix
}
def getBindings(tag: String): Map[String, String] =
bindings.get(tag) match {
case None => new HashMap
case Some(map) => map
}
def getBinding(option: String, key: String): Option[String] =
bindings.get(option) match {
case None => None
case Some(map) => map get key
}
def getOthers: List[String] = others.toList
}
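// Hedged usage sketch (not part of the original file): parses a hypothetical command
// line with one boolean option and two positional arguments.
object ArgumentsDemo {
  def main(args: Array[String]): Unit = {
    val parsed = Arguments.parse("-verbose")(Array("-verbose", "Foo.class", "Bar.class"))
    println(parsed.contains("-verbose")) // true
    println(parsed.getOthers)            // List(Foo.class, Bar.class)
  }
}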
| LPTK/intellij-scala | scalap/src/scalap/Arguments.scala | Scala | apache-2.0 | 5,854 |
/**
* Copyright 2014 Dropbox, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package djinni
import djinni.ast.TypeDef
import scala.collection.immutable
package object meta {
case class MExpr(base: Meta, args: Seq[MExpr])
abstract sealed class Meta
{
val numParams: Int
}
case class MParam(name: String) extends Meta { val numParams = 0 }
case class MDef(name: String, override val numParams: Int, defType: DefType, body: TypeDef) extends Meta
abstract sealed class MOpaque extends Meta { val idlName: String }
abstract sealed class DefType
case object DEnum extends DefType
case object DInterface extends DefType
case object DRecord extends DefType
case class MPrimitive(_idlName: String, jName: String, jniName: String, cName: String, jBoxed: String, jSig: String, objcName: String, objcBoxed: String) extends MOpaque { val numParams = 0; val idlName = _idlName }
case object MString extends MOpaque { val numParams = 0; val idlName = "string" }
case object MDate extends MOpaque { val numParams = 0; val idlName = "date" }
case object MBinary extends MOpaque { val numParams = 0; val idlName = "binary" }
case object MOptional extends MOpaque { val numParams = 1; val idlName = "optional" }
case object MList extends MOpaque { val numParams = 1; val idlName = "list" }
case object MSet extends MOpaque { val numParams = 1; val idlName = "set" }
case object MMap extends MOpaque { val numParams = 2; val idlName = "map" }
val defaults: Map[String,MOpaque] = immutable.HashMap(
("i8", MPrimitive("i8", "byte", "jbyte", "int8_t", "Byte", "B", "int8_t", "NSNumber")),
("i16", MPrimitive("i16", "short", "jshort", "int16_t", "Short", "S", "int16_t", "NSNumber")),
("i32", MPrimitive("i32", "int", "jint", "int32_t", "Integer", "I", "int32_t", "NSNumber")),
("i64", MPrimitive("i64", "long", "jlong", "int64_t", "Long", "J", "int64_t", "NSNumber")),
("f64", MPrimitive("f64", "double", "jdouble", "double", "Double", "D", "double", "NSNumber")),
("bool", MPrimitive("bool", "boolean", "jboolean", "bool", "Boolean", "Z", "BOOL", "NSNumber")),
("string", MString),
("binary", MBinary),
("optional", MOptional),
("date", MDate),
("list", MList),
("set", MSet),
("map", MMap))
}
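// Hedged usage sketch (not part of the original file): builds the meta-model
// expression corresponding to the IDL type `map<string, list<i32>>`.
object MetaExprSketch {
  import meta._
  val listOfI32: MExpr = MExpr(MList, Seq(MExpr(defaults("i32"), Seq())))
  val mapStringToListOfI32: MExpr = MExpr(MMap, Seq(MExpr(MString, Seq()), listOfI32))
}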
| mrdomino/djinni | src/source/meta.scala | Scala | apache-2.0 | 2,800 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tutorial.libcog.fields
import libcog._
import cogx.runtime.ComputeGraph
/**
* Shows various legal combinations of different fields. Uncomment ILLEGAL lines to throw exceptions.
*/
object CombiningFields extends App {
val cg = new ComputeGraph {
val fieldShape = Shape(100,100)
val bigFieldShape = Shape(200, 200)
val vectorShape = Shape(5)
val scalarField = ScalarField(fieldShape)
val bigScalarField = ScalarField(bigFieldShape)
val vectorField = VectorField(fieldShape, vectorShape)
val matrixField = MatrixField(fieldShape, Shape(7, 7))
val complexField = ComplexField(fieldShape)
val complexVectorField = ComplexVectorField(fieldShape, vectorShape)
val zeroDimScalarField = ScalarField(1.234f)
val zeroDimVectorField = VectorField(new Vector(5))
val x1 = scalarField + scalarField // OK
val x1c = complexField + complexField // OK
// val x2 = scalarField - bigScalarField // ILLEGAL, different input field shapes
val x3 = vectorField * scalarField // OK
val x3c = complexVectorField * scalarField // OK
val x4 = vectorField / complexField // OK
val x4c = complexVectorField / complexField // OK
val x5v = vectorField * vectorField // OK (element-wise multiplication)
val x5m = matrixField * matrixField // OK (element-wise multiplication)
val x5c = complexVectorField * complexVectorField // OK (element-wise multiplication)
val x5c2 = complexVectorField * vectorField // OK (complex and real fields can be combined)
val x6 = vectorField + zeroDimVectorField // OK, vector field <op> 0D vector field.
val x7 = vectorField + zeroDimScalarField // OK, vector field <op> 0D scalar field.
val x8 = scalarField + zeroDimScalarField // OK, scalar field <op> 0D scalar field.
// val x9 = vectorField * matrixField // ILLEGAL, arithmetically incompatible tensor shapes
// val x10 = scalarField + zeroDimVectorField // ILLEGAL, scalar field <op> 0D vector field.
probeAll
}
cg.withRelease { cg.reset }
}
| hpe-cct/cct-tutorial | src/main/scala/tutorial/libcog/fields/CombiningFields.scala | Scala | apache-2.0 | 2,839 |
trait Super[@specialized(Int) A] {
def superb = 0
}
object Sub extends Super[Int] {
// it is expected that super[Super].superb crashes, since
// specialization does parent class rewiring, and the super
// of Sub becomes Super$mcII$sp and not Super. But I consider
  // this normal behavior -- if you want, I can modify duplicators
  // to make this work, but I consider it's best to keep this as is and
// let the user know Super is not the superclass anymore.
// super[Super].superb - Vlad
super.superb // okay
override def superb: Int = super.superb // okay
}
| yusuke2255/dotty | tests/untried/pos/SI-4012-b.scala | Scala | bsd-3-clause | 577 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.junit
import org.scalatest._
import collection.immutable.TreeSet
import helpers._
import org.scalatest.events._
class JUnit3SuiteSpec extends FunSpec with SharedHelpers {
describe("A JUnit3Suite") {
it("should return the test names in alphabetical order from testNames") {
val a = new JUnit3Suite {
def testThis() {}
def testThat() {}
}
expect(List("testThat", "testThis")) {
a.testNames.iterator.toList
}
val b = new JUnit3Suite {}
expect(List[String]()) {
b.testNames.iterator.toList
}
val c = new JUnit3Suite {
def testThat() {}
def testThis() {}
}
expect(List("testThat", "testThis")) {
c.testNames.iterator.toList
}
}
it("should return the proper testNames for test methods whether or not they take an Informer") {
val a = new JUnit3Suite {
def testThis() = ()
def testThat(info: Informer) = ()
}
assert(a.testNames === TreeSet("testThis"))
val b = new JUnit3Suite {}
assert(b.testNames === TreeSet[String]())
}
it("should not return names of methods that start with test, take no params, but have a return type " +
"other than Unit from testNames") {
val a = new TestWithNonUnitMethod
assert(a.testNames === TreeSet("testThat", "testThis"))
}
it("should include in testNames a method simply named 'test', that takes no params and has a return type " +
"of Unit") {
val a = new TestWithMethodNamedTest
assert(a.testNames === TreeSet("test", "testThat", "testThis"))
}
it("should return an empty tags map from the tags method, because a tag-like concept isn't supported in JUnit 3") {
val a = new JUnit3Suite {
@Ignore
def testThis() = ()
def testThat(info: Informer) = ()
}
assert(a.tags.isEmpty)
val b = new JUnit3Suite {
def testThis() = ()
@Ignore
def testThat(info: Informer) = ()
}
assert(b.tags.isEmpty)
val c = new JUnit3Suite {
@Ignore
def testThis() = ()
@Ignore
def testThat(info: Informer) = ()
}
assert(c.tags.isEmpty)
val d = new JUnit3Suite {
@SlowAsMolasses
def testThis() = ()
@SlowAsMolasses
@Ignore
def testThat(info: Informer) = ()
}
assert(d.tags.isEmpty)
val e = new JUnit3Suite {}
assert(e.tags.isEmpty)
}
it("should execute all tests when run is called with testName None") {
TestWasCalledSuite.reinitialize()
val b = new TestWasCalledSuite
b.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(TestWasCalledSuite.theTestThisCalled)
assert(TestWasCalledSuite.theTestThatCalled)
}
it("should execute one test when run is called with a defined testName") {
TestWasCalledSuite.reinitialize()
val a = new TestWasCalledSuite
a.run(Some("testThis"), SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(TestWasCalledSuite.theTestThisCalled)
assert(!TestWasCalledSuite.theTestThatCalled)
}
it("should throw IllegalArgumentException if run is passed a testName that does not exist") {
val a = new TestWasCalledSuite
intercept[IllegalArgumentException] {
// Here, they forgot that the name is actually testThis(Fixture)
a.run(Some("misspelled"), SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
}
}
it("should run no tests if tags to include is non-empty") {
TestWasCalledSuite.reinitialize()
val a = new TestWasCalledSuite
a.run(None, SilentReporter, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker)
assert(!TestWasCalledSuite.theTestThisCalled)
assert(!TestWasCalledSuite.theTestThatCalled)
}
it("should return the correct test count from its expectedTestCount method") {
val a = new ASuite
assert(a.expectedTestCount(Filter()) === 1)
val b = new BSuite
assert(b.expectedTestCount(Filter()) === 1)
val c = new CSuite
assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 0)
assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1)
val d = new DSuite
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 0)
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 0)
assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 4)
assert(d.expectedTestCount(Filter()) === 4)
val e = new ESuite
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 0)
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 0)
assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1)
assert(e.expectedTestCount(Filter()) === 1)
}
it("should generate a test failure if a Throwable, or an Error other than direct Error subtypes " +
"known in JDK 1.5, excluding AssertionError") {
val a = new ShouldFailSuite
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val tf = rep.testFailedEventsReceived
assert(tf.size === 3)
}
}
}
| yyuu/scalatest | src/test/scala/org/scalatest/junit/JUnit3SuiteSpec.scala | Scala | apache-2.0 | 6,255 |
package com.datascience.education.tutorial.lecture5
object SineExample extends App {
import Sine._
import breeze.linalg._
import breeze.plot._
sine.print(32)
val f = Figure()
val p = f.subplot(0)
val x = linspace(0.0,2.5*math.Pi, 16)
val out: List[Double] = sine.toListFinite(16)
p+= plot(x, out)
p+= plot(x, out, '.')
p.xlabel = "theta radians"
p.ylabel = "sin(theta)"
f.saveas("sine_wave.png")
println("done plotting")
}
object StepperExample extends App {
import Stepper._
import breeze.linalg._
import breeze.plot._
val stepperOut = stepperSine.toListFinite(32)
val f = Figure()
val p2 = f.subplot(0)
val x2 = linspace(0.0,2.5*math.Pi, 32)
p2 += plot(x2, stepperOut)
p2 += plot(x2, stepperOut, '.')
p2.xlabel = "theta radians"
p2.ylabel = "sin(theta)"
f.saveas("sine_wave_stepper.png")
println("done plotting")
}
object InterpolationExample extends App {
import Interpolation._
import breeze.linalg._
import breeze.plot._
val linearInterpolatedOut = linearInterpolated.toListFinite(32)
println(linearInterpolatedOut)
val f = Figure()
val p3 = f.subplot(0)
val x3 = linspace(0.0,2.5*math.Pi, 32)
p3 += plot(x3, linearInterpolatedOut)
p3 += plot(x3, linearInterpolatedOut, '.')
p3.xlabel = "theta radians"
p3.ylabel = "sin(theta)"
f.saveas("sine_wave_linear_interpolation.png")
println("done plotting")
}
| DS12/scala-class | tutorial/src/main/scala/com/datascience/education/tutorial/lecture5/InterpolationExamples.scala | Scala | cc0-1.0 | 1,426 |
package org.scalaide.debug.internal.launching
import com.sun.jdi.connect.Connector
import org.eclipse.jdt.launching.IVMConnector
import java.util.{Map => JMap}
import org.eclipse.debug.core.ILaunch
import org.scalaide.debug.internal.ScalaDebugPlugin
import com.sun.jdi.connect.Connector.Argument
object SocketConnectorScala {
/** Configuration key for the 'allow termination of remote VM' option */
final val AllowTerminateKey = "org.eclipse.jdt.launching.ALLOW_TERMINATE"
/** Configuration key for the port number */
final val PortKey = "port"
/** Configuration key for the hostname */
final val HostnameKey = "hostname"
final val DefaultPort = 8000
/* magic names */
final val SocketListenName = "com.sun.jdi.SocketListen"
final val SocketAttachName = "com.sun.jdi.SocketAttach"
/**
* Returns <code>true</code> if AllowTerminate was set to true in the launch configuration, <code>false</code> otherwise.
*/
def allowTerminate(launch: ILaunch):Boolean =
launch.getLaunchConfiguration().getAttribute(AllowTerminateKey, false)
}
/**
* Trait providing common methods for Scala VM connectors.
*/
trait SocketConnectorScala extends IVMConnector {
import SocketConnectorScala._
/**
   * Return the JDI connector to be used by this VM connector.
*/
def connector(): Connector
/**
* Return the default arguments for this connector.
*/
override val getDefaultArguments: JMap[String, Connector.Argument] = {
val args = connector.defaultArguments()
// set a default value for port, otherwise an NPE is thrown. This is required by launcher UI
import scala.collection.JavaConverters._
args.asScala.get(PortKey) match {
case Some(e: Connector.IntegerArgument) =>
e.setValue(DefaultPort)
case _ =>
}
args
}
/**
* Create an argument map containing the values provided in the params map.
*/
def generateArguments(params: JMap[String, String]): JMap[String, Argument] = {
import scala.collection.JavaConverters._
// convert to a usable type
val p = params.asScala
    val arguments = connector.defaultArguments()
    // set the values from the params to the connector arguments
arguments.asScala.foreach {
case (key, value) =>
p.get(key) match {
case Some(v: String) =>
value.setValue(v)
case _ =>
throw ScalaDebugPlugin.wrapInCoreException("Unable to initialize connection, argument '%s' is not available".format(key), null)
}
}
arguments
}
} | Kwestor/scala-ide | org.scala-ide.sdt.debug/src/org/scalaide/debug/internal/launching/SocketConnectorScala.scala | Scala | bsd-3-clause | 2,549 |
/*
* ModTruncateTail.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.fscape.modules
import de.sciss.lucre.Txn
import de.sciss.proc.{FScape, Widget}
import scala.Predef.{any2stringadd => _}
object ModTruncateTail extends Module {
val name = "Truncate Tail"
/**
* Attributes:
*
* - `"in"`: audio file input
* - `"out"`: audio file output
* - `"out-type"`: audio file output type (AIFF: 0, Wave: 1, Wave64: 2, IRCAM: 3, NeXT: 4)
* - `"out-format"`: audio file output sample format (Int16: 0, Int24: 1, Float: 2, Int32: 3, Double: 4, UInt8: 5, Int8: 6)
* - `"gain-type"`: either `0` (normalized) or `1` (relative)
* - `"thresh-db"`: threshold in decibels, below which the tail is truncated
* - `"fade-len-ms"`: tail fade-out duration in milliseconds
*/
def apply[T <: Txn[T]]()(implicit tx: T): FScape[T] = {
import de.sciss.fscape.graph.{AudioFileIn => _, AudioFileOut => _, _}
import de.sciss.fscape.lucre.graph.Ops._
import de.sciss.fscape.lucre.graph._
val f = FScape[T]()
import de.sciss.fscape.lucre.MacroImplicits._
f.setGraph {
// version: 13-Oct-2021
val in = AudioFileIn("in")
val inLen = in.numFrames
val sr = in.sampleRate
val fileType = "out-type" .attr(0)
val smpFmt = "out-format" .attr(2)
val thresh = "thresh-db" .attr(-60.0).dbAmp
val fadeLenMs = "fade-len-ms" .attr(100.0).max(0.0)
val fadeLen = (fadeLenMs * 0.001 * sr).toInt
val rev = Slices(in, inLen ++ 0L) // reverse trick
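      // envelope-follow the reversed signal; the leading run below the threshold is the original file's silent tail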
val amp = OnePole(rev.abs, 0.995) * 2
val silent = amp.takeWhile(amp < thresh)
val tailLen = Length(silent)
// (tailLen / sr).poll("tail len [s]")
val outLen = (inLen - tailLen).max(0L)
val trunc = AudioFileIn("in").take(outLen)
val fadeLenT = fadeLen.min(outLen)
val env = DEnvGen(
levels = ValueDoubleSeq(1.0, 1.0, 0.0),
lengths = (outLen - fadeLenT) ++ fadeLenT
)
val sig = trunc * env
val written = AudioFileOut("out", sig, fileType = fileType,
sampleFormat = smpFmt, sampleRate = sr)
ProgressFrames(written, outLen)
()
}
f
}
def ui[T <: Txn[T]]()(implicit tx: T): Widget[T] = {
import de.sciss.lucre.expr.ExImport._
import de.sciss.lucre.expr.graph._
import de.sciss.lucre.swing.graph._
val w = Widget[T]()
import de.sciss.proc.MacroImplicits._
w.setGraph {
// version: 13-Oct-2021
val r = Runner("run")
val m = r.messages
m.changed.filter(m.nonEmpty) --> PrintLn(m.mkString("\n"))
val in = AudioFileIn()
in.value <-> Artifact("run:in")
val out = AudioFileOut()
out.value <-> Artifact("run:out")
out.fileType <-> "run:out-type".attr(0)
out.sampleFormat <-> "run:out-format".attr(2)
val ggThresh = DoubleField()
ggThresh.unit = "dB FS"
ggThresh.min = -320.0
ggThresh.max = 0.0
ggThresh.value <-> "run:thresh-db".attr(-60.0)
val ggFadeLen = DoubleField()
ggFadeLen.unit = "ms"
ggFadeLen.min = 0.0
ggFadeLen.max = 60 * 1000.0
ggFadeLen.value <-> "run:fade-len-ms".attr(100.0)
def mkLabel(text: String) = {
val l = Label(text)
l.hAlign = Align.Trailing
l
}
def left(c: Component*): Component = {
val f = FlowPanel(c: _*)
f.align = Align.Leading
f.vGap = 0
f
}
val p = GridPanel(
mkLabel("Input:" ), in,
mkLabel("Output:"), out,
Label(" "), Empty(),
mkLabel("Threshold:"), left(ggThresh),
mkLabel("Fade Out Length:"), left(ggFadeLen),
)
p.columns = 2
p.hGap = 8
p.compact = true
val ggRender = Button(" Render ")
val ggCancel = Button(" X ")
ggCancel.tooltip = "Cancel Rendering"
val pb = ProgressBar()
ggRender.clicked --> r.run
ggCancel.clicked --> r.stop
val stopped = (r.state sig_== 0) || (r.state > 3)
ggRender.enabled = stopped
ggCancel.enabled = !stopped
pb.value = (r.progress * 100).toInt
val bot = BorderPanel(
center = pb,
east = {
val f = FlowPanel(ggCancel, ggRender)
f.vGap = 0
f
}
)
bot.vGap = 0
val bp = BorderPanel(
north = p,
south = bot
)
bp.vGap = 8
bp.border = Border.Empty(8, 8, 0, 4)
bp
}
w
}
}
| Sciss/FScape-next | modules/src/main/scala/de/sciss/fscape/modules/ModTruncateTail.scala | Scala | agpl-3.0 | 4,829 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.eagle.datastream.storm
import java.util
import com.typesafe.config.Config
import org.apache.eagle.datastream._
case class StormExecutorForAlertWrapper(delegate: StormStreamExecutor[Tuple2[String, util.SortedMap[AnyRef, AnyRef]]], streamName: String)
extends StormStreamExecutor3[String, String, util.SortedMap[Object, Object]]{
override def prepareConfig(config: Config): Unit = {
delegate.prepareConfig(config)
}
override def init: Unit = {
delegate.init
}
override def flatMap(input: Seq[AnyRef], collector: Collector[Tuple3[String, String, util.SortedMap[Object, Object]]]): Unit = {
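    // run the wrapped executor and re-emit each (key, data) pair as (key, streamName, data)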
delegate.flatMap(input, new Collector[Tuple2[String, util.SortedMap[AnyRef, AnyRef]]] {
override def collect(r: Tuple2[String, util.SortedMap[AnyRef, AnyRef]]): Unit = {
collector.collect(Tuple3(r.f0, streamName, r.f1))
}
})
}
} | sunlibin/incubator-eagle | eagle-core/eagle-data-process/eagle-stream-process-api/src/main/scala/org/apache/eagle/datastream/storm/StormExecutorForAlertWrapper.scala | Scala | apache-2.0 | 1,688 |
package com.github.diegopacheco.scala.akkastreams
object SeqRangeMain extends App {
import akka.stream._
import akka.stream.scaladsl._
import akka.{ NotUsed, Done }
import akka.actor.ActorSystem
import akka.util.ByteString
import scala.concurrent._
import scala.concurrent.duration._
import java.nio.file.Paths
implicit val system = ActorSystem("QuickStart")
implicit val materializer = ActorMaterializer()
val source: Source[Int, NotUsed] = Source(1 to 100)
source.runForeach(i ⇒ println(i))(materializer)
} | diegopacheco/scala-playground | akkastreams-2.5.7-fun/src/main/scala/com/github/diegopacheco/scala/akkastreams/SeqRangeMain.scala | Scala | unlicense | 553 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.util
import org.apache.spark.util._
/*
* Allows us access to a manual clock. Note that the manual clock changed between 1.1.1 and 1.3
*/
class TestManualClock(var time: Long) extends Clock {
/**
* @return `ManualClock` with initial time 0
*/
def this() = this(0L)
def getTime(): Long = getTimeMillis() // Compat
def currentTime(): Long = getTimeMillis() // Compat
def getTimeMillis(): Long =
synchronized {
time
}
/**
* @param timeToSet new time (in milliseconds) that the clock should represent
*/
def setTime(timeToSet: Long) =
synchronized {
time = timeToSet
notifyAll()
}
/**
* @param timeToAdd time (in milliseconds) to add to the clock's time
*/
def advance(timeToAdd: Long) =
synchronized {
time += timeToAdd
notifyAll()
}
def addToTime(timeToAdd: Long) = advance(timeToAdd) // Compat
/**
* @param targetTime block until the clock time is set or advanced to at least this time
* @return current time reported by the clock when waiting finishes
*/
def waitTillTime(targetTime: Long): Long =
synchronized {
while (time < targetTime) {
wait(100)
}
getTimeMillis()
}
}
| eyeem/spark-testing-base | src/main/scala/org/apache/spark/streaming/TestManualClock.scala | Scala | apache-2.0 | 2,058 |
package pimpathon.scalaz.std
import org.junit.Test
import scalaz.{NonEmptyList ⇒ NEL, Failure, Success}
import pimpathon.scalaz.std.option._
class OptionTest {
@Test def toSuccessNel(): Unit = calling(_.toSuccessNel("fail")).produces(Success(1), Failure(NEL("fail")))
@Test def toFailureNel(): Unit = calling(_.toFailureNel("succeed")).produces(Failure(NEL(1)), Success("succeed"))
private def calling[A](f: Option[Int] ⇒ A) = pimpathon.util.on(Some(1), None).calling(f)
} | raymanoz/pimpathon | src/test/scala/pimpathon/scalaz/std/option.scala | Scala | apache-2.0 | 487 |
package com.arcusys.learn.facades
import com.arcusys.learn.models.report._
import com.arcusys.learn.models.report.OverallByPeriodResponse
import com.arcusys.learn.models.report.VerbResponse
import com.arcusys.learn.models.report.StudentMostActiveResponse
import com.arcusys.learn.models.report.OverallByTimeResponse
import com.arcusys.learn.models.response.CollectionResponse
import com.arcusys.valamis.lrs.api.StatementApi
import com.arcusys.valamis.lrs.api.valamis.VerbApi
import com.arcusys.valamis.lrs.tincan.Statement
trait ReportFacadeContract {
def getParticipantReport(groupBy: String): Seq[ParticipantResponse]
def getStudentsLeaderboard(statementApi: StatementApi, period: String, offset: Int, amount: Int): CollectionResponse[StudentMostActiveResponse]
def getStatementVerbs(statementApi: StatementApi): VerbResponse
def getOverallByTime(verbApi: VerbApi): OverallByTimeResponse
def getOverallByPeriod(statementApi: StatementApi, period: String, from: Long, to: Long): OverallByPeriodResponse
def getCourseEvent(group: String, groupPeriod: Option[String], period: String, from: Long, to: Long): Seq[CourseEventResponse]
def getUserLatestStatements(statementApi: StatementApi, currentUserID: Int, offset: Int, amount: Int): CollectionResponse[Statement]
def getStudentsLatestStatements(statementApi: StatementApi, currentUserID: Int, offset: Int, amount: Int): CollectionResponse[Statement]
def getMostActive(statementApi: StatementApi, currentUserID: Int, offset: Int, amount: Int): CollectionResponse[StudentMostActiveResponse]
def getCourseReport(statementApi: StatementApi, isInstanceScope: Boolean, courseID: Option[Int]): CourseReportResponse
}
| ViLPy/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/facades/ReportFacadeContract.scala | Scala | lgpl-3.0 | 1,693 |
class Test {
case class Foo(x: Int, private var y: Int)
}
object Test {
val test = new Test
val x = test.Foo(1, 2)
x match {
case test.Foo(x, y) => println(x); println(y)
}
}
| yusuke2255/dotty | tests/untried/pos/caseaccs.scala | Scala | bsd-3-clause | 190 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.mongo.geospatial
import play.api.libs.json.{Format, JsArray, Writes}
import uk.gov.hmrc.mongo.json.TupleFormats
case class Coordinates(lon: Double, lat: Double) {
lazy val tuple = (lon, lat)
}
object Coordinates {
val reads = TupleFormats.tuple2Reads[Coordinates, Double, Double](Coordinates.apply _)
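  // writes the pair as a JSON array in [lon, lat] order, the convention used by MongoDB geospatial queries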
def writes(implicit aWrites: Writes[Double]) = new Writes[Coordinates] {
def writes(coordinates: Coordinates) =
JsArray(Seq(aWrites.writes(coordinates.lon), aWrites.writes(coordinates.lat)))
}
implicit val formats = Format(reads, writes)
}
| hmrc/simple-reactivemongo | src/main/scala/uk/gov/hmrc/mongo/geospatial/Coordinates.scala | Scala | apache-2.0 | 1,193 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.fpm
import org.apache.spark.SparkFunSuite
import org.apache.spark.mllib.util.MLlibTestSparkContext
// Frequent pattern mining - Association Rules
class AssociationRulesSuite extends SparkFunSuite with MLlibTestSparkContext {
test("association rules using String type") {//使用字符串类型的关联规则
    val freqItemsets = sc.parallelize(Seq( // frequent itemsets
(Set("s"), 3L), (Set("z"), 5L), (Set("x"), 4L), (Set("t"), 3L), (Set("y"), 3L),
(Set("r"), 3L),
(Set("x", "z"), 3L), (Set("t", "y"), 3L), (Set("t", "x"), 3L), (Set("s", "x"), 3L),
(Set("y", "x"), 3L), (Set("y", "z"), 3L), (Set("t", "z"), 3L),
(Set("y", "x", "z"), 3L), (Set("t", "x", "z"), 3L), (Set("t", "y", "z"), 3L),
(Set("t", "y", "x"), 3L),
(Set("t", "y", "x", "z"), 3L)
).map {
case (items, freq) => new FPGrowth.FreqItemset(items.toArray, freq)
})
    // Frequent pattern mining - Association Rules
val ar = new AssociationRules()
val results1 = ar
.setMinConfidence(0.9)
.run(freqItemsets)
.collect()
    /* Verify results using the `R` code:
transactions = as(sapply(
list("r z h k p",
"z y x w v u t s",
"s x o n r",
"x z y m t s q e",
"z",
"x z y r q t p"),
FUN=function(x) strsplit(x," ",fixed=TRUE)),
"transactions")
ars = apriori(transactions,
parameter = list(support = 0.0, confidence = 0.5, target="rules", minlen=2))
arsDF = as(ars, "data.frame")
arsDF$support = arsDF$support * length(transactions)
names(arsDF)[names(arsDF) == "support"] = "freq"
> nrow(arsDF)
[1] 23
> sum(arsDF$confidence == 1)
[1] 23
*/
assert(results1.size === 23)
      // math.abs returns the absolute value of the number
assert(results1.count(rule => math.abs(rule.confidence - 1.0D) < 1e-6) == 23)
val results2 = ar
.setMinConfidence(0)
.run(freqItemsets)
.collect()
/* Verify results using the `R` code:
ars = apriori(transactions,
parameter = list(support = 0.5, confidence = 0.5, target="rules", minlen=2))
arsDF = as(ars, "data.frame")
arsDF$support = arsDF$support * length(transactions)
names(arsDF)[names(arsDF) == "support"] = "freq"
nrow(arsDF)
sum(arsDF$confidence == 1)
> nrow(arsDF)
[1] 30
> sum(arsDF$confidence == 1)
[1] 23
*/
assert(results2.size === 30)
      // math.abs returns the absolute value of the number
assert(results2.count(rule => math.abs(rule.confidence - 1.0D) < 1e-6) == 23)
}
}
| tophua/spark1.52 | mllib/src/test/scala/org/apache/spark/mllib/fpm/AssociationRulesSuite.scala | Scala | apache-2.0 | 3,500 |
// Copyright 2010 Twitter, Inc.
//
// Encapsulate communication to a single RPC endpoint.
//
// The "client" is the abstract endpoint, a "connection" is a concrete
// connection to said client.
package com.twitter.rpcclient
import scala.reflect.Manifest
import java.util.concurrent.atomic.AtomicInteger
import org.apache.commons.pool.PoolableObjectFactory
import org.apache.commons.pool.impl.StackObjectPool
import com.twitter.ostrich.Stats
import com.twitter.xrayspecs.{Time, Duration}
import com.twitter.xrayspecs.TimeConversions._
// Events that occur in individual clients. They may be observed.
sealed abstract class ClientEvent
case class UnhealthyEvent(timestamp: Time) extends ClientEvent
case class HealthyEvent(timestamp: Time, unhealthyTime: Time) extends ClientEvent
case class TimeoutEvent(timestamp: Time) extends ClientEvent
// Translated errors - used when unwrapping exceptions.
sealed abstract class ClientError
object TimeoutError extends ClientError
object IgnoreError extends ClientError
object UnknownError extends ClientError
// We based our exceptions on RuntimeException because they are
// unchecked, and so we avoid Java runtime errors when passing them
// through the proxy.
class ClientException extends RuntimeException
class ClientUnavailableException extends ClientException
class ClientTimedOutException extends ClientException
// The client wraps an underlying proxy stub, abstracting away
// connection management. It also adorns it with some useful
// methods. Access the underlying client interface through ``proxy''.
trait Client[T] {
def proxy: T
  def isHealthy: Boolean
}
// ``Connection[T]'' describes an individual connection to a client of
// type `T'. This wraps the RPC client itself, and adorns it with the
// ability to check connection health, client health, and so on.
trait Connection[+T] {
val client: T
val host: String
val port: Int
// Ensure that the underlying connection is open. This is always
// called at least once prior to use.
def ensureOpen(): Unit
// Tear down the underlying connection. Called before relinquishing
// this node from the pool.
def teardown(): Unit
// Flush is called every time the node is given back to the pool.
def flush(): Unit
  // Interpret an exception that occurred during use.
def unwrapException: PartialFunction[Exception, ClientError] = {
case _ => UnknownError
}
// Failure accrual management.
// Defines whether this node is considered healthy (eligible for
// use).
var didFail = false
def isHealthy: Boolean = !didFail
def markFailed() { didFail = true }
}
abstract class PooledClient[T <: AnyRef](implicit manifest: Manifest[T]) extends Client[T] {
// Minimal complete definition: createConnection
def createConnection: Connection[T]
def handleEvent: PartialFunction[ClientEvent, Unit] = { case _ => () }
val name: String
// Additional parameters for health checks:
val maxAllowableFailures: Int = 5
val retryInterval: Duration = 10.seconds
// Pool stats.
def numActive = pool.getNumActive
def numIdle = pool.getNumIdle
def poolSize = numActive + numIdle
class ConnectionFactory extends PoolableObjectFactory {
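    // adapts the Connection lifecycle (open / teardown / flush / health check) to commons-pool's factory callbacks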
def makeObject: Object = {
val c = createConnection
c.ensureOpen()
c
}
def validateObject (o: Object) = o.asInstanceOf[Connection[T]].isHealthy
def destroyObject (o: Object) = o.asInstanceOf[Connection[T]].teardown()
def activateObject (o: Object) = o.asInstanceOf[Connection[T]].ensureOpen()
def passivateObject(o: Object) = o.asInstanceOf[Connection[T]].flush()
}
private val pool = new StackObjectPool(new ConnectionFactory)
@volatile var wentUnhealthyAt:Option[Time] = None
val numFailures = new AtomicInteger(0)
def isHealthy = wentUnhealthyAt match {
case None =>
true
case Some(time) if time < retryInterval.ago =>
markHealthy()
true
case _ =>
false
}
def didSucceed() {
markHealthy()
}
def didFail() {
if (numFailures.incrementAndGet > maxAllowableFailures)
markUnhealthy()
}
def markUnhealthy() {
wentUnhealthyAt = Some(Time.now)
for (now <- wentUnhealthyAt)
logEvent(UnhealthyEvent(now))
}
def markHealthy() {
for (unhealthyTime <- wentUnhealthyAt)
logEvent(HealthyEvent(Time.now, unhealthyTime))
wentUnhealthyAt = None
numFailures.set(0)
}
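  // Borrow a connection from the pool; returns None (and records a failure) when the client is unhealthy or the pool is exhausted.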
  def get: Option[Connection[T]] =
if (isHealthy) {
try {
Some(pool.borrowObject().asInstanceOf[Connection[T]])
} catch {
case _: NoSuchElementException =>
didFail()
None
}
} else {
None
}
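  // Return a connection to the pool, updating client health based on whether the connection failed.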
def put(conn: Connection[T]) {
if (conn.didFail || !conn.isHealthy) {
// it's useless to return it to the pool at this point.
didFail()
} else {
pool.returnObject(conn)
didSucceed()
}
}
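  // Dynamic proxy that dispatches every RPC through a pooled connection, recording per-call timing stats and translating failures.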
val _proxy = Proxy[T](manifest.erasure) { invocation =>
val conn = get match {
case Some(conn) => conn
case None => throw new ClientUnavailableException
}
try {
val (rv, msec) = Stats.duration(invocation(conn.client))
Stats.addTiming("rpcclient_%s_rpc_%s".format(name, invocation.name), msec.toInt)
Stats.addTiming(
"rpcclient_%s_hostport_%s_%d_rpc_%s".format(name, conn.host, conn.port, invocation.name),
msec.toInt)
rv
} catch {
case e: Exception =>
val unwrappedException =
if (conn.unwrapException.isDefinedAt(e))
conn.unwrapException(e)
else
UnknownError
unwrappedException match {
case IgnoreError => () // does not affect health accounting
case TimeoutError =>
Stats.incr("rpcclient_%s_%s_timeout".format(name, invocation.name))
logEvent(TimeoutEvent(Time.now))
conn.markFailed()
throw new ClientTimedOutException
case UnknownError =>
Stats.incr("rpcclient_%s_%s_exception".format(name, invocation.name))
conn.markFailed()
}
throw e
} finally {
put(conn)
}
}
def proxy = _proxy
def logEvent(e: ClientEvent) {
if (handleEvent.isDefinedAt(e))
handleEvent(e)
}
}
| kmonkeyjam/rpc-client | src/main/scala/com/twitter/rpcclient/Client.scala | Scala | apache-2.0 | 6,311 |
package com.aluxian.tweeather.transformers
import java.text.SimpleDateFormat
import java.util.{Calendar, Date, Locale}
import com.aluxian.tweeather.RichDate
import com.aluxian.tweeather.models.{Coordinates, LocationBox}
import com.aluxian.tweeather.utils.LocationBoxParam
import org.apache.spark.ml.UnaryTransformer
import org.apache.spark.ml.param._
import org.apache.spark.ml.util.{BasicParamsReadable, BasicParamsWritable, Identifiable}
import org.apache.spark.sql.types._
/**
* A transformer that adds a download url for the GRIB file (weather data).
*/
class GribUrlGenerator(override val uid: String)
extends UnaryTransformer[Long, String, GribUrlGenerator] with BasicParamsWritable {
def this() = this(Identifiable.randomUID("gribUrlGenerator"))
/**
* Param for the location box to be used.
* @group param
*/
final val locationBox: LocationBoxParam =
new LocationBoxParam(this, "locationBox", "location box to be used")
/** @group setParam */
def setLocationBox(box: LocationBox): this.type = set(locationBox, box)
/** @group getParam */
def getLocationBox: LocationBox = $(locationBox)
setDefault(locationBox -> LocationBox(Coordinates(-90, -180), Coordinates(90, 180)))
override protected def createTransformFunc: Long => String = {
val dateFormatter = new SimpleDateFormat("yyyyMMdd", Locale.US)
(timestamp: Long) => {
val date = new Date(timestamp)
val dateStr = dateFormatter.format(date)
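      // pick the GFS model cycle (00, 06, 12 or 18 UTC) that precedes the given hour of day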
val cycle = date.toCalendar.get(Calendar.HOUR_OF_DAY) match {
case h if h < 6 => "00"
case h if h < 12 => "06"
case h if h < 18 => "12"
case _ => "18"
}
val location = $(locationBox)
Seq(
s"http://nomads.ncep.noaa.gov/cgi-bin/filter_gfs_0p25.pl?file=gfs.t${cycle}z.pgrb2.0p25.f000",
"lev_2_m_above_ground=on",
"lev_surface=on",
"var_PRES=on",
"var_TMP=on",
"var_RH=on",
"subregion=",
"leftlon=" + location.sw.lon.toInt,
"rightlon=" + location.ne.lon.toInt,
"toplat=" + location.ne.lat.toInt,
"bottomlat=" + location.sw.lat.toInt,
"dir=%2Fgfs." + dateStr + cycle
).mkString("&")
}
}
override protected def validateInputType(inputType: DataType): Unit = {
require(inputType == LongType, s"Input type must be long type but got $inputType.")
}
override protected def outputDataType: DataType = StringType
override def copy(extra: ParamMap): GribUrlGenerator = defaultCopy(extra)
}
object GribUrlGenerator extends BasicParamsReadable[GribUrlGenerator] {
override def load(path: String): GribUrlGenerator = super.load(path)
}
| Aluxian/Tweeather | src/main/scala/com/aluxian/tweeather/transformers/GribUrlGenerator.scala | Scala | apache-2.0 | 2,674 |
package com.aurelpaulovic.transaction.config
case class Retries (val maxRetries: Int) extends TransactionConfigProperty {
} | AurelPaulovic/transactions-api | src/main/scala/com/aurelpaulovic/transaction/config/Retries.scala | Scala | apache-2.0 | 125 |
package net.rrm.ehour.persistence.backup.dao
import java.util
trait BackupDao {
def findAll(table: String): util.List[util.Map[String, Object]]
}
| momogentoo/ehour | eHour-persistence/src/main/scala/net/rrm/ehour/persistence/backup/dao/BackupDao.scala | Scala | gpl-2.0 | 152 |
package spatutorial.client.services
import autowire._
import rx._
import spatutorial.client.ukko._
import spatutorial.shared.{TodoItem, Api, _}
import boopickle.Default._
import scala.scalajs.concurrent.JSExecutionContext.Implicits.runNow
import scala.scalajs.js
case object RefreshTodos
case class UpdateAllTodos(todos: Seq[TodoItem])
case class UpdateTodo(item: TodoItem)
trait TodoStore extends Actor {
override val name = "TodoStore"
// define a reactive variable
private val items = Var(Seq.empty[TodoItem])
private def updateItems(newItems: Seq[TodoItem]): Unit = {
// check if todos have really changed
if (newItems != items()) {
// use Rx to update, which propagates down to dependencies
items() = newItems
newItems.foreach(println)
}
}
override def receive = {
case RefreshTodos =>
// load all todos from the server
AjaxClient[Api].getTodos().call().foreach { todos =>
updateItems(todos)
}
AjaxClient[Api].getMessage("1234").call().foreach {
case Success =>
js.Dynamic.global.console("yeepee")
case Failure =>
js.Dynamic.global.console("Failure")
}
case UpdateAllTodos(todos) =>
updateItems(todos)
}
// return as Rx to prevent mutation in dependencies
def todos:Rx[Seq[TodoItem]] = Rx { items() }
}
// create a singleton instance of TodoStore
object TodoStore extends TodoStore {
// register this actor with the dispatcher
MainDispatcher.register(this)
}
object TodoActions {
def updateTodo(item: TodoItem) = {
// inform the server to update/add the item
AjaxClient[Api].updateTodo(item).call().foreach { todos =>
MainDispatcher.dispatch(UpdateAllTodos(todos))
}
}
def deleteTodo(item: TodoItem) = {
// tell server to delete a todo
AjaxClient[Api].deleteTodo(item.id).call().foreach { todos =>
MainDispatcher.dispatch(UpdateAllTodos(todos))
}
}
}
| MikaelMayer/scalajs-spa-tutorial | client/src/main/scala/spatutorial/client/services/TodoStore.scala | Scala | apache-2.0 | 1,952 |
package controllers
import master.Mastermind
import play.api.Logger
import play.api.libs.json.Json
import play.api.mvc._
object MastermindController extends Controller {
def addWorker(ip: String) = Action {
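    // register the worker, then immediately try to assign and send it a unit of work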
val worker = Mastermind.registerWorker(ip)
Logger.info(s"$ip registered as worker ${worker.id}.")
Mastermind.assignWork(worker.id).map {
work =>
Mastermind.sendWork(worker.id, work.id)
Ok(Json.obj("success" -> "ok"))
}.getOrElse(Ok(Json.obj("error" -> "not authorized")))
}
def workDone(workerId: Long) = Action {
Logger.info(s"Worker $workerId finished a job.")
Mastermind.createWork()
Mastermind.assignWork(workerId).map {
work =>
Mastermind.sendWork(workerId, work.id)
Ok(Json.obj("success" -> "ok"))
}.getOrElse(Ok(Json.obj("error" -> "not authorized")))
}
} | rtfpessoa/distributed-twitter-crawler | app/controllers/MastermindController.scala | Scala | mit | 861 |
import scala.language.strictEquality
/**
* Multiversal Equality: http://dotty.epfl.ch/docs/reference/multiversal-equality.html
* scala.Eq definition: https://github.com/lampepfl/dotty/blob/master/library/src/scala/Eq.scala
*/
object MultiversalEquality {
def test: Unit = {
// Values of types Int and String cannot be compared with == or !=,
// unless we add a custom implicit like:
implicit def eqIntString: Eq[Int, String] = Eq
println(3 == "3")
// By default, all numbers are comparable, because of;
// implicit def eqNumber : Eq[Number, Number] = Eq
println(3 == 5.1)
// By default, all Sequences are comparable, because of;
// implicit def eqSeq[T, U](implicit eq: Eq[T, U]): Eq[Seq[T], Seq[U]] = Eq
println(List(1, 2) == Vector(1, 2))
class A(a: Int)
class B(b: Int)
val a = new A(4)
val b = new B(4)
// scala.language.strictEquality is enabled, therefore we need some extra implicits
// to compare instances of A and B.
implicit def eqAB: Eq[A, B] = Eq
implicit def eqBA: Eq[B, A] = Eq
println(a != b)
println(b == a)
}
}
| smarter/dotty-example-project | src/main/scala/MultiversalEquality.scala | Scala | bsd-3-clause | 1,128 |