| column | type | range |
|---|---|---|
| code | string | lengths 5-1M |
| repo_name | string | lengths 5-109 |
| path | string | lengths 6-208 |
| language | stringclasses | 1 value |
| license | stringclasses | 15 values |
| size | int64 | 5-1M |
import sbt._
object Dependencies {
object Compile {
val hadoop = "org.apache.hadoop" % "hadoop-core" % "0.20.205.0" % "compile"
}
object Test {
val spec2 = "org.specs2" %% "specs2" % "1.8.2" % "test"
}
}
| amir343/saaloop | project/Dependencies.scala | Scala | apache-2.0 | 330 |
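A minimal sketch of how these definitions might be referenced from the build, assuming a conventional `build.sbt` alongside the `project/` directory; the project name is illustrative and not taken from the repo:

// build.sbt (hypothetical wiring of the Dependencies object defined above)
name := "saaloop"

libraryDependencies ++= Seq(Dependencies.Compile.hadoop, Dependencies.Test.spec2)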
package org.jetbrains.plugins.scala
package annotator.createFromUsage
import com.intellij.codeInsight.intention.IntentionAction
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.psi.{PsiDocumentManager, PsiFile}
import org.jetbrains.plugins.scala.debugger.evaluation.ScalaCodeFragment
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
/**
* Nikolay.Tropin
* 2014-07-28
*/
abstract class CreateFromUsageQuickFixBase(ref: ScReferenceElement, description: String) extends IntentionAction {
val getText = s"Create $description '${ref.nameId.getText}'"
val getFamilyName = s"Create $description"
override def isAvailable(project: Project, editor: Editor, file: PsiFile): Boolean = {
if (!ref.isValid) return false
if (!file.isInstanceOf[ScalaFile]) return false
if (!ref.getManager.isInProject(file)) return false
if (file.isInstanceOf[ScalaCodeFragment]) return false
true
}
override def startInWriteAction() = false
override def invoke(project: Project, editor: Editor, file: PsiFile) {
PsiDocumentManager.getInstance(project).commitAllDocuments()
if (!ref.isValid) return
invokeInner(project, editor, file)
}
protected def invokeInner(project: Project, editor: Editor, file: PsiFile)
}
| SergeevPavel/intellij-scala | src/org/jetbrains/plugins/scala/annotator/createFromUsage/CreateFromUsageQuickFixBase.scala | Scala | apache-2.0 | 1,386 |
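A hypothetical subclass sketch, only to illustrate how the abstract `invokeInner` hook is meant to be filled in; the class name and the body are illustrative placeholders, not part of the plugin:

// Hypothetical subclass of CreateFromUsageQuickFixBase; a real fix would build
// and insert the new definition via the PSI here instead of only reporting it.
class CreateMethodQuickFix(ref: ScReferenceElement)
  extends CreateFromUsageQuickFixBase(ref, "method") {

  override protected def invokeInner(project: Project, editor: Editor, file: PsiFile): Unit = {
    println(s"Would create method '${ref.nameId.getText}' in ${file.getName}")
  }
}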
package net.categoricaldata.category
trait Equivalence { equivalence =>
val source: Category
val target: Category
trait Source2Target extends Functor {
override val source: equivalence.source.type = equivalence.source
override val target: equivalence.target.type = equivalence.target
}
trait Target2Source extends Functor {
override val source: equivalence.target.type = equivalence.target
override val target: equivalence.source.type = equivalence.source
}
val functor: Source2Target
val inverse: Target2Source
val sourceEquivalence: NaturalTransformation
val targetEquivalence: NaturalTransformation
}
| JasonGross/categoricaldata | src/main/scala/net/categoricaldata/category/Equivalence.scala | Scala | mit | 629 |
package org.json4s
package scalaz
import _root_.scalaz._
import Scalaz._
import native.JsonMethods._
import org.json4s.native.scalaz._
import JsonScalaz._
import org.specs2.mutable.Specification
object Example extends Specification {
case class Address(street: String, zipCode: String)
case class Person(name: String, age: Int, address: Address)
"Parse address in an Applicative style" in {
val json = parse(""" {"street": "Manhattan 2", "zip": "00223" } """)
val a1 = field[String]("zip")(json) ap (field[String]("street")(json) map Address.curried)
val a2 = (field[String]("street")(json) |@| field[String]("zip")(json)) { Address }
val a3 = Address.applyJSON(field[String]("street"), field[String]("zip"))(json)
a2 mustEqual Success(Address("Manhattan 2", "00223"))
a3 mustEqual a2
a1 mustEqual a2
}
"Failed address parsing" in {
val json = parse(""" {"street": "Manhattan 2", "zip": "00223" } """)
val a = (field[String]("streets")(json) |@| field[String]("zip")(json)) { Address }
a.swap.toOption.get.list mustEqual IList(NoSuchFieldError("streets", json))
}
"Parse Person with Address" in {
implicit def addrJSON: JSONR[Address] = new JSONR[Address] {
def read(json: JValue) = Address.applyJSON(field[String]("street"), field[String]("zip"))(json)
}
val p = parse(""" {"name":"joe","age":34,"address":{"street": "Manhattan 2", "zip": "00223" }} """)
val person = Person.applyJSON(field[String]("name"), field[Int]("age"), field[Address]("address"))(p)
person mustEqual Success(Person("joe", 34, Address("Manhattan 2", "00223")))
}
"Format Person with Address" in {
implicit def addrJSON: JSONW[Address] = new JSONW[Address] {
def write(a: Address) =
makeObj(("street" -> toJSON(a.street)) :: ("zip" -> toJSON(a.zipCode)) :: Nil)
}
val p = Person("joe", 34, Address("Manhattan 2", "00223"))
val json = makeObj(("name" -> toJSON(p.name)) ::
("age" -> toJSON(p.age)) ::
("address" -> toJSON(p.address)) :: Nil)
json.shows mustEqual
"""{"name":"joe","age":34,"address":{"street":"Manhattan 2","zip":"00223"}}"""
}
"Parse Map" in {
val json = parse(""" {"street": "Manhattan 2", "zip": "00223" } """)
fromJSON[Map[String, String]](json) mustEqual Success(Map("street" -> "Manhattan 2", "zip" -> "00223"))
}
"Format Map" in {
toJSON(Map("street" -> "Manhattan 2", "zip" -> "00223")).shows mustEqual
"""{"street":"Manhattan 2","zip":"00223"}"""
}
}
| karolx/json4s | tests/src/test/scala/org/json4s/scalaz/Example.scala | Scala | apache-2.0 | 2,558 |
/*
* Copyright 2017 Simeon Simeonov and Swoop, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.swoop.spark.records
/** Definitions for the meaning of bits in the `features` bit mask of [[com.swoop.spark.records.Record]].
*
* The least significant 16 bits are reserved for framework use.
* The next 15 bits are available for application use.
* The most significant bit is reserved for framework use also.
*/
trait Features {
// @formatter:off
final val ERROR = 1 << 0 // 1 record has ERROR issue(s)
final val WARNING = 1 << 1 // 2 record has WARNING issue(s)
final val INFO = 1 << 2 // 4 record has INFO issue(s)
final val DEBUG = 1 << 3 // 8 record has DEBUG issue(s)
final val SKIP = 1 << 4 // 16 record must be skipped and not persisted
final val INVALID = 1 << 5 // 32 e.g., unexpected schema
final val CORRUPT = 1 << 6 // 64 e.g., unparseable JSON
final val RESERVED_7 = 1 << 7 // 128 reserved for framework use
final val INTERNAL_RECORD = 1 << 8 // 256 e.g., test records generated by the system
final val UNKNOWN = 1 << 9 // 512 e.g., unknown source type
final val QUALITY_CONCERN = 1 << 10 // 1024 e.g., strange client timestamp
final val LATE_ARRIVAL = 1 << 11 // 2048 e.g., input outside typical processing window
final val RESERVED_12 = 1 << 12 // 4096 reserved for framework use
final val RESERVED_13 = 1 << 13 // 8192 reserved for framework use
final val RESERVED_14 = 1 << 14 // 16384 reserved for framework use
final val RESERVED_15 = 1 << 15 // 32768 reserved for framework use
// @formatter:on
}
object Features extends Features
| swoop-inc/spark-records | src/main/scala/com/swoop/spark/records/Features.scala | Scala | apache-2.0 | 2,383 |
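To make the bit-mask layout above concrete, a small standalone sketch of how feature bits might be combined and tested; `APP_FEATURE` is a hypothetical application-defined bit in the application range (bits 16-30), not part of the library:

// Minimal sketch, not part of spark-records: combining and querying feature bits.
object FeaturesExample extends App {
  val APP_FEATURE = 1 << 16 // hypothetical first application-use bit

  val features = Features.ERROR | Features.SKIP | APP_FEATURE

  def has(mask: Int, bit: Int): Boolean = (mask & bit) != 0

  println(has(features, Features.ERROR))   // true
  println(has(features, Features.WARNING)) // false
  println(has(features, APP_FEATURE))      // true
}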
package org.jetbrains.plugins.scala.lang.typeInference
import org.jetbrains.plugins.scala.{LatestScalaVersions, ScalaVersion}
class Scala3DerivingTest extends ImplicitParametersTestBase {
override protected def supportedIn(version: ScalaVersion): Boolean =
version >= LatestScalaVersions.Scala_3_0
def testSimple(): Unit = checkNoImplicitParameterProblems(
s"""
|trait Eq[A]
|object Eq { def derived[A]: Eq[A] = ??? }
|
|case class Foo(x: Int) derives Eq
|object A {
| ${START}implicitly[Eq[Foo]]$END
|}
|""".stripMargin
)
def testEnum(): Unit = checkNoImplicitParameterProblems(
s"""
|trait Eq[A]
|object Eq { def derived[A]: Eq[A] = ??? }
|
|enum Tree[T](x: Int) derives Eq
|object A {
| implicit val eqInt: Eq[Int] = ???
| ${START}implicitly[Eq[Tree[Int]]]$END
|}
|""".stripMargin
)
def testMultipleTypeParameters(): Unit = checkNoImplicitParameterProblems(
s"""
|trait Eq[A]
|object Eq { def derived[A]: Eq[A] = ??? }
|
|case class Foo[A, B, C](a: A, b: B, c: C) derives Eq
|object A {
| given eqInt: Eq[Int] = ???
| given eqString: Eq[String] = ???
| given eqDouble: Eq[Double] = ???
| ${START}implicitly[Eq[Foo[Int, String, Double]]]$END
|}
|
|""".stripMargin
)
def testCanEqual(): Unit = checkNoImplicitParameterProblems(
s"""
|class Foo[A, B, C[_], D[_, _]](a: A, b: B, c: C[A], d: D[A, B]) derives scala.CanEqual
|object Foo {
| given cq: CanEqual[Double, Int] = ???
| ${START}implicitly[CanEqual[Foo[Double, String, List, [X, Y] =>> Int], Foo[Int, String, Option, [X, Y] =>> String]]]$END
|}
|""".stripMargin
)
def testDeriveForTypeConstructorTC(): Unit = checkNoImplicitParameterProblems(
s"""
|trait Functor[F[_]]
|object Functor { def derived[F[_]]: Functor[F] = ??? }
|
|case class Foo[A](a: A) derives Functor
|object A {
| ${START}implicitly[Functor[Foo]]$END
|}
|""".stripMargin
)
def testCurriedDeriveTooManyTypeParams(): Unit = checkNoImplicitParameterProblems(
s"""
|trait Functor[F[_]]
|object Functor { def derived[F[_]]: Functor[F] = ??? }
|
|case class Foo[A, B, C](a: A) derives Functor
|object A {
| ${START}implicitly[Functor[[X] =>> Foo[Int, String, X]]]$END
|}
|""".stripMargin
)
def testCurriedDeriveTooFewTypeParams(): Unit = checkNoImplicitParameterProblems(
s"""
|trait Functor[F[_, _]]
|object Functor { def derived[F[_]]: Functor[F] = ??? }
|
|case class Foo[A](a: A) derives Functor
|object A {
| ${START}implicitly[Functor[[X, Y] =>> Foo[Y]]]$END
|}
|""".stripMargin
)
def testDerivedWithImplicitParameters(): Unit = checkNoImplicitParameterProblems(
s"""
|trait Ord[A]
|trait Eq[A]
|object Ord {
| def derived[A](implicit ev: Eq[A]): Ord[A] = ???
|}
|
|case class Foo() derives Ord
|object Foo {
| given eqFoo: Eq[Foo] = ???
|}
|
|object A {
| ${START}implicitly[Ord[Foo]]$END
|}
|
|""".stripMargin
)
def testDerivedObject(): Unit = checkNoImplicitParameterProblems(
s"""
|trait Eq[+A]
|object Eq {
| object derived extends Eq[Any]
|}
|
|trait Bar derives Eq
|object A {
| ${START}implicitly[Eq[Bar]]$END
|}
|""".stripMargin
)
def testDerivedVal(): Unit = checkNoImplicitParameterProblems(
s"""
|trait Eq[+A]
|object Eq {
| val derived: Eq[Any] = ???
|}
|
|trait Bar derives Eq
|object A {
| ${START}implicitly[Eq[Bar]]$END
|}
|""".stripMargin
)
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/typeInference/Scala3DerivingTest.scala | Scala | apache-2.0 | 3,966 |
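For orientation, a standalone Scala 3 sketch of roughly what the `derives Eq` clauses exercised by these tests provide: a given in the companion object built from `Eq.derived`. This is an illustration of the mechanism, not the compiler's exact expansion:

// Rough illustration of `case class Foo(x: Int) derives Eq`:
trait Eq[A]
object Eq { def derived[A]: Eq[A] = new Eq[A] {} }

case class Foo(x: Int)
object Foo {
  given Eq[Foo] = Eq.derived // approximately what the derives clause contributes
}

@main def checkDerivedInstance(): Unit =
  println(summon[Eq[Foo]]) // resolves via the companion given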
package net.sansa_stack.rdf.common.partition.layout
import scala.reflect.runtime.universe.{Type, typeOf}
import org.apache.jena.graph.Triple
import net.sansa_stack.rdf.common.partition.core.RdfPartitionerDefault
import net.sansa_stack.rdf.common.partition.schema.SchemaStringFloat
object TripleLayoutFloat
extends TripleLayout {
override def schema: Type = typeOf[SchemaStringFloat]
override def fromTriple(t: Triple): SchemaStringFloat = {
val s = t.getSubject
val o = t.getObject
val v = if (o.isLiteral && o.getLiteralValue.isInstanceOf[Number]) {
o.getLiteralValue.asInstanceOf[Number]
} else throw new RuntimeException(s"Layout only for float values: $t")
val sStr = RdfPartitionerDefault.getUriOrBNodeString(s)
SchemaStringFloat(sStr, v.floatValue())
}
}
| SANSA-Stack/SANSA-RDF | sansa-rdf/sansa-rdf-common/src/main/scala/net/sansa_stack/rdf/common/partition/layout/TripleLayoutFloat.scala | Scala | apache-2.0 | 806 |
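A small usage sketch, assuming Jena's `NodeFactory` and XSD datatypes are on the classpath; the URIs and the literal value are made up for illustration:

import org.apache.jena.datatypes.xsd.XSDDatatype
import org.apache.jena.graph.{NodeFactory, Triple}

// Hypothetical example: convert a triple with a float literal object
// into the flat SchemaStringFloat row produced by TripleLayoutFloat.
object TripleLayoutFloatExample extends App {
  val triple = Triple.create(
    NodeFactory.createURI("http://example.org/s"),
    NodeFactory.createURI("http://example.org/p"),
    NodeFactory.createLiteralByValue(1.5f, XSDDatatype.XSDfloat))

  println(TripleLayoutFloat.fromTriple(triple)) // SchemaStringFloat(<subject>, 1.5)
}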
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen
import org.apache.flink.api.common.functions.{MapFunction, RichMapFunction}
import org.apache.flink.table.api.{TableConfig, TableException}
import org.apache.flink.table.data.binary.{BinaryStringData, BinaryStringDataUtil}
import org.apache.flink.table.data.{DecimalData, GenericRowData, TimestampData}
import org.apache.flink.table.functions.{ConstantFunctionContext, FunctionContext, UserDefinedFunction}
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.codegen.FunctionCodeGenerator.generateFunction
import org.apache.flink.table.planner.functions.sql.FlinkSqlOperatorTable.{JSON_ARRAY, JSON_OBJECT}
import org.apache.flink.table.planner.plan.utils.PythonUtil.containsPythonCall
import org.apache.flink.table.planner.utils.Logging
import org.apache.flink.table.types.DataType
import org.apache.flink.table.types.logical.RowType
import org.apache.flink.table.planner.utils.TimestampStringUtils.fromLocalDateTime
import org.apache.calcite.avatica.util.ByteString
import org.apache.calcite.rex.{RexBuilder, RexCall, RexExecutor, RexLiteral, RexNode, RexUtil}
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.sql.`type`.SqlTypeName
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
/**
* Evaluates constant expressions with code generator.
*
* @param allowChangeNullability If the reduced expr's nullability can be changed, e.g. a null
* literal is definitely nullable and the other literals are
* not null.
*/
class ExpressionReducer(
config: TableConfig,
allowChangeNullability: Boolean = false)
extends RexExecutor
with Logging {
private val EMPTY_ROW_TYPE = RowType.of()
private val EMPTY_ROW = new GenericRowData(0)
private val nonReducibleJsonFunctions = Seq(JSON_OBJECT, JSON_ARRAY)
override def reduce(
rexBuilder: RexBuilder,
constExprs: java.util.List[RexNode],
reducedValues: java.util.List[RexNode]): Unit = {
val pythonUDFExprs = new ListBuffer[RexNode]()
val literals = skipAndValidateExprs(rexBuilder, constExprs, pythonUDFExprs)
val literalTypes = literals.map(e => FlinkTypeFactory.toLogicalType(e.getType))
val resultType = RowType.of(literalTypes: _*)
// generate MapFunction
val ctx = new ConstantCodeGeneratorContext(config)
val exprGenerator = new ExprCodeGenerator(ctx, false)
.bindInput(EMPTY_ROW_TYPE)
val literalExprs = literals.map(exprGenerator.generateExpression)
val result = exprGenerator.generateResultExpression(
literalExprs, resultType, classOf[GenericRowData])
val generatedFunction = generateFunction[MapFunction[GenericRowData, GenericRowData]](
ctx,
"ExpressionReducer",
classOf[MapFunction[GenericRowData, GenericRowData]],
s"""
|${result.code}
|return ${result.resultTerm};
|""".stripMargin,
resultType,
EMPTY_ROW_TYPE)
val function = generatedFunction.newInstance(Thread.currentThread().getContextClassLoader)
val richMapFunction = function match {
case r: RichMapFunction[GenericRowData, GenericRowData] => r
case _ =>
throw new TableException("RichMapFunction[GenericRowData, GenericRowData] required here")
}
val parameters = config.getConfiguration
val reduced = try {
richMapFunction.open(parameters)
// execute
richMapFunction.map(EMPTY_ROW)
} catch { case t: Throwable =>
// maybe a function accesses some cluster specific context information
// skip the expression reduction and try it again during runtime
LOG.warn(
"Unable to perform constant expression reduction. " +
"An exception occurred during the evaluation. " +
"One or more expressions will be executed unreduced.",
t)
reducedValues.addAll(constExprs)
return
} finally {
richMapFunction.close()
}
// add the reduced results or keep them unreduced
var i = 0
var reducedIdx = 0
while (i < constExprs.size()) {
val unreduced = constExprs.get(i)
// use eq to compare reference
if (pythonUDFExprs.exists(_ eq unreduced)) {
// if contains python function then just insert the original expression.
reducedValues.add(unreduced)
} else unreduced match {
case call: RexCall if nonReducibleJsonFunctions.contains(call.getOperator) =>
reducedValues.add(unreduced)
case _ =>
unreduced.getType.getSqlTypeName match {
// we insert the original expression for object literals
case SqlTypeName.ANY |
SqlTypeName.OTHER |
SqlTypeName.ROW |
SqlTypeName.STRUCTURED |
SqlTypeName.ARRAY |
SqlTypeName.MAP |
SqlTypeName.MULTISET =>
reducedValues.add(unreduced)
case SqlTypeName.VARCHAR | SqlTypeName.CHAR =>
val escapeVarchar = BinaryStringDataUtil.safeToString(
reduced.getField(reducedIdx).asInstanceOf[BinaryStringData])
reducedValues.add(maySkipNullLiteralReduce(rexBuilder, escapeVarchar, unreduced))
reducedIdx += 1
case SqlTypeName.VARBINARY | SqlTypeName.BINARY =>
val reducedValue = reduced.getField(reducedIdx)
val value = if (null != reducedValue) {
new ByteString(reduced.getField(reducedIdx).asInstanceOf[Array[Byte]])
} else {
reducedValue
}
reducedValues.add(maySkipNullLiteralReduce(rexBuilder, value, unreduced))
reducedIdx += 1
case SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE =>
val reducedValue = reduced.getField(reducedIdx)
val value = if (reducedValue != null) {
val dt = reducedValue.asInstanceOf[TimestampData].toLocalDateTime
fromLocalDateTime(dt)
} else {
reducedValue
}
reducedValues.add(maySkipNullLiteralReduce(rexBuilder, value, unreduced))
reducedIdx += 1
case SqlTypeName.DECIMAL =>
val reducedValue = reduced.getField(reducedIdx)
val value = if (reducedValue != null) {
reducedValue.asInstanceOf[DecimalData].toBigDecimal
} else {
reducedValue
}
reducedValues.add(maySkipNullLiteralReduce(rexBuilder, value, unreduced))
reducedIdx += 1
case SqlTypeName.TIMESTAMP =>
val reducedValue = reduced.getField(reducedIdx)
val value = if (reducedValue != null) {
val dt = reducedValue.asInstanceOf[TimestampData].toLocalDateTime
fromLocalDateTime(dt)
} else {
reducedValue
}
reducedValues.add(maySkipNullLiteralReduce(rexBuilder, value, unreduced))
reducedIdx += 1
case _ =>
val reducedValue = reduced.getField(reducedIdx)
// RexBuilder handles double literals incorrectly, so convert them into BigDecimal manually
val value = if (reducedValue != null &&
unreduced.getType.getSqlTypeName == SqlTypeName.DOUBLE) {
new java.math.BigDecimal(reducedValue.asInstanceOf[Number].doubleValue())
} else {
reducedValue
}
reducedValues.add(maySkipNullLiteralReduce(rexBuilder, value, unreduced))
reducedIdx += 1
}
}
i += 1
}
}
// We may skip the reduction if the original constant is invalid and was cast to a null literal,
// because doing so may change the nullability of the RexNode and of its parent node.
def maySkipNullLiteralReduce(
rexBuilder: RexBuilder,
value: Object,
unreduced: RexNode): RexNode = {
if (!allowChangeNullability
&& value == null
&& !unreduced.getType.isNullable) {
return unreduced
}
// used by the Table API when applying '+' to two strings.
val valueArg = if (SqlTypeName.CHAR_TYPES.contains(unreduced.getType.getSqlTypeName) &&
value != null) {
value.toString
} else {
value
}
// if allowChangeNullability is allowed, we can reduce the outer abstract cast if the unreduced
// expr type is nullable.
val targetType = if (allowChangeNullability && unreduced.getType.isNullable) {
rexBuilder.getTypeFactory.createTypeWithNullability(unreduced.getType, false)
} else {
unreduced.getType
}
rexBuilder.makeLiteral(
valueArg,
targetType,
true)
}
/**
* skip the expressions that can't be reduced now
* and validate the expressions
*/
private def skipAndValidateExprs(
rexBuilder: RexBuilder,
constExprs: java.util.List[RexNode],
pythonUDFExprs: ListBuffer[RexNode]): List[RexNode] = {
constExprs.asScala.map(e => (e.getType.getSqlTypeName, e)).flatMap {
// Skip expressions that contain Python functions because it is quite expensive to
// call Python UDFs during the optimization phase. They will be evaluated at runtime.
case (_, e) if containsPythonCall(e) =>
pythonUDFExprs += e
None
// we don't support object literals yet, we skip those constant expressions
case (SqlTypeName.ANY, _) |
(SqlTypeName.OTHER, _) |
(SqlTypeName.ROW, _) |
(SqlTypeName.STRUCTURED, _) |
(SqlTypeName.ARRAY, _) |
(SqlTypeName.MAP, _) |
(SqlTypeName.MULTISET, _) => None
case (_, call: RexCall) => {
// to ensure the division is non-zero when the operator is DIVIDE
if (call.getOperator.getKind.equals(SqlKind.DIVIDE)) {
val ops = call.getOperands
val divisionLiteral = ops.get(ops.size() - 1)
// according to BuiltInFunctionDefinitions, the DIVIDE's second operand must be numeric
assert(RexUtil.isDeterministic(divisionLiteral))
val divisionComparable =
divisionLiteral.asInstanceOf[RexLiteral].getValue.asInstanceOf[Comparable[Any]]
val zeroComparable = rexBuilder.makeExactLiteral(
new java.math.BigDecimal(0))
.getValue.asInstanceOf[Comparable[Any]]
if (divisionComparable.compareTo(zeroComparable) == 0) {
throw new ArithmeticException("Division by zero")
}
}
// Exclude some JSON functions which behave differently
// when called as an argument of another call of one of these functions.
if (nonReducibleJsonFunctions.contains(call.getOperator)) {
None
} else {
Some(call)
}
}
case (_, e) => Some(e)
}.toList
}
}
/**
* Constant expression code generator context.
*/
class ConstantCodeGeneratorContext(tableConfig: TableConfig)
extends CodeGeneratorContext(tableConfig) {
override def addReusableFunction(
function: UserDefinedFunction,
functionContextClass: Class[_ <: FunctionContext] = classOf[FunctionContext],
runtimeContextTerm: String = null): String = {
super.addReusableFunction(function, classOf[ConstantFunctionContext], "parameters")
}
override def addReusableConverter(
dataType: DataType,
classLoaderTerm: String = null)
: String = {
super.addReusableConverter(dataType, "this.getClass().getClassLoader()")
}
}
| zjureel/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/ExpressionReducer.scala | Scala | apache-2.0 | 12,458 |
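A hypothetical wiring sketch showing where a `RexExecutor` such as `ExpressionReducer` plugs in: Calcite's planner calls the registered executor's `reduce` on constant sub-expressions during optimization. The planner and config instances are assumed to come from the surrounding Flink planner setup:

import org.apache.calcite.plan.RelOptPlanner
import org.apache.flink.table.api.TableConfig

// Sketch only: register the reducer as the planner's constant-expression executor.
object ExpressionReducerWiring {
  def install(planner: RelOptPlanner, tableConfig: TableConfig): Unit = {
    val reducer = new ExpressionReducer(tableConfig, allowChangeNullability = true)
    planner.setExecutor(reducer) // subsequent expression-reduction rules use it
  }
}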
package com.zhranklin.homepage.im
import java.io.{BufferedReader, InputStreamReader}
import java.net._
import com.typesafe.config.ConfigFactory
import scala.concurrent.Future
object ImPortRegisterer {
val localPort = ConfigFactory.load().getInt("settings.socket.server_port")
val ips = collection.mutable.Map.empty[String, String]
import scala.concurrent.ExecutionContext.Implicits.global
Future {
while(true) {
val ss = new ServerSocket(localPort)
val socket = ss.accept()
val (address, port) = (socket.getInetAddress.getHostName, socket.getPort)
val rdr = new BufferedReader(new InputStreamReader(socket.getInputStream))
println("begin readline")
val result = rdr.readLine()
ips += (result -> s"$address:$port")
println(s"add ip: $address:$port")
rdr.close()
socket.close()
ss.close()
}
}
}
| zhranklin/Private_Blog | server/src/main/scala/com/zhranklin/homepage/im/ImPortRegisterer.scala | Scala | gpl-3.0 | 882 |
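A hypothetical client-side counterpart, only to show the protocol the accept loop above expects: a peer connects to the configured port and sends a single identifying line, which becomes the key in the `ips` map. The host, port, and identifier below are assumptions:

import java.io.PrintWriter
import java.net.Socket

// Sketch of a peer registering itself with ImPortRegisterer.
object ImRegisterClient extends App {
  val socket = new Socket("localhost", 9000) // assumed settings.socket.server_port
  val out = new PrintWriter(socket.getOutputStream, true)
  out.println("my-client-id") // matched by the readLine() on the server side
  out.close()
  socket.close()
}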
package org.scalajs.jsenv
import scala.concurrent.duration.Duration
trait ComJSRunner extends AsyncJSRunner {
/** Send a message to the JS VM. Throws if the message cannot be sent. */
def send(msg: String): Unit
/** Blocks until a message is received and returns it.
*
* @throws ComClosedException if the channel is closed before a message
* is received
*/
final def receive(): String = receive(Duration.Inf)
/** Blocks until a message is received and returns it.
*
* @throws ComClosedException if the channel is closed before a message
* is received
* @throws scala.concurrent.TimeoutException if the timeout expires
* before a message is received and the channel is still open
*/
def receive(timeout: Duration): String
/** Close the communication channel. Allows the VM to terminate if it is
* still waiting for a callback. The JVM side **must** call close in
* order to be able to expect termination of the VM.
*
* Calling [[stop]] on a [[ComJSRunner]] automatically closes the
* channel.
*/
def close(): Unit
/** Abort the associated run. Also closes the communication channel. */
abstract override def stop(): Unit = {
close()
super.stop()
}
}
| doron123/scala-js | js-envs/src/main/scala/org/scalajs/jsenv/ComJSRunner.scala | Scala | bsd-3-clause | 1,248 |
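A minimal usage sketch of the send/receive/close protocol documented above; `runner` is assumed to be a started `ComJSRunner` obtained from some JS environment, and the message contents are illustrative:

import scala.concurrent.duration._

// Sketch only: round-trip one message and always close the channel.
object ComJSRunnerExample {
  def pingPong(runner: ComJSRunner): String = {
    runner.send("ping")
    try runner.receive(5.seconds)
    finally runner.close()
  }
}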
package com.technophobia.substeps.domain
import com.technophobia.substeps.domain.repositories.SubstepRepository
import com.technophobia.substeps.domain.execution.RunResult
import com.technophobia.substeps.domain.events.{ExecutionCompleted, ExecutionStarted, DomainEventPublisher}
/**
* @author rbarefield
*/
case class Background(val title: String, steps: List[SubstepInvocation]) {
def run() : RunResult = {
DomainEventPublisher.instance().publish(ExecutionStarted(this))
val result = steps.foldLeft[RunResult](RunResult.NoneRun)((b,a) => b.combine(a.run()))
DomainEventPublisher.instance().publish(ExecutionCompleted(this, result))
result
}
}
object Background {
def apply(substepRepository: SubstepRepository, title: String, steps: List[String]) : Background = {
new Background(title, steps.map(SubstepInvocation(substepRepository, _)))
}
}
| G2G3Digital/substeps-scala-core | src/main/scala/com/technophobia/substeps/domain/Background.scala | Scala | lgpl-3.0 | 883 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.singleTest
import org.jetbrains.plugins.scala.testingSupport.scalatest.generators.FreeSpecGenerator
/**
* @author Roman.Shein
* @since 20.01.2015.
*/
trait FreeSpecSingleTestTest extends FreeSpecGenerator {
val freeSpecTestPath = List("[root]", freeSpecClassName, "A FreeSpecTest", "should be able to run single tests")
def testFreeSpec() {
runTestByLocation(6, 3, freeSpecFileName,
checkConfigAndSettings(_, freeSpecClassName, "A FreeSpecTest should be able to run single tests"),
root => checkResultTreeHasExactNamedPath(root, freeSpecTestPath:_*) &&
checkResultTreeDoesNotHaveNodes(root, "should not run tests that are not selected")
)
}
}
| ilinum/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/scalatest/singleTest/FreeSpecSingleTestTest.scala | Scala | apache-2.0 | 746 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
import java.io._
import java.net._
import java.nio.ByteBuffer
import java.nio.channels.SocketChannel
import java.util.{HashMap, Properties, Random}
import com.yammer.metrics.core.{Gauge, Meter}
import com.yammer.metrics.{Metrics => YammerMetrics}
import javax.net.ssl._
import kafka.security.CredentialProvider
import kafka.server.{KafkaConfig, ThrottledChannel}
import kafka.utils.Implicits._
import kafka.utils.TestUtils
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.memory.MemoryPool
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.network.KafkaChannel.ChannelMuteState
import org.apache.kafka.common.network.{ChannelBuilder, ChannelState, KafkaChannel, ListenerName, NetworkReceive, NetworkSend, Selector, Send}
import org.apache.kafka.common.protocol.{ApiKeys, Errors}
import org.apache.kafka.common.record.MemoryRecords
import org.apache.kafka.common.requests.{AbstractRequest, ProduceRequest, RequestHeader}
import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol}
import org.apache.kafka.common.security.scram.internals.ScramMechanism
import org.apache.kafka.common.utils.{LogContext, MockTime, Time}
import org.apache.log4j.Level
import org.junit.Assert._
import org.junit._
import org.scalatest.junit.JUnitSuite
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.control.ControlThrowable
class SocketServerTest extends JUnitSuite {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
props.put("listeners", "PLAINTEXT://localhost:0")
props.put("num.network.threads", "1")
props.put("socket.send.buffer.bytes", "300000")
props.put("socket.receive.buffer.bytes", "300000")
props.put("queued.max.requests", "50")
props.put("socket.request.max.bytes", "50")
props.put("max.connections.per.ip", "5")
props.put("connections.max.idle.ms", "60000")
val config = KafkaConfig.fromProps(props)
val metrics = new Metrics
val credentialProvider = new CredentialProvider(ScramMechanism.mechanismNames, null)
val localAddress = InetAddress.getLoopbackAddress
// Clean-up any metrics left around by previous tests
for (metricName <- YammerMetrics.defaultRegistry.allMetrics.keySet.asScala)
YammerMetrics.defaultRegistry.removeMetric(metricName)
val server = new SocketServer(config, metrics, Time.SYSTEM, credentialProvider)
server.startup()
val sockets = new ArrayBuffer[Socket]
private val kafkaLogger = org.apache.log4j.LogManager.getLogger("kafka")
private var logLevelToRestore: Level = _
@Before
def setUp(): Unit = {
// Run the tests with TRACE logging to exercise request logging path
logLevelToRestore = kafkaLogger.getLevel
kafkaLogger.setLevel(Level.TRACE)
}
@After
def tearDown() {
shutdownServerAndMetrics(server)
sockets.foreach(_.close())
sockets.clear()
kafkaLogger.setLevel(logLevelToRestore)
}
def sendRequest(socket: Socket, request: Array[Byte], id: Option[Short] = None, flush: Boolean = true) {
val outgoing = new DataOutputStream(socket.getOutputStream)
id match {
case Some(id) =>
outgoing.writeInt(request.length + 2)
outgoing.writeShort(id)
case None =>
outgoing.writeInt(request.length)
}
outgoing.write(request)
if (flush)
outgoing.flush()
}
def receiveResponse(socket: Socket): Array[Byte] = {
val incoming = new DataInputStream(socket.getInputStream)
val len = incoming.readInt()
val response = new Array[Byte](len)
incoming.readFully(response)
response
}
private def receiveRequest(channel: RequestChannel, timeout: Long = 2000L): RequestChannel.Request = {
channel.receiveRequest(timeout) match {
case request: RequestChannel.Request => request
case RequestChannel.ShutdownRequest => fail("Unexpected shutdown received")
case null => fail("receiveRequest timed out")
}
}
/* A simple request handler that just echoes the request back as the response */
def processRequest(channel: RequestChannel) {
processRequest(channel, receiveRequest(channel))
}
def processRequest(channel: RequestChannel, request: RequestChannel.Request) {
val byteBuffer = request.body[AbstractRequest].serialize(request.header)
byteBuffer.rewind()
val send = new NetworkSend(request.context.connectionId, byteBuffer)
channel.sendResponse(new RequestChannel.SendResponse(request, send, Some(request.header.toString), None))
}
def connect(s: SocketServer = server, listenerName: ListenerName = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), localAddr: InetAddress = null, port: Int = 0) = {
val socket = new Socket("localhost", s.boundPort(listenerName), localAddr, port)
sockets += socket
socket
}
// Create a client connection, process one request and return (client socket, connectionId)
def connectAndProcessRequest(s: SocketServer): (Socket, String) = {
val socket = connect(s)
val request = sendAndReceiveRequest(socket, s)
processRequest(s.dataPlaneRequestChannel, request)
(socket, request.context.connectionId)
}
def sendAndReceiveRequest(socket: Socket, server: SocketServer): RequestChannel.Request = {
sendRequest(socket, producerRequestBytes())
receiveRequest(server.dataPlaneRequestChannel)
}
def shutdownServerAndMetrics(server: SocketServer): Unit = {
server.shutdown()
server.metrics.close()
}
private def producerRequestBytes(ack: Short = 0): Array[Byte] = {
val correlationId = -1
val clientId = ""
val ackTimeoutMs = 10000
val emptyRequest = ProduceRequest.Builder.forCurrentMagic(ack, ackTimeoutMs,
new HashMap[TopicPartition, MemoryRecords]()).build()
val emptyHeader = new RequestHeader(ApiKeys.PRODUCE, emptyRequest.version, clientId, correlationId)
val byteBuffer = emptyRequest.serialize(emptyHeader)
byteBuffer.rewind()
val serializedBytes = new Array[Byte](byteBuffer.remaining)
byteBuffer.get(serializedBytes)
serializedBytes
}
@Test
def simpleRequest() {
val plainSocket = connect()
val serializedBytes = producerRequestBytes()
// Test PLAINTEXT socket
sendRequest(plainSocket, serializedBytes)
processRequest(server.dataPlaneRequestChannel)
assertEquals(serializedBytes.toSeq, receiveResponse(plainSocket).toSeq)
verifyAcceptorBlockedPercent("PLAINTEXT", expectBlocked = false)
}
@Test
def testControlPlaneRequest(): Unit = {
val testProps = new Properties
testProps ++= props
testProps.put("listeners", "PLAINTEXT://localhost:0,CONTROLLER://localhost:5000")
testProps.put("listener.security.protocol.map", "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT")
testProps.put("control.plane.listener.name", "CONTROLLER")
val config = KafkaConfig.fromProps(testProps)
withTestableServer(config, { testableServer =>
val socket = connect(testableServer, config.controlPlaneListenerName.get, localAddr = InetAddress.getLocalHost, port = 5000)
sendAndReceiveControllerRequest(socket, testableServer)
})
}
@Test
def tooBigRequestIsRejected() {
val tooManyBytes = new Array[Byte](server.config.socketRequestMaxBytes + 1)
new Random().nextBytes(tooManyBytes)
val socket = connect()
val outgoing = new DataOutputStream(socket.getOutputStream)
outgoing.writeInt(tooManyBytes.length)
try {
// Server closes client connection when it processes the request length because
// it is too big. The write of request body may fail if the connection has been closed.
outgoing.write(tooManyBytes)
outgoing.flush()
receiveResponse(socket)
} catch {
case _: IOException => // that's fine
}
}
@Test
def testGracefulClose() {
val plainSocket = connect()
val serializedBytes = producerRequestBytes()
for (_ <- 0 until 10)
sendRequest(plainSocket, serializedBytes)
plainSocket.close()
for (_ <- 0 until 10) {
val request = receiveRequest(server.dataPlaneRequestChannel)
assertNotNull("receiveRequest timed out", request)
server.dataPlaneRequestChannel.sendResponse(new RequestChannel.NoOpResponse(request))
}
}
@Test
def testNoOpAction(): Unit = {
val plainSocket = connect()
val serializedBytes = producerRequestBytes()
for (_ <- 0 until 3)
sendRequest(plainSocket, serializedBytes)
for (_ <- 0 until 3) {
val request = receiveRequest(server.dataPlaneRequestChannel)
assertNotNull("receiveRequest timed out", request)
server.dataPlaneRequestChannel.sendResponse(new RequestChannel.NoOpResponse(request))
}
}
@Test
def testConnectionId() {
val sockets = (1 to 5).map(_ => connect())
val serializedBytes = producerRequestBytes()
val requests = sockets.map{socket =>
sendRequest(socket, serializedBytes)
receiveRequest(server.dataPlaneRequestChannel)
}
requests.zipWithIndex.foreach { case (request, i) =>
val index = request.context.connectionId.split("-").last
assertEquals(i.toString, index)
}
sockets.foreach(_.close)
}
@Test
def testIdleConnection() {
val idleTimeMs = 60000
val time = new MockTime()
props.put(KafkaConfig.ConnectionsMaxIdleMsProp, idleTimeMs.toString)
val serverMetrics = new Metrics
val overrideServer = new SocketServer(KafkaConfig.fromProps(props), serverMetrics, time, credentialProvider)
def openChannel(request: RequestChannel.Request): Option[KafkaChannel] =
overrideServer.dataPlaneProcessor(request.processor).channel(request.context.connectionId)
def openOrClosingChannel(request: RequestChannel.Request): Option[KafkaChannel] =
overrideServer.dataPlaneProcessor(request.processor).openOrClosingChannel(request.context.connectionId)
try {
overrideServer.startup()
val serializedBytes = producerRequestBytes()
// Connection with no staged receives
val socket1 = connect(overrideServer)
sendRequest(socket1, serializedBytes)
val request1 = receiveRequest(overrideServer.dataPlaneRequestChannel)
assertTrue("Channel not open", openChannel(request1).nonEmpty)
assertEquals(openChannel(request1), openOrClosingChannel(request1))
time.sleep(idleTimeMs + 1)
TestUtils.waitUntilTrue(() => openOrClosingChannel(request1).isEmpty, "Failed to close idle channel")
assertTrue("Channel not removed", openChannel(request1).isEmpty)
processRequest(overrideServer.dataPlaneRequestChannel, request1)
// Connection with staged receives
val socket2 = connect(overrideServer)
val request2 = sendRequestsUntilStagedReceive(overrideServer, socket2, serializedBytes)
time.sleep(idleTimeMs + 1)
TestUtils.waitUntilTrue(() => openChannel(request2).isEmpty, "Failed to close idle channel")
TestUtils.waitUntilTrue(() => openOrClosingChannel(request2).nonEmpty, "Channel removed without processing staged receives")
processRequest(overrideServer.dataPlaneRequestChannel, request2) // this triggers a failed send since channel has been closed
TestUtils.waitUntilTrue(() => openOrClosingChannel(request2).isEmpty, "Failed to remove channel with failed sends")
assertNull("Received request after failed send", overrideServer.dataPlaneRequestChannel.receiveRequest(200))
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testConnectionIdReuse() {
val idleTimeMs = 60000
val time = new MockTime()
props.put(KafkaConfig.ConnectionsMaxIdleMsProp, idleTimeMs.toString)
props.put("listeners", "PLAINTEXT://localhost:0")
val serverMetrics = new Metrics
@volatile var selector: TestableSelector = null
val overrideConnectionId = "127.0.0.1:1-127.0.0.1:2-0"
val overrideServer = new SocketServer(KafkaConfig.fromProps(props), serverMetrics, time, credentialProvider) {
override def newProcessor(id: Int, requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
protocol: SecurityProtocol, memoryPool: MemoryPool): Processor = {
new Processor(id, time, config.socketRequestMaxBytes, dataPlaneRequestChannel, connectionQuotas,
config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs, listenerName, protocol, config, metrics,
credentialProvider, memoryPool, new LogContext()) {
override protected[network] def connectionId(socket: Socket): String = overrideConnectionId
override protected[network] def createSelector(channelBuilder: ChannelBuilder): Selector = {
val testableSelector = new TestableSelector(config, channelBuilder, time, metrics)
selector = testableSelector
testableSelector
}
}
}
}
def openChannel: Option[KafkaChannel] = overrideServer.dataPlaneProcessor(0).channel(overrideConnectionId)
def openOrClosingChannel: Option[KafkaChannel] = overrideServer.dataPlaneProcessor(0).openOrClosingChannel(overrideConnectionId)
def connectionCount = overrideServer.connectionCount(InetAddress.getByName("127.0.0.1"))
// Create a client connection and wait for server to register the connection with the selector. For
// test scenarios below where `Selector.register` fails, the wait ensures that checks are performed
// only after `register` is processed by the server.
def connectAndWaitForConnectionRegister(): Socket = {
val connections = selector.operationCounts(SelectorOperation.Register)
val socket = connect(overrideServer)
TestUtils.waitUntilTrue(() =>
selector.operationCounts(SelectorOperation.Register) == connections + 1, "Connection not registered")
socket
}
try {
overrideServer.startup()
val socket1 = connectAndWaitForConnectionRegister()
TestUtils.waitUntilTrue(() => connectionCount == 1 && openChannel.isDefined, "Failed to create channel")
val channel1 = openChannel.getOrElse(throw new RuntimeException("Channel not found"))
// Create new connection with same id when `channel1` is still open and in Selector.channels
// Check that new connection is closed and openChannel still contains `channel1`
connectAndWaitForConnectionRegister()
TestUtils.waitUntilTrue(() => connectionCount == 1, "Failed to close channel")
assertSame(channel1, openChannel.getOrElse(throw new RuntimeException("Channel not found")))
// Send requests to `channel1` until a receive is staged and advance time beyond idle time so that `channel1` is
// closed with staged receives and is in Selector.closingChannels
val serializedBytes = producerRequestBytes()
val request = sendRequestsUntilStagedReceive(overrideServer, socket1, serializedBytes)
time.sleep(idleTimeMs + 1)
TestUtils.waitUntilTrue(() => openChannel.isEmpty, "Idle channel not closed")
TestUtils.waitUntilTrue(() => openOrClosingChannel.isDefined, "Channel removed without processing staged receives")
// Create new connection with same id when `channel1` is in Selector.closingChannels
// Check that new connection is closed and openOrClosingChannel still contains `channel1`
connectAndWaitForConnectionRegister()
TestUtils.waitUntilTrue(() => connectionCount == 1, "Failed to close channel")
assertSame(channel1, openOrClosingChannel.getOrElse(throw new RuntimeException("Channel not found")))
// Complete request with failed send so that `channel1` is removed from Selector.closingChannels
processRequest(overrideServer.dataPlaneRequestChannel, request)
TestUtils.waitUntilTrue(() => connectionCount == 0 && openOrClosingChannel.isEmpty, "Failed to remove channel with failed send")
// Check that new connections can be created with the same id since `channel1` is no longer in Selector
connectAndWaitForConnectionRegister()
TestUtils.waitUntilTrue(() => connectionCount == 1 && openChannel.isDefined, "Failed to open new channel")
val newChannel = openChannel.getOrElse(throw new RuntimeException("Channel not found"))
assertNotSame(channel1, newChannel)
newChannel.disconnect()
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
private def sendRequestsUntilStagedReceive(server: SocketServer, socket: Socket, requestBytes: Array[Byte]): RequestChannel.Request = {
def sendTwoRequestsReceiveOne(): RequestChannel.Request = {
sendRequest(socket, requestBytes, flush = false)
sendRequest(socket, requestBytes, flush = true)
receiveRequest(server.dataPlaneRequestChannel)
}
val (request, hasStagedReceives) = TestUtils.computeUntilTrue(sendTwoRequestsReceiveOne()) { req =>
val connectionId = req.context.connectionId
val hasStagedReceives = server.dataPlaneProcessor(0).numStagedReceives(connectionId) > 0
if (!hasStagedReceives) {
processRequest(server.dataPlaneRequestChannel, req)
processRequest(server.dataPlaneRequestChannel)
}
hasStagedReceives
}
assertTrue(s"Receives not staged for ${org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS} ms", hasStagedReceives)
request
}
// Prepares test setup for throttled channel tests. throttlingInProgress controls whether or not
// throttling is still in progress in the quota manager.
def throttledChannelTestSetUp(socket: Socket, serializedBytes: Array[Byte], noOpResponse: Boolean,
throttlingInProgress: Boolean): RequestChannel.Request = {
sendRequest(socket, serializedBytes)
// Mimic a primitive request handler that fetches the request from RequestChannel and places a response with a
// throttled channel.
val request = receiveRequest(server.dataPlaneRequestChannel)
val byteBuffer = request.body[AbstractRequest].serialize(request.header)
val send = new NetworkSend(request.context.connectionId, byteBuffer)
def channelThrottlingCallback(response: RequestChannel.Response): Unit = {
server.dataPlaneRequestChannel.sendResponse(response)
}
val throttledChannel = new ThrottledChannel(request, new MockTime(), 100, channelThrottlingCallback)
val response =
if (!noOpResponse)
new RequestChannel.SendResponse(request, send, Some(request.header.toString), None)
else
new RequestChannel.NoOpResponse(request)
server.dataPlaneRequestChannel.sendResponse(response)
// Quota manager would call notifyThrottlingDone() on throttling completion. Simulate it if throttlingInProgress is
// false.
if (!throttlingInProgress)
throttledChannel.notifyThrottlingDone()
request
}
def openOrClosingChannel(request: RequestChannel.Request): Option[KafkaChannel] =
server.dataPlaneProcessor(0).openOrClosingChannel(request.context.connectionId)
@Test
def testSendActionResponseWithThrottledChannelWhereThrottlingInProgress() {
val socket = connect()
val serializedBytes = producerRequestBytes()
// SendAction with throttling in progress
val request = throttledChannelTestSetUp(socket, serializedBytes, false, true)
// receive response
assertEquals(serializedBytes.toSeq, receiveResponse(socket).toSeq)
TestUtils.waitUntilTrue(() => openOrClosingChannel(request).exists(c => c.muteState() == ChannelMuteState.MUTED_AND_THROTTLED), "fail")
// Channel should still be muted.
assertTrue(openOrClosingChannel(request).exists(c => c.isMute()))
}
@Test
def testSendActionResponseWithThrottledChannelWhereThrottlingAlreadyDone() {
val socket = connect()
val serializedBytes = producerRequestBytes()
// SendAction with throttling in progress
val request = throttledChannelTestSetUp(socket, serializedBytes, false, false)
// receive response
assertEquals(serializedBytes.toSeq, receiveResponse(socket).toSeq)
// Since throttling is already done, the channel can be unmuted after sending out the response.
TestUtils.waitUntilTrue(() => openOrClosingChannel(request).exists(c => c.muteState() == ChannelMuteState.NOT_MUTED), "fail")
// Channel is now unmuted.
assertFalse(openOrClosingChannel(request).exists(c => c.isMute()))
}
@Test
def testNoOpActionResponseWithThrottledChannelWhereThrottlingInProgress() {
val socket = connect()
val serializedBytes = producerRequestBytes()
// SendAction with throttling in progress
val request = throttledChannelTestSetUp(socket, serializedBytes, true, true)
TestUtils.waitUntilTrue(() => openOrClosingChannel(request).exists(c => c.muteState() == ChannelMuteState.MUTED_AND_THROTTLED), "fail")
// Channel should still be muted.
assertTrue(openOrClosingChannel(request).exists(c => c.isMute()))
}
@Test
def testNoOpActionResponseWithThrottledChannelWhereThrottlingAlreadyDone() {
val socket = connect()
val serializedBytes = producerRequestBytes()
// SendAction with throttling in progress
val request = throttledChannelTestSetUp(socket, serializedBytes, true, false)
// Since throttling is already done, the channel can be unmuted.
TestUtils.waitUntilTrue(() => openOrClosingChannel(request).exists(c => c.muteState() == ChannelMuteState.NOT_MUTED), "fail")
// Channel is now unmuted.
assertFalse(openOrClosingChannel(request).exists(c => c.isMute()))
}
@Test
def testSocketsCloseOnShutdown() {
// open a connection
val plainSocket = connect()
plainSocket.setTcpNoDelay(true)
val bytes = new Array[Byte](40)
// send a request first to make sure the connection has been picked up by the socket server
sendRequest(plainSocket, bytes, Some(0))
processRequest(server.dataPlaneRequestChannel)
// the following sleep is necessary to reliably detect the connection close when we send data below
Thread.sleep(200L)
// make sure the sockets are open
server.dataPlaneAcceptors.asScala.values.foreach(acceptor => assertFalse(acceptor.serverChannel.socket.isClosed))
// then shutdown the server
shutdownServerAndMetrics(server)
val largeChunkOfBytes = new Array[Byte](1000000)
// doing a subsequent send should throw an exception as the connection should be closed.
// send a large chunk of bytes to trigger a socket flush
try {
sendRequest(plainSocket, largeChunkOfBytes, Some(0))
fail("expected exception when writing to closed plain socket")
} catch {
case _: IOException => // expected
}
}
@Test
def testMaxConnectionsPerIp() {
// make the maximum allowable number of connections
val conns = (0 until server.config.maxConnectionsPerIp).map(_ => connect())
// now try one more (should fail)
val conn = connect()
conn.setSoTimeout(3000)
assertEquals(-1, conn.getInputStream.read())
conn.close()
// it should succeed after closing one connection
val address = conns.head.getInetAddress
conns.head.close()
TestUtils.waitUntilTrue(() => server.connectionCount(address) < conns.length,
"Failed to decrement connection count after close")
val conn2 = connect()
val serializedBytes = producerRequestBytes()
sendRequest(conn2, serializedBytes)
val request = server.dataPlaneRequestChannel.receiveRequest(2000)
assertNotNull(request)
}
@Test
def testZeroMaxConnectionsPerIp() {
val newProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
newProps.setProperty(KafkaConfig.MaxConnectionsPerIpProp, "0")
newProps.setProperty(KafkaConfig.MaxConnectionsPerIpOverridesProp, "%s:%s".format("127.0.0.1", "5"))
val server = new SocketServer(KafkaConfig.fromProps(newProps), new Metrics(), Time.SYSTEM, credentialProvider)
try {
server.startup()
// make the maximum allowable number of connections
val conns = (0 until 5).map(_ => connect(server))
// now try one more (should fail)
val conn = connect(server)
conn.setSoTimeout(3000)
assertEquals(-1, conn.getInputStream.read())
conn.close()
// it should succeed after closing one connection
val address = conns.head.getInetAddress
conns.head.close()
TestUtils.waitUntilTrue(() => server.connectionCount(address) < conns.length,
"Failed to decrement connection count after close")
val conn2 = connect(server)
val serializedBytes = producerRequestBytes()
sendRequest(conn2, serializedBytes)
val request = server.dataPlaneRequestChannel.receiveRequest(2000)
assertNotNull(request)
// now try to connect from the external facing interface, which should fail
val conn3 = connect(s = server, localAddr = InetAddress.getLocalHost)
conn3.setSoTimeout(3000)
assertEquals(-1, conn3.getInputStream.read())
conn3.close()
} finally {
shutdownServerAndMetrics(server)
}
}
@Test
def testMaxConnectionsPerIpOverrides() {
val overrideNum = server.config.maxConnectionsPerIp + 1
val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
overrideProps.put(KafkaConfig.MaxConnectionsPerIpOverridesProp, s"localhost:$overrideNum")
val serverMetrics = new Metrics()
val overrideServer = new SocketServer(KafkaConfig.fromProps(overrideProps), serverMetrics, Time.SYSTEM, credentialProvider)
try {
overrideServer.startup()
// make the maximum allowable number of connections
val conns = (0 until overrideNum).map(_ => connect(overrideServer))
// it should succeed
val serializedBytes = producerRequestBytes()
sendRequest(conns.last, serializedBytes)
val request = overrideServer.dataPlaneRequestChannel.receiveRequest(2000)
assertNotNull(request)
// now try one more (should fail)
val conn = connect(overrideServer)
conn.setSoTimeout(3000)
assertEquals(-1, conn.getInputStream.read())
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testSslSocketServer() {
val trustStoreFile = File.createTempFile("truststore", ".jks")
val overrideProps = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, interBrokerSecurityProtocol = Some(SecurityProtocol.SSL),
trustStoreFile = Some(trustStoreFile))
overrideProps.put(KafkaConfig.ListenersProp, "SSL://localhost:0")
val serverMetrics = new Metrics
val overrideServer = new SocketServer(KafkaConfig.fromProps(overrideProps), serverMetrics, Time.SYSTEM, credentialProvider)
try {
overrideServer.startup()
val sslContext = SSLContext.getInstance("TLSv1.2")
sslContext.init(null, Array(TestUtils.trustAllCerts), new java.security.SecureRandom())
val socketFactory = sslContext.getSocketFactory
val sslSocket = socketFactory.createSocket("localhost",
overrideServer.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.SSL))).asInstanceOf[SSLSocket]
sslSocket.setNeedClientAuth(false)
val correlationId = -1
val clientId = ""
val ackTimeoutMs = 10000
val ack = 0: Short
val emptyRequest = ProduceRequest.Builder.forCurrentMagic(ack, ackTimeoutMs,
new HashMap[TopicPartition, MemoryRecords]()).build()
val emptyHeader = new RequestHeader(ApiKeys.PRODUCE, emptyRequest.version, clientId, correlationId)
val byteBuffer = emptyRequest.serialize(emptyHeader)
byteBuffer.rewind()
val serializedBytes = new Array[Byte](byteBuffer.remaining)
byteBuffer.get(serializedBytes)
sendRequest(sslSocket, serializedBytes)
processRequest(overrideServer.dataPlaneRequestChannel)
assertEquals(serializedBytes.toSeq, receiveResponse(sslSocket).toSeq)
sslSocket.close()
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testSessionPrincipal() {
val socket = connect()
val bytes = new Array[Byte](40)
sendRequest(socket, bytes, Some(0))
assertEquals(KafkaPrincipal.ANONYMOUS, receiveRequest(server.dataPlaneRequestChannel).session.principal)
}
/* Test that we update request metrics if the client closes the connection while the broker response is in flight. */
@Test
def testClientDisconnectionUpdatesRequestMetrics() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
val serverMetrics = new Metrics
var conn: Socket = null
val overrideServer = new SocketServer(KafkaConfig.fromProps(props), serverMetrics, Time.SYSTEM, credentialProvider) {
override def newProcessor(id: Int, requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
protocol: SecurityProtocol, memoryPool: MemoryPool): Processor = {
new Processor(id, time, config.socketRequestMaxBytes, dataPlaneRequestChannel, connectionQuotas,
config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs, listenerName, protocol, config, metrics,
credentialProvider, MemoryPool.NONE, new LogContext()) {
override protected[network] def sendResponse(response: RequestChannel.Response, responseSend: Send) {
conn.close()
super.sendResponse(response, responseSend)
}
}
}
}
try {
overrideServer.startup()
conn = connect(overrideServer)
val serializedBytes = producerRequestBytes()
sendRequest(conn, serializedBytes)
val channel = overrideServer.dataPlaneRequestChannel
val request = receiveRequest(channel)
val requestMetrics = channel.metrics(request.header.apiKey.name)
def totalTimeHistCount(): Long = requestMetrics.totalTimeHist.count
val expectedTotalTimeCount = totalTimeHistCount() + 1
// send a large buffer to ensure that the broker detects the client disconnection while writing to the socket channel.
// On Mac OS X, the initial write seems to always succeed and it is able to write up to 102400 bytes on the initial
// write. If the buffer is smaller than this, the write is considered complete and the disconnection is not
// detected. If the buffer is larger than 102400 bytes, a second write is attempted and it fails with an
// IOException.
val send = new NetworkSend(request.context.connectionId, ByteBuffer.allocate(550000))
channel.sendResponse(new RequestChannel.SendResponse(request, send, None, None))
TestUtils.waitUntilTrue(() => totalTimeHistCount() == expectedTotalTimeCount,
s"request metrics not updated, expected: $expectedTotalTimeCount, actual: ${totalTimeHistCount()}")
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testClientDisconnectionWithStagedReceivesFullyProcessed() {
val serverMetrics = new Metrics
@volatile var selector: TestableSelector = null
val overrideConnectionId = "127.0.0.1:1-127.0.0.1:2-0"
val overrideServer = new SocketServer(KafkaConfig.fromProps(props), serverMetrics, Time.SYSTEM, credentialProvider) {
override def newProcessor(id: Int, requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
protocol: SecurityProtocol, memoryPool: MemoryPool): Processor = {
new Processor(id, time, config.socketRequestMaxBytes, dataPlaneRequestChannel, connectionQuotas,
config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs, listenerName, protocol, config, metrics,
credentialProvider, memoryPool, new LogContext()) {
override protected[network] def connectionId(socket: Socket): String = overrideConnectionId
override protected[network] def createSelector(channelBuilder: ChannelBuilder): Selector = {
val testableSelector = new TestableSelector(config, channelBuilder, time, metrics)
selector = testableSelector
testableSelector
}
}
}
}
def openChannel: Option[KafkaChannel] = overrideServer.dataPlaneProcessor(0).channel(overrideConnectionId)
def openOrClosingChannel: Option[KafkaChannel] = overrideServer.dataPlaneProcessor(0).openOrClosingChannel(overrideConnectionId)
try {
overrideServer.startup()
val socket = connect(overrideServer)
TestUtils.waitUntilTrue(() => openChannel.nonEmpty, "Channel not found")
// Setup channel to client with staged receives so when client disconnects
// it will be stored in Selector.closingChannels
val serializedBytes = producerRequestBytes(1)
val request = sendRequestsUntilStagedReceive(overrideServer, socket, serializedBytes)
// Set SoLinger to 0 to force a hard disconnect via TCP RST
socket.setSoLinger(true, 0)
socket.close()
// Complete request with socket exception so that the channel is removed from Selector.closingChannels
processRequest(overrideServer.dataPlaneRequestChannel, request)
TestUtils.waitUntilTrue(() => openOrClosingChannel.isEmpty, "Channel not closed after failed send")
assertTrue("Unexpected completed send", selector.completedSends.isEmpty)
} finally {
overrideServer.shutdown()
serverMetrics.close()
}
}
/*
* Test that we update request metrics if the channel has been removed from the selector when the broker calls
* `selector.send` (selector closes old connections, for example).
*/
@Test
def testBrokerSendAfterChannelClosedUpdatesRequestMetrics() {
val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 0)
props.setProperty(KafkaConfig.ConnectionsMaxIdleMsProp, "110")
val serverMetrics = new Metrics
var conn: Socket = null
val overrideServer = new SocketServer(KafkaConfig.fromProps(props), serverMetrics, Time.SYSTEM, credentialProvider)
try {
overrideServer.startup()
conn = connect(overrideServer)
val serializedBytes = producerRequestBytes()
sendRequest(conn, serializedBytes)
val channel = overrideServer.dataPlaneRequestChannel
val request = receiveRequest(channel)
TestUtils.waitUntilTrue(() => overrideServer.dataPlaneProcessor(request.processor).channel(request.context.connectionId).isEmpty,
s"Idle connection `${request.context.connectionId}` was not closed by selector")
val requestMetrics = channel.metrics(request.header.apiKey.name)
def totalTimeHistCount(): Long = requestMetrics.totalTimeHist.count
val expectedTotalTimeCount = totalTimeHistCount() + 1
processRequest(channel, request)
TestUtils.waitUntilTrue(() => totalTimeHistCount() == expectedTotalTimeCount,
s"request metrics not updated, expected: $expectedTotalTimeCount, actual: ${totalTimeHistCount()}")
} finally {
shutdownServerAndMetrics(overrideServer)
}
}
@Test
def testRequestMetricsAfterStop(): Unit = {
server.stopProcessingRequests()
val version = ApiKeys.PRODUCE.latestVersion
val version2 = (version - 1).toShort
for (_ <- 0 to 1) server.dataPlaneRequestChannel.metrics(ApiKeys.PRODUCE.name).requestRate(version).mark()
server.dataPlaneRequestChannel.metrics(ApiKeys.PRODUCE.name).requestRate(version2).mark()
assertEquals(2, server.dataPlaneRequestChannel.metrics(ApiKeys.PRODUCE.name).requestRate(version).count())
server.dataPlaneRequestChannel.updateErrorMetrics(ApiKeys.PRODUCE, Map(Errors.NONE -> 1))
val nonZeroMeters = Map(s"kafka.network:type=RequestMetrics,name=RequestsPerSec,request=Produce,version=$version" -> 2,
s"kafka.network:type=RequestMetrics,name=RequestsPerSec,request=Produce,version=$version2" -> 1,
"kafka.network:type=RequestMetrics,name=ErrorsPerSec,request=Produce,error=NONE" -> 1)
def requestMetricMeters = YammerMetrics
.defaultRegistry
.allMetrics.asScala
.filterKeys(k => k.getType == "RequestMetrics")
.collect { case (k, metric: Meter) => (k.toString, metric.count) }
assertEquals(nonZeroMeters, requestMetricMeters.filter { case (_, value) => value != 0 })
server.shutdown()
assertEquals(Map.empty, requestMetricMeters)
}
@Test
def testMetricCollectionAfterShutdown(): Unit = {
server.shutdown()
val nonZeroMetricNamesAndValues = YammerMetrics
.defaultRegistry
.allMetrics.asScala
.filterKeys(k => k.getName.endsWith("IdlePercent") || k.getName.endsWith("NetworkProcessorAvgIdlePercent"))
.collect { case (k, metric: Gauge[_]) => (k, metric.value().asInstanceOf[Double]) }
.filter { case (_, value) => value != 0.0 && !value.equals(Double.NaN) }
assertEquals(Map.empty, nonZeroMetricNamesAndValues)
}
@Test
def testProcessorMetricsTags(): Unit = {
val kafkaMetricNames = metrics.metrics.keySet.asScala.filter(_.tags.asScala.get("listener").nonEmpty)
assertFalse(kafkaMetricNames.isEmpty)
val expectedListeners = Set("PLAINTEXT")
kafkaMetricNames.foreach { kafkaMetricName =>
assertTrue(expectedListeners.contains(kafkaMetricName.tags.get("listener")))
}
// legacy metrics not tagged
val yammerMetricsNames = YammerMetrics.defaultRegistry.allMetrics.asScala
.filterKeys(_.getType.equals("Processor"))
.collect { case (k, _: Gauge[_]) => k }
assertFalse(yammerMetricsNames.isEmpty)
yammerMetricsNames.foreach { yammerMetricName =>
assertFalse(yammerMetricName.getMBeanName.contains("listener="))
}
}
/**
* Tests exception handling in [[Processor.configureNewConnections]]. Exception is
* injected into [[Selector.register]] which is used to register each new connection.
* Test creates two connections in a single iteration by waking up the selector only
* when two connections are ready.
* Verifies that
* - first failed connection is closed
* - second connection is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def configureNewConnectionException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
testableSelector.updateMinWakeup(2)
testableSelector.addFailure(SelectorOperation.Register)
val sockets = (1 to 2).map(_ => connect(testableServer))
testableSelector.waitForOperations(SelectorOperation.Register, 2)
TestUtils.waitUntilTrue(() => testableServer.connectionCount(localAddress) == 1, "Failed channel not removed")
assertProcessorHealthy(testableServer, testableSelector.notFailed(sockets))
})
}
/**
* Tests exception handling in [[Processor.processNewResponses]]. Exception is
* injected into [[Selector.send]] which is used to send the new response.
* Test creates two responses in a single iteration by waking up the selector only
* when two responses are ready.
* Verifies that
* - first failed channel is closed
* - second response is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def processNewResponseException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
testableSelector.updateMinWakeup(2)
val sockets = (1 to 2).map(_ => connect(testableServer))
sockets.foreach(sendRequest(_, producerRequestBytes()))
testableServer.testableSelector.addFailure(SelectorOperation.Send)
sockets.foreach(_ => processRequest(testableServer.dataPlaneRequestChannel))
testableSelector.waitForOperations(SelectorOperation.Send, 2)
testableServer.waitForChannelClose(testableSelector.allFailedChannels.head, locallyClosed = true)
assertProcessorHealthy(testableServer, testableSelector.notFailed(sockets))
})
}
/**
* Tests exception handling in [[Processor.processNewResponses]] when [[Selector.send]]
* fails with `CancelledKeyException`, which is handled by the selector using a different
* code path. Test scenario is similar to [[SocketServerTest.processNewResponseException]].
*/
@Test
def sendCancelledKeyException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
testableSelector.updateMinWakeup(2)
val sockets = (1 to 2).map(_ => connect(testableServer))
sockets.foreach(sendRequest(_, producerRequestBytes()))
val requestChannel = testableServer.dataPlaneRequestChannel
val requests = sockets.map(_ => receiveRequest(requestChannel))
val failedConnectionId = requests(0).context.connectionId
// `KafkaChannel.disconnect()` cancels the selection key, triggering CancelledKeyException during send
testableSelector.channel(failedConnectionId).disconnect()
requests.foreach(processRequest(requestChannel, _))
testableSelector.waitForOperations(SelectorOperation.Send, 2)
testableServer.waitForChannelClose(failedConnectionId, locallyClosed = false)
val successfulSocket = if (isSocketConnectionId(failedConnectionId, sockets(0))) sockets(1) else sockets(0)
assertProcessorHealthy(testableServer, Seq(successfulSocket))
})
}
/**
* Tests exception handling in [[Processor.processNewResponses]] when [[Selector.send]]
* to a channel in closing state throws an exception. Test scenario is similar to
* [[SocketServerTest.processNewResponseException]].
*/
@Test
def closingChannelException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
testableSelector.updateMinWakeup(2)
val sockets = (1 to 2).map(_ => connect(testableServer))
val serializedBytes = producerRequestBytes()
val request = sendRequestsUntilStagedReceive(testableServer, sockets(0), serializedBytes)
sendRequest(sockets(1), serializedBytes)
testableSelector.addFailure(SelectorOperation.Send)
sockets(0).close()
processRequest(testableServer.dataPlaneRequestChannel, request)
processRequest(testableServer.dataPlaneRequestChannel) // Also process request from other channel
testableSelector.waitForOperations(SelectorOperation.Send, 2)
testableServer.waitForChannelClose(request.context.connectionId, locallyClosed = true)
assertProcessorHealthy(testableServer, Seq(sockets(1)))
})
}
/**
* Tests exception handling in [[Processor.processCompletedReceives]]. Exception is
* injected into [[Selector.mute]] which is used to mute the channel when a receive is complete.
* Test creates two receives in a single iteration by caching completed receives until two receives
* are complete.
* Verifies that
* - first failed channel is closed
* - second receive is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def processCompletedReceiveException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val sockets = (1 to 2).map(_ => connect(testableServer))
val testableSelector = testableServer.testableSelector
val requestChannel = testableServer.dataPlaneRequestChannel
testableSelector.cachedCompletedReceives.minPerPoll = 2
testableSelector.addFailure(SelectorOperation.Mute)
sockets.foreach(sendRequest(_, producerRequestBytes()))
val requests = sockets.map(_ => receiveRequest(requestChannel))
testableSelector.waitForOperations(SelectorOperation.Mute, 2)
testableServer.waitForChannelClose(testableSelector.allFailedChannels.head, locallyClosed = true)
requests.foreach(processRequest(requestChannel, _))
assertProcessorHealthy(testableServer, testableSelector.notFailed(sockets))
})
}
/**
* Tests exception handling in [[Processor.processCompletedSends]]. Exception is
* injected into [[Selector.unmute]] which is used to unmute the channel after send is complete.
* Test creates two completed sends in a single iteration by caching completed sends until two
* sends are complete.
* Verifies that
* - first failed channel is closed
* - second send is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def processCompletedSendException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val testableSelector = testableServer.testableSelector
val sockets = (1 to 2).map(_ => connect(testableServer))
val requests = sockets.map(sendAndReceiveRequest(_, testableServer))
testableSelector.addFailure(SelectorOperation.Unmute)
requests.foreach(processRequest(testableServer.dataPlaneRequestChannel, _))
testableSelector.waitForOperations(SelectorOperation.Unmute, 2)
testableServer.waitForChannelClose(testableSelector.allFailedChannels.head, locallyClosed = true)
assertProcessorHealthy(testableServer, testableSelector.notFailed(sockets))
})
}
/**
* Tests exception handling in [[Processor.processDisconnected]]. An invalid connectionId
* is inserted to the disconnected list just before the actual valid one.
* Verifies that
* - first invalid connectionId is ignored
* - second disconnected channel is processed successfully after the first fails with an exception
* - processor is healthy after the exception
*/
@Test
def processDisconnectedException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val (socket, connectionId) = connectAndProcessRequest(testableServer)
val testableSelector = testableServer.testableSelector
// Add an invalid connectionId to `Selector.disconnected` list before the actual disconnected channel
// and check that the actual id is processed and the invalid one ignored.
testableSelector.cachedDisconnected.minPerPoll = 2
testableSelector.cachedDisconnected.deferredValues += "notAValidConnectionId" -> ChannelState.EXPIRED
socket.close()
testableSelector.operationCounts.clear()
testableSelector.waitForOperations(SelectorOperation.Poll, 1)
testableServer.waitForChannelClose(connectionId, locallyClosed = false)
assertProcessorHealthy(testableServer)
})
}
/**
* Tests that `Processor` continues to function correctly after a failed [[Selector.poll]].
*/
@Test
def pollException(): Unit = {
withTestableServer (testWithServer = { testableServer =>
val (socket, _) = connectAndProcessRequest(testableServer)
val testableSelector = testableServer.testableSelector
testableSelector.addFailure(SelectorOperation.Poll)
testableSelector.operationCounts.clear()
testableSelector.waitForOperations(SelectorOperation.Poll, 2)
assertProcessorHealthy(testableServer, Seq(socket))
})
}
/**
* Tests handling of `ControlThrowable`. Verifies that the selector is closed.
*/
@Test
def controlThrowable(): Unit = {
withTestableServer (testWithServer = { testableServer =>
connectAndProcessRequest(testableServer)
val testableSelector = testableServer.testableSelector
testableSelector.operationCounts.clear()
testableSelector.addFailure(SelectorOperation.Poll,
Some(new RuntimeException("ControlThrowable exception during poll()") with ControlThrowable))
testableSelector.waitForOperations(SelectorOperation.Poll, 1)
testableSelector.waitForOperations(SelectorOperation.CloseSelector, 1)
})
}
@Test
def testConnectionRateLimit(): Unit = {
shutdownServerAndMetrics(server)
val numConnections = 5
props.put("max.connections.per.ip", numConnections.toString)
val testableServer = new TestableSocketServer(KafkaConfig.fromProps(props), connectionQueueSize = 1)
testableServer.startup()
val testableSelector = testableServer.testableSelector
val errors = new mutable.HashSet[String]
def acceptorStackTraces: scala.collection.Map[Thread, String] = {
Thread.getAllStackTraces.asScala.filterKeys(_.getName.contains("kafka-socket-acceptor"))
        .mapValues(_.toList.mkString("\n"))
}
def acceptorBlocked: Boolean = {
val stackTraces = acceptorStackTraces
if (stackTraces.isEmpty)
errors.add(s"Acceptor thread not found, threads=${Thread.getAllStackTraces.keySet}")
stackTraces.exists { case (thread, stackTrace) =>
thread.getState == Thread.State.WAITING && stackTrace.contains("ArrayBlockingQueue")
}
}
def registeredConnectionCount: Int = testableSelector.operationCounts.getOrElse(SelectorOperation.Register, 0)
try {
// Block selector until Acceptor is blocked while connections are pending
testableSelector.pollCallback = () => {
try {
TestUtils.waitUntilTrue(() => errors.nonEmpty || registeredConnectionCount >= numConnections - 1 || acceptorBlocked,
"Acceptor not blocked", waitTimeMs = 10000)
} catch {
case _: Throwable => errors.add(s"Acceptor not blocked: $acceptorStackTraces")
}
}
testableSelector.operationCounts.clear()
val sockets = (1 to numConnections).map(_ => connect(testableServer))
TestUtils.waitUntilTrue(() => errors.nonEmpty || registeredConnectionCount == numConnections,
"Connections not registered", waitTimeMs = 15000)
assertEquals(Set.empty, errors)
testableSelector.waitForOperations(SelectorOperation.Register, numConnections)
// In each iteration, SocketServer processes at most connectionQueueSize (1 in this test)
// new connections and then does poll() to process data from existing connections. So for
// 5 connections, we expect 5 iterations. Since we stop when the 5th connection is processed,
      // we can safely check that there were at least 4 polls prior to the 5th connection.
val pollCount = testableSelector.operationCounts(SelectorOperation.Poll)
assertTrue(s"Connections created too quickly: $pollCount", pollCount >= numConnections - 1)
verifyAcceptorBlockedPercent("PLAINTEXT", expectBlocked = true)
assertProcessorHealthy(testableServer, sockets)
} finally {
shutdownServerAndMetrics(testableServer)
}
}
private def withTestableServer(config : KafkaConfig = config, testWithServer: TestableSocketServer => Unit): Unit = {
props.put("listeners", "PLAINTEXT://localhost:0")
val testableServer = new TestableSocketServer(config)
testableServer.startup()
try {
testWithServer(testableServer)
} finally {
shutdownServerAndMetrics(testableServer)
}
}
def sendAndReceiveControllerRequest(socket: Socket, server: SocketServer): RequestChannel.Request = {
sendRequest(socket, producerRequestBytes())
receiveRequest(server.controlPlaneRequestChannelOpt.get)
}
private def assertProcessorHealthy(testableServer: TestableSocketServer, healthySockets: Seq[Socket] = Seq.empty): Unit = {
val selector = testableServer.testableSelector
selector.reset()
val requestChannel = testableServer.dataPlaneRequestChannel
// Check that existing channels behave as expected
healthySockets.foreach { socket =>
val request = sendAndReceiveRequest(socket, testableServer)
processRequest(requestChannel, request)
socket.close()
}
TestUtils.waitUntilTrue(() => testableServer.connectionCount(localAddress) == 0, "Channels not removed")
// Check new channel behaves as expected
val (socket, connectionId) = connectAndProcessRequest(testableServer)
assertArrayEquals(producerRequestBytes(), receiveResponse(socket))
assertNotNull("Channel should not have been closed", selector.channel(connectionId))
assertNull("Channel should not be closing", selector.closingChannel(connectionId))
socket.close()
TestUtils.waitUntilTrue(() => testableServer.connectionCount(localAddress) == 0, "Channels not removed")
}
// Since all sockets use the same local host, it is sufficient to check the local port
def isSocketConnectionId(connectionId: String, socket: Socket): Boolean =
connectionId.contains(s":${socket.getLocalPort}-")
private def verifyAcceptorBlockedPercent(listenerName: String, expectBlocked: Boolean): Unit = {
val blockedPercentMetricMBeanName = "kafka.network:type=Acceptor,name=AcceptorBlockedPercent,listener=PLAINTEXT"
val blockedPercentMetrics = YammerMetrics.defaultRegistry.allMetrics.asScala
.filterKeys(_.getMBeanName == blockedPercentMetricMBeanName).values
assertEquals(1, blockedPercentMetrics.size)
val blockedPercentMetric = blockedPercentMetrics.head.asInstanceOf[Meter]
val blockedPercent = blockedPercentMetric.meanRate
if (expectBlocked) {
assertTrue(s"Acceptor blocked percent not recorded: $blockedPercent", blockedPercent > 0.0)
assertTrue(s"Unexpected blocked percent in acceptor: $blockedPercent", blockedPercent <= 1.0)
} else {
assertEquals(0.0, blockedPercent, 0.001)
}
}
class TestableSocketServer(config : KafkaConfig = config, val connectionQueueSize: Int = 20) extends SocketServer(config,
new Metrics, Time.SYSTEM, credentialProvider) {
@volatile var selector: Option[TestableSelector] = None
override def newProcessor(id: Int, requestChannel: RequestChannel, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
protocol: SecurityProtocol, memoryPool: MemoryPool): Processor = {
new Processor(id, time, config.socketRequestMaxBytes, requestChannel, connectionQuotas, config.connectionsMaxIdleMs,
config.failedAuthenticationDelayMs, listenerName, protocol, config, metrics, credentialProvider,
memoryPool, new LogContext(), connectionQueueSize) {
override protected[network] def createSelector(channelBuilder: ChannelBuilder): Selector = {
val testableSelector = new TestableSelector(config, channelBuilder, time, metrics, metricTags.asScala)
selector = Some(testableSelector)
testableSelector
}
}
}
def testableSelector: TestableSelector =
selector.getOrElse(throw new IllegalStateException("Selector not created"))
def waitForChannelClose(connectionId: String, locallyClosed: Boolean): Unit = {
val selector = testableSelector
if (locallyClosed) {
TestUtils.waitUntilTrue(() => selector.allLocallyClosedChannels.contains(connectionId),
s"Channel not closed: $connectionId")
assertTrue("Unexpected disconnect notification", testableSelector.allDisconnectedChannels.isEmpty)
} else {
TestUtils.waitUntilTrue(() => selector.allDisconnectedChannels.contains(connectionId),
s"Disconnect notification not received: $connectionId")
assertTrue("Channel closed locally", testableSelector.allLocallyClosedChannels.isEmpty)
}
val openCount = selector.allChannels.size - 1 // minus one for the channel just closed above
TestUtils.waitUntilTrue(() => connectionCount(localAddress) == openCount, "Connection count not decremented")
TestUtils.waitUntilTrue(() =>
dataPlaneProcessor(0).inflightResponseCount == 0, "Inflight responses not cleared")
assertNull("Channel not removed", selector.channel(connectionId))
assertNull("Closing channel not removed", selector.closingChannel(connectionId))
}
}
sealed trait SelectorOperation
object SelectorOperation {
case object Register extends SelectorOperation
case object Poll extends SelectorOperation
case object Send extends SelectorOperation
case object Mute extends SelectorOperation
case object Unmute extends SelectorOperation
case object Wakeup extends SelectorOperation
case object Close extends SelectorOperation
case object CloseSelector extends SelectorOperation
}
class TestableSelector(config: KafkaConfig, channelBuilder: ChannelBuilder, time: Time, metrics: Metrics, metricTags: mutable.Map[String, String] = mutable.Map.empty)
extends Selector(config.socketRequestMaxBytes, config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs,
metrics, time, "socket-server", metricTags.asJava, false, true, channelBuilder, MemoryPool.NONE, new LogContext()) {
val failures = mutable.Map[SelectorOperation, Exception]()
val operationCounts = mutable.Map[SelectorOperation, Int]().withDefaultValue(0)
val allChannels = mutable.Set[String]()
val allLocallyClosedChannels = mutable.Set[String]()
val allDisconnectedChannels = mutable.Set[String]()
val allFailedChannels = mutable.Set[String]()
// Enable data from `Selector.poll()` to be deferred to a subsequent poll() until
// the number of elements of that type reaches `minPerPoll`. This enables tests to verify
// that failed processing doesn't impact subsequent processing within the same iteration.
class PollData[T] {
var minPerPoll = 1
val deferredValues = mutable.Buffer[T]()
val currentPollValues = mutable.Buffer[T]()
def update(newValues: mutable.Buffer[T]): Unit = {
if (currentPollValues.nonEmpty || deferredValues.size + newValues.size >= minPerPoll) {
if (deferredValues.nonEmpty) {
currentPollValues ++= deferredValues
deferredValues.clear()
}
currentPollValues ++= newValues
} else
deferredValues ++= newValues
}
def reset(): Unit = {
currentPollValues.clear()
}
}
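    // A sketch of the deferral behaviour above (hypothetical values): with minPerPoll = 2,
    //   pollData.update(mutable.Buffer(r1))   // deferredValues = [r1], currentPollValues = []
    //   pollData.update(mutable.Buffer(r2))   // currentPollValues = [r1, r2], deferredValues = []
    // so both values surface in the same poll iteration, letting a test inject a failure on the
    // first one and still verify that the second is processed.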
val cachedCompletedReceives = new PollData[NetworkReceive]()
val cachedCompletedSends = new PollData[Send]()
val cachedDisconnected = new PollData[(String, ChannelState)]()
val allCachedPollData = Seq(cachedCompletedReceives, cachedCompletedSends, cachedDisconnected)
@volatile var minWakeupCount = 0
@volatile var pollTimeoutOverride: Option[Long] = None
@volatile var pollCallback: () => Unit = () => {}
def addFailure(operation: SelectorOperation, exception: Option[Exception] = None) {
failures += operation ->
exception.getOrElse(new IllegalStateException(s"Test exception during $operation"))
}
private def onOperation(operation: SelectorOperation, connectionId: Option[String], onFailure: => Unit): Unit = {
operationCounts(operation) += 1
failures.remove(operation).foreach { e =>
connectionId.foreach(allFailedChannels.add)
onFailure
throw e
}
}
def waitForOperations(operation: SelectorOperation, minExpectedTotal: Int): Unit = {
TestUtils.waitUntilTrue(() =>
operationCounts.getOrElse(operation, 0) >= minExpectedTotal, "Operations not performed within timeout")
}
def runOp[T](operation: SelectorOperation, connectionId: Option[String],
onFailure: => Unit = {})(code: => T): T = {
// If a failure is set on `operation`, throw that exception even if `code` fails
try code
finally onOperation(operation, connectionId, onFailure)
}
override def register(id: String, socketChannel: SocketChannel): Unit = {
runOp(SelectorOperation.Register, Some(id), onFailure = close(id)) {
super.register(id, socketChannel)
}
}
override def send(s: Send): Unit = {
runOp(SelectorOperation.Send, Some(s.destination)) {
super.send(s)
}
}
override def poll(timeout: Long): Unit = {
try {
pollCallback.apply()
allCachedPollData.foreach(_.reset)
runOp(SelectorOperation.Poll, None) {
super.poll(pollTimeoutOverride.getOrElse(timeout))
}
} finally {
super.channels.asScala.foreach(allChannels += _.id)
allDisconnectedChannels ++= super.disconnected.asScala.keys
cachedCompletedReceives.update(super.completedReceives.asScala)
cachedCompletedSends.update(super.completedSends.asScala)
cachedDisconnected.update(super.disconnected.asScala.toBuffer)
}
}
override def mute(id: String): Unit = {
runOp(SelectorOperation.Mute, Some(id)) {
super.mute(id)
}
}
override def unmute(id: String): Unit = {
runOp(SelectorOperation.Unmute, Some(id)) {
super.unmute(id)
}
}
override def wakeup(): Unit = {
runOp(SelectorOperation.Wakeup, None) {
if (minWakeupCount > 0)
minWakeupCount -= 1
if (minWakeupCount <= 0)
super.wakeup()
}
}
override def disconnected: java.util.Map[String, ChannelState] = cachedDisconnected.currentPollValues.toMap.asJava
override def completedSends: java.util.List[Send] = cachedCompletedSends.currentPollValues.asJava
override def completedReceives: java.util.List[NetworkReceive] = cachedCompletedReceives.currentPollValues.asJava
override def close(id: String): Unit = {
runOp(SelectorOperation.Close, Some(id)) {
super.close(id)
allLocallyClosedChannels += id
}
}
override def close(): Unit = {
runOp(SelectorOperation.CloseSelector, None) {
super.close()
}
}
def updateMinWakeup(count: Int): Unit = {
minWakeupCount = count
// For tests that ignore wakeup to process responses together, increase poll timeout
// to ensure that poll doesn't complete before the responses are ready
pollTimeoutOverride = Some(1000L)
// Wakeup current poll to force new poll timeout to take effect
super.wakeup()
}
def reset(): Unit = {
failures.clear()
allCachedPollData.foreach(_.minPerPoll = 1)
}
def notFailed(sockets: Seq[Socket]): Seq[Socket] = {
// Each test generates failure for exactly one failed channel
assertEquals(1, allFailedChannels.size)
val failedConnectionId = allFailedChannels.head
sockets.filterNot(socket => isSocketConnectionId(failedConnectionId, socket))
}
}
}
|
gf53520/kafka
|
core/src/test/scala/unit/kafka/network/SocketServerTest.scala
|
Scala
|
apache-2.0
| 62,985 |
package com.twitter.finagle.stats
import org.scalatest.funsuite.AnyFunSuite
class LazyStatsReceiverTest extends AnyFunSuite {
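  // Both tests below exercise what appears to be the laziness contract of LazyStatsReceiver:
  // creating a counter or stat through the wrapper must not touch the underlying registry,
  // which is only populated once the metric is first written to.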
test("Doesn't eagerly initialize counters") {
val underlying = new InMemoryStatsReceiver()
val wrapped = new LazyStatsReceiver(underlying)
val counter = wrapped.counter("foo")
assert(underlying.counters.isEmpty)
counter.incr()
assert(underlying.counters == Map(Seq("foo") -> 1))
}
test("Doesn't eagerly initialize histograms") {
val underlying = new InMemoryStatsReceiver()
val wrapped = new LazyStatsReceiver(underlying)
val histo = wrapped.stat("foo")
assert(underlying.stats.isEmpty)
histo.add(5)
assert(underlying.stats == Map(Seq("foo") -> Seq(5)))
}
}
|
twitter/util
|
util-stats/src/test/scala/com/twitter/finagle/stats/LazyStatsReceiverTest.scala
|
Scala
|
apache-2.0
| 743 |
package io.circe
import algebra.Eq
import cats.Show
import cats.data.Xor
import cats.std.list._
/**
* A data type representing possible JSON values.
*
* @author Travis Brown
* @author Tony Morris
* @author Dylan Just
* @author Mark Hibberd
*/
sealed abstract class Json extends Product with Serializable {
import Json._
/**
* The catamorphism for the JSON value data type.
*/
def fold[X](
jsonNull: => X,
jsonBoolean: Boolean => X,
jsonNumber: JsonNumber => X,
jsonString: String => X,
jsonArray: List[Json] => X,
jsonObject: JsonObject => X
): X = this match {
case JNull => jsonNull
case JBoolean(b) => jsonBoolean(b)
case JNumber(n) => jsonNumber(n)
case JString(s) => jsonString(s)
case JArray(a) => jsonArray(a.toList)
case JObject(o) => jsonObject(o)
}
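  // A small usage sketch (hypothetical values): rendering a coarse type tag for any value,
  //   json.fold("null", b => s"bool:$b", n => s"num:$n", s => s"str:$s",
  //             a => s"array(${a.size})", _ => "object")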
/**
* Run on an array or object or return the given default.
*/
def arrayOrObject[X](
or: => X,
jsonArray: List[Json] => X,
jsonObject: JsonObject => X
): X = this match {
case JNull => or
case JBoolean(_) => or
case JNumber(_) => or
case JString(_) => or
case JArray(a) => jsonArray(a.toList)
case JObject(o) => jsonObject(o)
}
/**
* Construct a cursor from this JSON value.
*/
def cursor: Cursor = Cursor(this)
/**
* Construct a cursor with history from this JSON value.
*/
def hcursor: HCursor = Cursor(this).hcursor
def isNull: Boolean = false
def isBoolean: Boolean = false
def isNumber: Boolean = false
def isString: Boolean = false
def isArray: Boolean = false
def isObject: Boolean = false
def asBoolean: Option[Boolean] = None
def asNumber: Option[JsonNumber] = None
def asString: Option[String] = None
def asArray: Option[List[Json]] = None
def asObject: Option[JsonObject] = None
def withBoolean(f: Boolean => Json): Json = asBoolean.fold(this)(f)
def withNumber(f: JsonNumber => Json): Json = asNumber.fold(this)(f)
def withString(f: String => Json): Json = asString.fold(this)(f)
def withArray(f: List[Json] => Json): Json = asArray.fold(this)(f)
def withObject(f: JsonObject => Json): Json = asObject.fold(this)(f)
def mapBoolean(f: Boolean => Boolean): Json = this
def mapNumber(f: JsonNumber => JsonNumber): Json = this
def mapString(f: String => String): Json = this
def mapArray(f: List[Json] => List[Json]): Json = this
def mapObject(f: JsonObject => JsonObject): Json = this
/**
* The name of the type of the JSON value.
*/
def name: String =
this match {
case JNull => "Null"
case JBoolean(_) => "Boolean"
case JNumber(_) => "Number"
case JString(_) => "String"
case JArray(_) => "Array"
case JObject(_) => "Object"
}
/**
* Attempts to decode this JSON value to another data type.
*/
def as[A](implicit d: Decoder[A]): Decoder.Result[A] = d(cursor.hcursor)
/**
* Pretty-print this JSON value to a string using the given pretty-printer.
*/
def pretty(p: Printer): String = p.pretty(this)
/**
* Pretty-print this JSON value to a string with no spaces.
*/
def noSpaces: String = Printer.noSpaces.pretty(this)
/**
   * Pretty-print this JSON value to a string using indentation of two spaces.
*/
def spaces2: String = Printer.spaces2.pretty(this)
/**
   * Pretty-print this JSON value to a string using indentation of four spaces.
*/
def spaces4: String = Printer.spaces4.pretty(this)
/**
* Type-safe equality method.
*/
def ===(that: Json): Boolean = {
def arrayEq(x: Seq[Json], y: Seq[Json]): Boolean = {
val it0 = x.iterator
val it1 = y.iterator
while (it0.hasNext && it1.hasNext) {
if (it0.next =!= it1.next) return false
}
it0.hasNext == it1.hasNext
}
(this, that) match {
case ( JObject(a), JObject(b)) => a === b
case ( JString(a), JString(b)) => a == b
case ( JNumber(a), JNumber(b)) => a === b
case (JBoolean(a), JBoolean(b)) => a == b
case ( JArray(a), JArray(b)) => arrayEq(a, b)
case ( x, y) => x.isNull && y.isNull
}
}
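  // Note that the final catch-all case above means two values compare equal only when both are
  // null; mixed shapes (e.g. a string versus a number) are always unequal.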
/**
* Type-safe inequality.
*/
def =!=(that: Json): Boolean = !(this === that)
/**
* Compute a `String` representation for this JSON value.
*/
override def toString: String = spaces2
/**
* Universal equality derived from our type-safe equality.
*/
override def equals(that: Any): Boolean =
that match {
case j: Json => this === j
case _ => false
}
/**
* Hashing that is consistent with our universal equality.
*/
override def hashCode: Int = super.hashCode
}
object Json {
private[circe] case object JNull extends Json {
override def isNull: Boolean = true
}
private[circe] final case class JBoolean(b: Boolean) extends Json {
override def isBoolean: Boolean = true
override def asBoolean: Option[Boolean] = Some(b)
override def mapBoolean(f: Boolean => Boolean): Json = JBoolean(f(b))
}
private[circe] final case class JNumber(n: JsonNumber) extends Json {
override def isNumber: Boolean = true
override def asNumber: Option[JsonNumber] = Some(n)
override def mapNumber(f: JsonNumber => JsonNumber): Json = JNumber(f(n))
}
private[circe] final case class JString(s: String) extends Json {
override def isString: Boolean = true
override def asString: Option[String] = Some(s)
override def mapString(f: String => String): Json = JString(f(s))
}
private[circe] final case class JArray(a: Seq[Json]) extends Json {
override def isArray: Boolean = true
override def asArray: Option[List[Json]] = Some(a.toList)
override def mapArray(f: List[Json] => List[Json]): Json = JArray(f(a.toList))
}
private[circe] final case class JObject(o: JsonObject) extends Json {
override def isObject: Boolean = true
override def asObject: Option[JsonObject] = Some(o)
override def mapObject(f: JsonObject => JsonObject): Json = JObject(f(o))
}
def empty: Json = Empty
val Empty: Json = JNull
val True: Json = JBoolean(true)
val False: Json = JBoolean(false)
def bool(b: Boolean): Json = JBoolean(b)
def int(n: Int): Json = JNumber(JsonLong(n.toLong))
def long(n: Long): Json = JNumber(JsonLong(n))
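  // The three Double constructors below differ (as far as this API suggests) in how they treat
  // non-representable values such as NaN or the infinities: `number` likely yields None for them,
  // while `numberOrNull` and `numberOrString` fall back to a JSON null or a string form instead.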
def number(n: Double): Option[Json] = JsonDouble(n).asJson
def bigDecimal(n: BigDecimal): Json = JNumber(JsonBigDecimal(n))
def numberOrNull(n: Double): Json = JsonDouble(n).asJsonOrNull
def numberOrString(n: Double): Json = JsonDouble(n).asJsonOrString
def string(s: String): Json = JString(s)
def array(elements: Json*): Json = JArray(elements)
def obj(fields: (String, Json)*): Json = JObject(JsonObject.from(fields.toList))
def fromJsonNumber(num: JsonNumber): Json = JNumber(num)
def fromJsonObject(obj: JsonObject): Json = JObject(obj)
def fromFields(fields: Seq[(String, Json)]): Json = JObject(JsonObject.from(fields.toList))
def fromValues(values: Seq[Json]): Json = JArray(values)
implicit val eqJson: Eq[Json] = Eq.instance(_ === _)
implicit val showJson: Show[Json] = Show.fromToString[Json]
}
|
ktoso/circe
|
core/shared/src/main/scala/io/circe/Json.scala
|
Scala
|
apache-2.0
| 7,136 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.manager.utils
import kafka.manager.{TopicPartitionIdentity, TopicIdentity}
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.KeeperException.NodeExistsException
import org.slf4j.LoggerFactory
import scala.util.Try
/**
* Borrowed from kafka 0.8.1.1, adapted to use curator framework
* https://git-wip-us.apache.org/repos/asf?p=kafka.git;a=blob;f=core/src/main/scala/kafka/admin/ReassignPartitionsCommand.scala
*/
object ReassignPartitionCommand {
import ReassignPartitionErrors._
private[this] val logger = LoggerFactory.getLogger(this.getClass)
def generateAssignment(brokerList: Seq[Int], currentTopicIdentity : TopicIdentity) : Try[TopicIdentity] = {
Try {
val assignedReplicas = AdminUtils.assignReplicasToBrokers(
brokerList,
currentTopicIdentity.partitions,
currentTopicIdentity.replicationFactor)
val newTpi : Map[Int, TopicPartitionIdentity] = currentTopicIdentity.partitionsIdentity.map { case (part, tpi) =>
val newReplicaSet = assignedReplicas.get(part)
checkCondition(newReplicaSet.isDefined, MissingReplicaSetForPartition(part))
(part,tpi.copy(replicas = newReplicaSet.get.toSet))
}
logger.info(s"Generated topic replica assignment topic=${currentTopicIdentity.topic}, $newTpi")
TopicIdentity(currentTopicIdentity.topic,currentTopicIdentity.partitions,newTpi,currentTopicIdentity.numBrokers)
}
}
def validateAssignment(current: TopicIdentity, generated: TopicIdentity) : Unit = {
//perform validation
checkCondition(generated.partitionsIdentity.nonEmpty, ReassignmentDataEmptyForTopic(current.topic))
checkCondition(current.partitions == generated.partitions, PartitionsOutOfSync(current.partitions,generated.partitions))
checkCondition(current.replicationFactor == generated.replicationFactor, ReplicationOutOfSync(current.replicationFactor, generated.replicationFactor))
}
def getValidAssignments(currentTopicIdentity: Map[String, TopicIdentity],
generatedTopicIdentity: Map[String, TopicIdentity]) : Try[Map[TopicAndPartition, Seq[Int]]] = {
Try {
currentTopicIdentity.flatMap { case (topic, current) =>
generatedTopicIdentity.get(topic).fold {
logger.info(s"No generated assignment found for topic=$topic, skipping")
Map.empty[TopicAndPartition, Seq[Int]]
} { generated =>
validateAssignment(current, generated)
for {
//match up partitions from current to generated
(currentPart, currentTpi) <- current.partitionsIdentity
generatedTpi <- generated.partitionsIdentity.get(currentPart)
} yield {
logger.info("Reassigning replicas for topic=%s, partition=%s, current=%s, generated=%s"
.format(topic, currentPart, current.partitionsIdentity, generated.partitionsIdentity))
(TopicAndPartition(topic, currentPart), generatedTpi.replicas.toSeq)
}
}
}
}
}
def executeAssignment(curator: CuratorFramework,
currentTopicIdentity: Map[String, TopicIdentity],
generatedTopicIdentity: Map[String, TopicIdentity]) : Try[Unit] = {
getValidAssignments(currentTopicIdentity, generatedTopicIdentity).flatMap {
validAssignments =>
Try {
checkCondition(validAssignments.nonEmpty, NoValidAssignments)
val jsonReassignmentData = ZkUtils.getPartitionReassignmentZkData(validAssignments)
try {
logger.info(s"Creating reassign partitions path ${ZkUtils.ReassignPartitionsPath} : $jsonReassignmentData")
//validate parsing of generated json
parsePartitionReassignmentZkData(jsonReassignmentData)
ZkUtils.createPersistentPath(curator, ZkUtils.ReassignPartitionsPath, jsonReassignmentData)
} catch {
case ze: NodeExistsException =>
throwError(AlreadyInProgress)
case e: Throwable =>
throwError(FailedToReassignPartitionReplicas(e))
}
}
}
}
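  // The parser below expects ZK reassignment data shaped roughly like (illustrative only):
  //   {"version":1,"partitions":[{"topic":"t1","partition":0,"replicas":[1,2,3]}]}
  // Only the "partitions" field is inspected; entries missing any of topic/partition/replicas
  // simply drop out of the for-comprehension.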
def parsePartitionReassignmentZkData(json : String) : Map[TopicAndPartition, Seq[Int]] = {
import org.json4s.JsonAST._
parseJson(json).findField(_._1 == "partitions") match {
case Some((_, arr)) =>
val result : List[(TopicAndPartition, Seq[Int])] = for {
JArray(elements) <- arr
JObject(children) <- elements
JField("topic", JString(t)) <- children
JField("partition", JInt(i)) <- children
JField("replicas", arr2) <- children
JArray(assignments) <- arr2
} yield (TopicAndPartition(t,i.toInt),assignments.map(_.extract[Int]))
checkCondition(result.nonEmpty, NoValidAssignments)
result.foreach { case (tAndP, a) =>
checkCondition(a.nonEmpty, ReassignmentDataEmptyForTopic(tAndP.topic))
}
result.toMap
case None =>
throwError(NoValidAssignments)
}
}
}
object ReassignPartitionErrors {
class MissingReplicaSetForPartition private[ReassignPartitionErrors](part: Int) extends UtilError(s"Failed to find new replica set for partition $part")
class ReassignmentDataEmptyForTopic private[ReassignPartitionErrors](topic: String) extends UtilError(s"Partition reassignment data is empty for topic $topic")
class PartitionsOutOfSync private[ReassignPartitionErrors](current: Int, generated: Int) extends UtilError(
"Current partitions and generated partition replicas are out of sync current=%s, generated=%s , please regenerate"
.format(current, generated))
class ReplicationOutOfSync private[ReassignPartitionErrors](current: Int, generated: Int) extends UtilError(
"Current replication factor and generated replication factor for replicas are out of sync current=%s, generated=%s , please regenerate"
.format(current, generated))
class NoValidAssignments private[ReassignPartitionErrors] extends UtilError("Cannot reassign partitions with no valid assignments!")
  class ReassignmentAlreadyInProgress private[ReassignPartitionErrors] extends UtilError("Partition reassignment currently in " +
    "progress. Aborting operation")
class FailedToReassignPartitionReplicas private[ReassignPartitionErrors] (t: Throwable) extends UtilError(
s"Failed to reassign partition replicas ${t.getStackTrace.mkString("[","\\n","]")}")
def MissingReplicaSetForPartition(part: Int) = new MissingReplicaSetForPartition(part)
def ReassignmentDataEmptyForTopic(topic: String) = new ReassignmentDataEmptyForTopic(topic)
def PartitionsOutOfSync(current: Int, generated: Int) = new PartitionsOutOfSync(current,generated)
def ReplicationOutOfSync(current: Int, generated: Int) = new ReplicationOutOfSync(current,generated)
val NoValidAssignments = new NoValidAssignments
val AlreadyInProgress = new ReassignmentAlreadyInProgress
def FailedToReassignPartitionReplicas(t: Throwable) = new FailedToReassignPartitionReplicas(t)
}
|
patricklucas/kafka-manager
|
app/kafka/manager/utils/ReassignPartitionCommand.scala
|
Scala
|
apache-2.0
| 7,847 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import SharedHelpers.{thisLineNumber, createTempDirectory}
import enablers.Readability
import Matchers._
import exceptions.TestFailedException
class ShouldBeReadableLogicalOrImplicitSpec extends Spec {
val fileName: String = "ShouldBeReadableLogicalOrImplicitSpec.scala"
def wasEqualTo(left: Any, right: Any): String =
FailureMessages.wasEqualTo(left, right)
def wasNotEqualTo(left: Any, right: Any): String =
FailureMessages.wasNotEqualTo(left, right)
def equaled(left: Any, right: Any): String =
FailureMessages.equaled(left, right)
def didNotEqual(left: Any, right: Any): String =
FailureMessages.didNotEqual(left, right)
def wasNotReadable(left: Any): String =
FailureMessages.wasNotReadable(left)
def wasReadable(left: Any): String =
FailureMessages.wasReadable(left)
def allError(message: String, lineNumber: Int, left: Any): String = {
val messageWithIndex = UnquotedString(" " + FailureMessages.forAssertionsGenTraversableMessageWithStackDepth(0, UnquotedString(message), UnquotedString(fileName + ":" + lineNumber)))
FailureMessages.allShorthandFailed(messageWithIndex, left)
}
trait Thing {
def canRead: Boolean
}
val book = new Thing {
val canRead = true
}
val stone = new Thing {
val canRead = false
}
implicit def readabilityOfThing[T <: Thing]: Readability[T] =
new Readability[T] {
def isReadable(thing: T): Boolean = thing.canRead
}
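  // With this instance in scope, `be (readable)` delegates to `canRead`, so in the assertions
  // below `book` satisfies the matcher while `stone` does not.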
  object `Readability matcher` {
object `when work with 'file should be (readable)'` {
def `should do nothing when file is readable` {
book should (be (readable) or be_== (book))
stone should (be (readable) or be_== (stone))
book should (be (readable) or be_== (stone))
book should (be_== (book) or be (readable))
book should (be_== (stone) or be (readable))
stone should (be_== (stone) or be (readable))
book should (be (readable) or equal (book))
stone should (be (readable) or equal (stone))
book should (be (readable) or equal (stone))
book should (equal (book) or be (readable))
book should (equal (stone) or be (readable))
stone should (equal (stone) or be (readable))
}
def `should throw TestFailedException with correct stack depth when file is not readable` {
val caught1 = intercept[TestFailedException] {
stone should (be (readable) or be_== (book))
}
assert(caught1.message === Some(wasNotReadable(stone) + ", and " + wasNotEqualTo(stone, book)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
stone should (be_== (book) or be (readable))
}
assert(caught2.message === Some(wasNotEqualTo(stone, book) + ", and " + wasNotReadable(stone)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
stone should (be (readable) or equal (book))
}
assert(caught3.message === Some(wasNotReadable(stone) + ", and " + didNotEqual(stone, book)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
stone should (equal (book) or be (readable))
}
assert(caught4.message === Some(didNotEqual(stone, book) + ", and " + wasNotReadable(stone)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'file should not be readable'` {
def `should do nothing when file is not readable` {
stone should (not be readable or not be_== book)
book should (not be readable or not be_== stone)
stone should (not be readable or not be_== stone)
stone should (not be_== book or not be readable)
stone should (not be_== stone or not be readable)
book should (not be_== stone or not be readable)
stone should (not be readable or not equal book)
book should (not be readable or not equal stone)
stone should (not be readable or not equal stone)
stone should (not equal book or not be readable)
stone should (not equal stone or not be readable)
book should (not equal stone or not be readable)
}
def `should throw TestFailedException with correct stack depth when file is readable` {
val caught1 = intercept[TestFailedException] {
book should (not be readable or not be_== book)
}
assert(caught1.message === Some(wasReadable(book) + ", and " + wasEqualTo(book, book)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught2 = intercept[TestFailedException] {
book should (not be_== book or not be readable)
}
assert(caught2.message === Some(wasEqualTo(book, book) + ", and " + wasReadable(book)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught3 = intercept[TestFailedException] {
book should (not be readable or not equal book)
}
assert(caught3.message === Some(wasReadable(book) + ", and " + equaled(book, book)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val caught4 = intercept[TestFailedException] {
book should (not equal book or not be readable)
}
assert(caught4.message === Some(equaled(book, book) + ", and " + wasReadable(book)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `when work with 'all(xs) should be (readable)'` {
def `should do nothing when all(xs) is readable` {
all(List(book)) should (be (readable) or be_== (book))
all(List(stone)) should (be (readable) or be_== (stone))
all(List(book)) should (be (readable) or be_== (stone))
all(List(book)) should (be_== (book) or be (readable))
all(List(book)) should (be_== (stone) or be (readable))
all(List(stone)) should (be_== (stone) or be (readable))
all(List(book)) should (be (readable) or equal (book))
all(List(stone)) should (be (readable) or equal (stone))
all(List(book)) should (be (readable) or equal (stone))
all(List(book)) should (equal (book) or be (readable))
all(List(book)) should (equal (stone) or be (readable))
all(List(stone)) should (equal (stone) or be (readable))
}
      def `should throw TestFailedException with correct stack depth when all(xs) is not readable` {
val left1 = List(stone)
val caught1 = intercept[TestFailedException] {
all(left1) should (be_== (book) or be (readable))
}
assert(caught1.message === Some(allError(wasNotEqualTo(stone, book) + ", and " + wasNotReadable(stone), thisLineNumber - 2, left1)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val left2 = List(stone)
val caught2 = intercept[TestFailedException] {
all(left2) should (be (readable) or be_== (book))
}
assert(caught2.message === Some(allError(wasNotReadable(stone) + ", and " + wasNotEqualTo(stone, book), thisLineNumber - 2, left2)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val left3 = List(stone)
val caught3 = intercept[TestFailedException] {
all(left3) should (equal (book) or be (readable))
}
assert(caught3.message === Some(allError(didNotEqual(stone, book) + ", and " + wasNotReadable(stone), thisLineNumber - 2, left3)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val left4 = List(stone)
val caught4 = intercept[TestFailedException] {
all(left4) should (be (readable) or equal (book))
}
assert(caught4.message === Some(allError(wasNotReadable(stone) + ", and " + didNotEqual(stone, book), thisLineNumber - 2, left4)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
    object `when work with 'all(xs) should not be readable'` {
      def `should do nothing when all(xs) is not readable` {
all(List(stone)) should (not be readable or not be_== book)
all(List(book)) should (not be readable or not be_== stone)
all(List(stone)) should (not be readable or not be_== stone)
all(List(stone)) should (not be_== book or not be readable)
all(List(stone)) should (not be_== stone or not be readable)
all(List(book)) should (not be_== stone or not be readable)
all(List(stone)) should (not be readable or not equal book)
all(List(book)) should (not be readable or not equal stone)
all(List(stone)) should (not be readable or not equal stone)
all(List(stone)) should (not equal book or not be readable)
all(List(stone)) should (not equal stone or not be readable)
all(List(book)) should (not equal stone or not be readable)
}
      def `should throw TestFailedException with correct stack depth when all(xs) is readable` {
val left1 = List(book)
val caught1 = intercept[TestFailedException] {
all(left1) should (not be_== book or not be readable)
}
assert(caught1.message === Some(allError(wasEqualTo(book, book) + ", and " + wasReadable(book), thisLineNumber - 2, left1)))
assert(caught1.failedCodeFileName === Some(fileName))
assert(caught1.failedCodeLineNumber === Some(thisLineNumber - 4))
val left2 = List(book)
val caught2 = intercept[TestFailedException] {
all(left2) should (not be readable or not be_== book)
}
assert(caught2.message === Some(allError(wasReadable(book) + ", and " + wasEqualTo(book, book), thisLineNumber - 2, left2)))
assert(caught2.failedCodeFileName === Some(fileName))
assert(caught2.failedCodeLineNumber === Some(thisLineNumber - 4))
val left3 = List(book)
val caught3 = intercept[TestFailedException] {
all(left3) should (not equal book or not be readable)
}
assert(caught3.message === Some(allError(equaled(book, book) + ", and " + wasReadable(book), thisLineNumber - 2, left3)))
assert(caught3.failedCodeFileName === Some(fileName))
assert(caught3.failedCodeLineNumber === Some(thisLineNumber - 4))
val left4 = List(book)
val caught4 = intercept[TestFailedException] {
all(left4) should (not be readable or not equal book)
}
assert(caught4.message === Some(allError(wasReadable(book) + ", and " + equaled(book, book), thisLineNumber - 2, left4)))
assert(caught4.failedCodeFileName === Some(fileName))
assert(caught4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
}
}
|
SRGOM/scalatest
|
scalatest-test/src/test/scala/org/scalatest/ShouldBeReadableLogicalOrImplicitSpec.scala
|
Scala
|
apache-2.0
| 12,584 |
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.json4s
import java.util.Locale.ENGLISH
import java.io.StringWriter
import collection.immutable
object JsonAST {
/**
* Concatenates a sequence of <code>JValue</code>s.
* <p>
* Example:<pre>
* concat(JInt(1), JInt(2)) == JArray(List(JInt(1), JInt(2)))
* </pre>
*/
def concat(xs: JValue*) = xs.foldLeft(JNothing: JValue)(_ ++ _)
// sealed abstract class JsValue[+T] extends immutable.Seq[T] {
// def values: T
// }
// private trait SingleElementJsValue[+T] extends JsValue[T] {
// def length: Int = 1
// def apply(idx: Int): T = if (idx == 0) values else throw new IndexOutOfBoundsException("A JsString only has 1 element")
// def iterator: Iterator[T] = Iterator(values)
// override def isEmpty: Boolean = false
// override def head = values
// override def tail: immutable.Seq[T] = Nil
// override protected[this] def reversed: List[T] = List(values)
// override def nonEmpty: Boolean = true
// }
//
// class JsString(val values: String) extends SingleElementJsValue[String]
// class JsInt(val values: BigInt) extends SingleElementJsValue[BigInt]
// class JsDecimal(val values: BigDecimal) extends SingleElementJsValue[BigDecimal]
// class JsBool(val values: Boolean) extends SingleElementJsValue[Boolean]
// object JsNull extends SingleElementJsValue[Null] { val values: Null = null }
//
// class JsObject(val values: Seq[(String, JsValue[_])]) extends JsValue[(String, JsValue[_])] {
// def length: Int = values.length
// def apply(idx: Int): (String, JsValue[_]) = values(idx)
// def iterator: Iterator[(String, JsValue[_])] = values.iterator
// }
// class JsArray(val values: Seq[JsValue[_]]) extends JsValue[JsValue[_]] {
// def length: Int = values.length
// def apply(idx: Int): JsValue[_] = values.apply(idx)
// def iterator: Iterator[JsValue[_]] = values.iterator
// }
// object JsNothing extends JsValue[Nothing] {
// val values: Nothing = null.asInstanceOf[Nothing]
// val length: Int = 0
// def apply(idx: Int): Nothing = throw new IndexOutOfBoundsException("A JsNothing is empty")
// def iterator: Iterator[Nothing] = Iterator()
//
// override def isEmpty = true
// override def head: Nothing =
// throw new NoSuchElementException("head of JsNothing")
// override def tail: List[Nothing] =
// throw new UnsupportedOperationException("tail of JsNothing")
// // Removal of equals method here might lead to an infinite recursion similar to IntMap.equals.
// override def equals(that: Any) = that match {
// case that1: JsValue[_] => that1.isEmpty
// case _ => false
// }
// }
object JValue extends Merge.Mergeable
/**
* Data type for JSON AST.
*/
sealed abstract class JValue extends Diff.Diffable with Product with Serializable {
type Values
/**
* Return unboxed values from JSON
* <p>
* Example:<pre>
* JObject(JField("name", JString("joe")) :: Nil).values == Map("name" -> "joe")
* </pre>
*/
def values: Values
/**
* Return direct child elements.
* <p>
* Example:<pre>
* JArray(JInt(1) :: JInt(2) :: Nil).children == List(JInt(1), JInt(2))
* </pre>
*/
def children: List[JValue] = this match {
    case JObject(l) ⇒ l map (_._2)
    case JArray(l) ⇒ l
    case _ ⇒ Nil
}
/**
* Return nth element from JSON.
* Meaningful only to JArray, JObject and JField. Returns JNothing for other types.
* <p>
* Example:<pre>
* JArray(JInt(1) :: JInt(2) :: Nil)(1) == JInt(2)
* </pre>
*/
def apply(i: Int): JValue = JNothing
/**
* Concatenate with another JSON.
* This is a concatenation monoid: (JValue, ++, JNothing)
* <p>
* Example:<pre>
* JArray(JInt(1) :: JInt(2) :: Nil) ++ JArray(JInt(3) :: Nil) ==
* JArray(List(JInt(1), JInt(2), JInt(3)))
* </pre>
*/
def ++(other: JValue) = {
def append(value1: JValue, value2: JValue): JValue = (value1, value2) match {
        case (JNothing, x) ⇒ x
        case (x, JNothing) ⇒ x
        case (JArray(xs), JArray(ys)) ⇒ JArray(xs ::: ys)
        case (JArray(xs), v: JValue) ⇒ JArray(xs ::: List(v))
        case (v: JValue, JArray(xs)) ⇒ JArray(v :: xs)
        case (x, y) ⇒ JArray(x :: y :: Nil)
}
append(this, other)
}
/**
* When this [[org.json4s.JsonAST.JValue]] is a [[org.json4s.JsonAST.JNothing]] or a [[org.json4s.JsonAST.JNull]], this method returns [[scala.None]]
* When it has a value it will return [[scala.Some]]
*/
@deprecated("Use toOption instead", "3.1.0")
def toOpt: Option[JValue] = toOption
/**
* When this [[org.json4s.JsonAST.JValue]] is a [[org.json4s.JsonAST.JNothing]] or a [[org.json4s.JsonAST.JNull]], this method returns [[scala.None]]
* When it has a value it will return [[scala.Some]]
*/
def toOption: Option[JValue] = this match {
      case JNothing | JNull ⇒ None
      case json ⇒ Some(json)
}
/**
* When this [[org.json4s.JsonAST.JValue]] is a [[org.json4s.JsonAST.JNothing]], this method returns [[scala.None]]
* When it has a value it will return [[scala.Some]]
*/
def toSome: Option[JValue] = this match {
case JNothing => None
case json => Some(json)
}
}
case object JNothing extends JValue {
type Values = None.type
def values = None
}
case object JNull extends JValue {
type Values = Null
def values = null
}
case class JString(s: String) extends JValue {
type Values = String
def values = s
}
trait JNumber
case class JDouble(num: Double) extends JValue with JNumber {
type Values = Double
def values = num
}
case class JDecimal(num: BigDecimal) extends JValue with JNumber {
type Values = BigDecimal
def values = num
}
case class JLong(num: Long) extends JValue with JNumber {
type Values = Long
def values = num
}
case class JInt(num: BigInt) extends JValue with JNumber {
type Values = BigInt
def values = num
}
case class JBool(value: Boolean) extends JValue {
type Values = Boolean
def values = value
}
object JBool {
val True = JBool(true)
val False = JBool(false)
}
case class JObject(obj: List[JField]) extends JValue {
type Values = Map[String, Any]
  def values = obj.map { case (n, v) ⇒ (n, v.values) } toMap
override def equals(that: Any): Boolean = that match {
    case o: JObject ⇒ obj.toSet == o.obj.toSet
    case _ ⇒ false
}
override def hashCode = obj.toSet[JField].hashCode
}
case object JObject {
def apply(fs: JField*): JObject = JObject(fs.toList)
}
case class JArray(arr: List[JValue]) extends JValue {
type Values = List[Any]
def values = arr.map(_.values)
override def apply(i: Int): JValue = arr(i)
}
type JField = (String, JValue)
object JField {
def apply(name: String, value: JValue) = (name, value)
def unapply(f: JField): Option[(String, JValue)] = Some(f)
}
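// Illustrative note (added, not in the original source): because JField is an alias
// for (String, JValue), the apply/unapply above let plain tuples be built and matched
// as if JField were a case class:
//   JField("name", JString("joe"))                       // the pair ("name", JString("joe"))
//   ("age", JInt(30)) match { case JField(n, _) => n }   // binds n = "age"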
}
|
tmatvienko/json4s
|
ast/src/main/scala/org/json4s/JsonAST.scala
|
Scala
|
apache-2.0
| 7,682 |
package info.glennengstrand.news.model
case class Participant(
id: Option[Long],
name: Option[String],
link: Option[String]) {
def isValid: Boolean = {
!name.isEmpty
}
}
|
gengstrand/clojure-news-feed
|
server/feed12/app/info/glennengstrand/news/model/Participant.scala
|
Scala
|
epl-1.0
| 185 |
/*******************************************************************************
Copyright (c) 2013, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
***************************************************************************** */
package kr.ac.kaist.jsaf.analysis.typing.models.jquery
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolTrue=>T, BoolFalse=>F, _}
import kr.ac.kaist.jsaf.analysis.typing.Helper
import kr.ac.kaist.jsaf.analysis.typing.models.{JQueryModel, DOMHelper}
import kr.ac.kaist.jsaf.analysis.typing.models.DOMCore.{DOMNodeList, DOMElement}
import kr.ac.kaist.jsaf.analysis.typing.models.DOMHtml.{HTMLDocument, HTMLTopElement}
import kr.ac.kaist.jsaf.analysis.cfg.CFGExpr
object JQueryHelper {
def NewJQueryObject(): Obj =
Helper.NewObject(JQuery.ProtoLoc)
.update("length", PropValue(ObjectValue(AbsNumber.alpha(0), T, T, T)))
def NewJQueryObject(len : Double): Obj =
Helper.NewObject(JQuery.ProtoLoc)
.update("length", PropValue(ObjectValue(AbsNumber.alpha(len), T, T, T)))
def NewJQueryObject(n_len : AbsNumber): Obj =
Helper.NewObject(JQuery.ProtoLoc)
.update("length", PropValue(ObjectValue(n_len, T, T, T)))
def MakeArray(h: Heap, arr: Value): Obj = {
MakeArray(h, arr, Helper.NewArrayObject(AbsNumber.alpha(0)))
}
def MakeArray(h: Heap, v_arr: Value, o_results: Obj): Obj = {
val n_len = o_results("length")._1._1._1._1._4 // number
AbsNumber.concretize(n_len) match {
case Some(n) =>
val o_1 =
if (v_arr._2.isEmpty)
o_results.
update(n.toString, PropValue(ObjectValue(v_arr._1, T, T, T))).
update("length", PropValue(ObjectValue(AbsNumber.alpha(n+1), T, T, T)))
else
ObjEmpty
val o_2 =
if (!v_arr._2.isEmpty) {
v_arr._2.foldLeft(o_results)((_o, l) => {
val n_arrlen = Helper.Proto(h, l, AbsString.alpha("length"))._1._4
AbsNumber.concretize(n_arrlen) match {
case Some(n_arr) =>
val oo = (0 until n_arr.toInt).foldLeft(_o)((_o1, i) =>
_o1.update((n+i).toString,
PropValue(ObjectValue(Helper.Proto(h,l,AbsString.alpha(i.toString)), T, T, T))))
oo.update("length", PropValue(ObjectValue(AbsNumber.alpha(n+n_arr), T, T, T)))
case None =>
if (n_arrlen <= NumBot)
_o
else
_o.update(NumStr, PropValue(ObjectValue(Helper.Proto(h,l,NumStr), T, T, T)))
}
})
}
else ObjEmpty
o_1 + o_2
case None =>
if (n_len <= NumBot)
ObjBot
else {
val o_1 =
if (v_arr._2.isEmpty)
o_results.update(Helper.toString(PValue(n_len)), PropValue(ObjectValue(v_arr._1, T, T, T)))
else
ObjEmpty
val o_2 =
if (!v_arr._2.isEmpty) {
v_arr._2.foldLeft(o_results)((_o, l) =>
_o.update(NumStr, PropValue(ObjectValue(Helper.Proto(h,l,NumStr), T, T, T))))
}
else ObjEmpty
o_1 + o_2
}
}
}
def addJQueryEvent(h: Heap, v_elem: Value, s_types: AbsString, v_handler: Value, v_data: Value, v_selector: Value) = {
val fun_table = h(EventFunctionTableLoc)
val target_table = h(EventTargetTableLoc)
val selector_table = h(EventSelectorTableLoc)
val propv_fun = PropValue(v_handler)
val propv_target = PropValue(v_elem)
val propv_selector = PropValue(v_selector)
val event_list = s_types match {
case StrTop | OtherStr => JQueryModel.aysnc_calls
case OtherStrSingle(s_ev) =>
if (DOMHelper.isLoadEventAttribute(s_ev)) List("#LOAD")
else if (DOMHelper.isUnloadEventAttribute(s_ev)) List("#UNLOAD")
else if (DOMHelper.isKeyboardEventAttribute(s_ev) || DOMHelper.isKeyboardEventProperty(s_ev)) List("#KEYBOARD")
else if (DOMHelper.isMouseEventAttribute(s_ev) || DOMHelper.isMouseEventProperty(s_ev)) List("#MOUSE")
else if (DOMHelper.isOtherEventAttribute(s_ev) || DOMHelper.isOtherEventProperty(s_ev))List("#OTHER")
else if (DOMHelper.isReadyEventProperty(s_ev)) List("#READY")
else List()
case NumStrSingle(_) => /* Error ?*/ List()
case NumStr => /* Error ?*/ List()
case StrBot => List()
}
val o_fun = event_list.foldLeft(fun_table)((o, s_ev) =>
o.update(s_ev, o(s_ev)._1 + propv_fun)
)
val o_target = event_list.foldLeft(target_table)((o, s_ev) =>
o.update(s_ev, o(s_ev)._1 + propv_target)
)
val o_selector = event_list.foldLeft(selector_table)((o, s_ev) =>
o.update(s_ev, o(s_ev)._1 + propv_selector)
)
h.update(EventFunctionTableLoc, o_fun).update(EventTargetTableLoc, o_target).update(EventSelectorTableLoc, o_selector)
}
  private val reg_quick = """^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)""".r
  private val reg_id = """([\w]+)""".r
def init(h: Heap, v_selector: Value, v_context: Value,
l_jq: Loc, l_tag: Loc, l_child: Loc): (Heap, Value) = {
//val h_start = h
//val ctx_start = ctx_3
val lset_this = h(SinglePureLocalLoc)("@this")._1._2._2
// 1) Handle $(""), $(null), $(undefined), $(false)
val (h_ret1, v_ret1) =
if (UndefTop <= v_selector._1._1 || NullTop <= v_selector._1._2 ||
F <= v_selector._1._3 || AbsString.alpha("") <= v_selector._1._5) {
// empty jQuery object
(h.update(l_jq, NewJQueryObject), Value(l_jq))
}
else
(HeapBot, ValueBot)
// 2) Handle $(DOMElement)
val (h_ret2, v_ret2) =
if (!v_selector._2.isEmpty) {
v_selector._2.foldLeft((h, ValueBot))((hv, l) => {
val v_nodeType = Helper.Proto(h, l, AbsString.alpha("nodeType"))
if (T <= Helper.toBoolean(v_nodeType)) {
// jQuery object
val o_jq = NewJQueryObject(1)
.update("context", PropValue(ObjectValue(v_selector, T, T, T)))
.update("0", PropValue(ObjectValue(v_selector, T, T, T)))
val _h1 = h.update(l_jq, o_jq)
(hv._1 + _h1, hv._2 + Value(lset_this))
}
else
hv
})
}
else
(HeapBot, ValueBot)
// 3) Handle HTML strings
val (h_ret3, v_ret3) = v_selector._1._5 match {
case OtherStrSingle(s) =>
val matches = reg_quick.unapplySeq(s)
matches match {
case Some(mlist) =>
val tag_name = mlist(0)
// HANDLE: $(html) -> $(array)
          // unsound: only the tag name is supported
if (tag_name != null) {
//val s_tag = tag_name.filter((c) => c != '<' && c != '>').toUpperCase
val s_tag = reg_id.findFirstIn(tag_name).get
// jQuery object
val o_jq =NewJQueryObject(UInt)
.update("selector", PropValue(ObjectValue(v_selector, T, T, T)))
.update("prevObject", PropValue(ObjectValue(Value(JQuery.RootJQLoc), T, T, T)))
.update("0", PropValue(ObjectValue(Value(l_tag), T, T, T)))
val _h1 = DOMHelper.addTag(h, s_tag, l_tag, l_child).update(l_jq, o_jq)
(_h1, Value(l_jq))
}
// HANDLE: $(#id)
else {
val s_id = mlist(1)
// getElementById
val lset_id = DOMHelper.findById(h, AbsString.alpha(s_id))
// jQuery object
val o_jq = NewJQueryObject(lset_id.size)
.update("selector", PropValue(ObjectValue(v_selector, T, T, T)))
val o_jq1 =
if (lset_id.isEmpty)
o_jq
else
o_jq.update("0", PropValue(ObjectValue(Value(lset_id), T, T, T)))
(h.update(l_jq, o_jq1), Value(l_jq))
}
case None =>
// HANDLE: $(expr, $(...))
// else if ( !context || context.jquery ) {
val (h1, v1) =
// if (v_context._1._1 </ UndefBot) {
if (v_context._2 == LocSetBot) {
// prev = rootjQuery
val lset_find = DOMHelper.querySelectorAll(h, s)
// jQuery object
val o_jq =NewJQueryObject(lset_find.size)
.update("selector", PropValue(ObjectValue(v_selector, T, T, T)))
.update("prevObject", PropValue(ObjectValue(Value(JQuery.RootJQLoc), T, T, T)))
val o_jq1 =
if (lset_find.isEmpty)
o_jq
else
o_jq.update(NumStr, PropValue(ObjectValue(Value(lset_find), T, T, T)))
(h.update(l_jq, o_jq1), Value(l_jq))
}
else {
// TODO : we should find elements using selector in the context
// prev = rootjQuery
val lset_find = DOMHelper.querySelectorAll(h, s)
// jQuery object
val o_jq =NewJQueryObject(lset_find.size)
.update("selector", PropValue(ObjectValue(v_selector, T, T, T)))
.update("prevObject", PropValue(ObjectValue(Value(JQuery.RootJQLoc), T, T, T)))
val o_jq1 =
if (lset_find.isEmpty)
o_jq
else
o_jq.update(NumStr, PropValue(ObjectValue(Value(lset_find), T, T, T)))
(h.update(l_jq, o_jq1), Value(l_jq))
}
// (HeapBot, ValueBot)
val v_jquery = v_context._2.foldLeft(ValueBot)((v,l) =>
v + Helper.Proto(h, l, AbsString.alpha("jquery"))
)
val (h2, v2) =
if (UndefTop <= v_context._1._1 && v_jquery._1._1 </ UndefBot) {
// prev = context
val lset_context = v_context._2.foldLeft(LocSetBot)((lset, l) => lset ++ h(l)(NumStr)._1._1._1._2)
val lset_find = lset_context.foldLeft(LocSetBot)((lset, l) => lset ++ DOMHelper.querySelectorAll(h, s))
// jQuery object
val o_jq = NewJQueryObject(lset_find.size)
.update("selector", PropValue(ObjectValue(v_selector, T, T, T)))
.update("prevObject", PropValue(ObjectValue(v_context, T, T, T)))
val o_jq1 =
if (lset_find.isEmpty)
o_jq
else
o_jq.update(NumStr, PropValue(ObjectValue(Value(lset_find), T, T, T)))
(h.update(l_jq, o_jq1), Value(l_jq))
}
else
(HeapBot, ValueBot)
// TODO: HANDLE: $(expr, context)
          // (which is just equivalent to: $(context).find(expr))
// return this.constructor( context ).find( selector );
(h1 + h2, v1 + v2)
}
case OtherStr | StrTop =>
// top element
val _h1 = DOMHelper.addTagTop(h, l_tag, l_jq)
// jQuery object
val o_jq = NewJQueryObject(UInt)
.update("selector", PropValue(ObjectValue(v_selector, T, T, T)))
.update("prevObject", PropValue(ObjectValue(Value(JQuery.RootJQLoc) + Value(l_jq), T, T, T)))
.update(NumStr, PropValue(ObjectValue(Value(l_tag) + Value(HTMLTopElement.getInsLoc(_h1)), T, T, T)))
(_h1.update(l_jq, o_jq), Value(l_jq))
case NumStrSingle(_) | NumStr =>
// jQuery object
val o = NewJQueryObject(0)
.update("selector", PropValue(ObjectValue(v_selector, T, T, T)))
.update("prevObject", PropValue(ObjectValue(Value(JQuery.RootJQLoc) + Value(l_jq), T, T, T)))
.update("context", PropValue(ObjectValue(HTMLDocument.GlobalDocumentLoc, T, T, T)))
(h.update(l_jq, o), Value(l_jq))
case StrBot =>
(HeapBot, ValueBot)
}
// 4) HANDLE: $(function), Shortcut for document ready event
val lset_f = v_selector._2.filter(l => T <= Helper.IsCallable(h, l))
val (h_ret4, v_ret4) =
if (!lset_f.isEmpty) {
val h1 = addJQueryEvent(h, Value(HTMLDocument.GlobalDocumentLoc),
AbsString.alpha("DOMContentLoaded"), Value(lset_f), ValueBot, ValueBot)
(h1, Value(JQuery.RootJQLoc))
}
else
(HeapBot, ValueBot)
// Handle: else
val (h_ret5, v_ret5) = v_selector._2.foldLeft((HeapBot, ValueBot))((hv, l) => {
// jquery object
val o_1 =
if (Helper.Proto(h, l, AbsString.alpha("selector"))._1._1 </ UndefBot) {
NewJQueryObject().
update("selector", PropValue(ObjectValue(Helper.Proto(h, l, AbsString.alpha("selector")), T, T, T))).
update("context", PropValue(ObjectValue(Helper.Proto(h, l, AbsString.alpha("context")), T, T, T)))
}
else
NewJQueryObject()
// make array
val o_2 = MakeArray(h, v_selector, o_1)
val _h1 = h.update(l_jq, o_2)
(hv._1 + _h1, hv._2 + Value(l_jq))
})
val h_ret = h_ret1 + h_ret2 + h_ret3 + h_ret4 + h_ret5
val v_ret = v_ret1 + v_ret2 + v_ret3 + v_ret4 + v_ret5
(h_ret, v_ret)
}
def extend(h: Heap, args: List[Value]): (Heap, Value) = {
val len = args.length
if (len <= 0) {
(HeapBot, ValueBot)
}
else if (len == 1) {
// target = this
val lset_this = h(SinglePureLocalLoc)("@this")._1._2._2
val lset_arg1 = args(0)._2
val h_ret = lset_this.foldLeft(h)((h1, l1) =>
lset_arg1.foldLeft(h1)((h2, l2) => {
val props = h2(l2).getProps
val h2_1 = props.foldLeft(h2)((h3, p) =>
Helper.PropStore(h3, l1, AbsString.alpha(p), Helper.Proto(h3, l2, AbsString.alpha(p)))
)
val o_arg1 = h2_1(l2)
val o_target = h2_1(l1)
val o_target_new = o_target
.update("@default_number", o_arg1("@default_number")._1 + o_target("@default_number")._1, AbsentTop)
.update("@default_other", o_arg1("@default_other")._1 + o_target("@default_other")._1, AbsentTop)
h2_1.update(l1, o_target_new)
})
)
(h_ret, Value(lset_this))
}
else {
val v_arg1 = args(0)
val (target, list_obj) =
if (v_arg1._1._3 </ BoolBot)
(args(1), args.tail.tail)
else
(v_arg1, args.tail)
val lset_target = target._2
val lset_obj = list_obj.foldLeft(LocSetBot)((lset, v) => lset ++ v._2)
val h_ret = lset_target.foldLeft(h)((h1, l1) =>
lset_obj.foldLeft(h1)((h2, l2) => {
val props = h2(l2).getProps
val h2_1 = props.foldLeft(h2)((h3, p) =>
Helper.PropStore(h3, l1, AbsString.alpha(p), Helper.Proto(h3, l2, AbsString.alpha(p)))
)
val o_arg1 = h2_1(l2)
val o_target = h2_1(l1)
val o_target_new = o_target
.update("@default_number", o_arg1("@default_number")._1 + o_target("@default_number")._1, AbsentTop)
.update("@default_other", o_arg1("@default_other")._1 + o_target("@default_other")._1, AbsentTop)
h2_1.update(l1, o_target_new)
})
)
(h_ret, Value(lset_target))
}
}
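  // Illustrative note (added, not in the original analysis code): extend appears to
  // mirror jQuery.extend -- with a single argument the source's properties are merged
  // into `this`; otherwise a leading boolean (the deep-copy flag) is skipped and the
  // remaining objects are merged into the first target, as in $.extend(true, target, obj1, obj2).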
def pushStack(h: Heap, lset_prev: LocSet, lset_next: LocSet): Heap = {
val v_context = lset_prev.foldLeft(ValueBot)((v, l) => v+ Helper.Proto(h, l, AbsString.alpha("context")))
lset_next.foldLeft(h)((h1, l1) => {
val h1_1 = Helper.PropStore(h1, l1, AbsString.alpha("context"), v_context)
Helper.PropStore(h1_1, l1, AbsString.alpha("prevObject"), Value(lset_prev))
})
}
def isArraylike(h: Heap, l: Loc): AbsBool = {
val n_len = Helper.Proto(h, l, AbsString.alpha("length"))._1._4
val s_class = h(l)("@class")._1._2._1._5
val b1 =
if (n_len </ NumBot && AbsString.alpha("Function") </ s_class)
T
else
BoolBot
val b2 =
if (n_len <= NumBot || AbsString.alpha("Function") <= s_class)
F
else
BoolBot
b1 + b2
}
}
|
daejunpark/jsaf
|
src/kr/ac/kaist/jsaf/analysis/typing/models/jquery/JQueryHelper.scala
|
Scala
|
bsd-3-clause
| 16,205 |
package artisanal.pickle.maker
import models._
import parser._
import org.specs2._
import mutable._
import specification._
import scala.reflect.internal.pickling.ByteCodecs
import scala.tools.scalap.scalax.rules.scalasig._
import com.novus.salat.annotations.util._
import scala.reflect.ScalaSignature
class ListLongSpec extends mutable.Specification {
"a ScalaSig for case class MyRecord_ListLong(s: List[Long])" should {
"have the correct string" in {
val mySig = new artisanal.pickle.maker.ScalaSig(List("case class"), List("models", "MyRecord_ListLong"), List(("s", "List[Long]")))
val correctParsedSig = SigParserHelper.parseByteCodeFromAnnotation(classOf[MyRecord_ListLong]).map(ScalaSigAttributeParsers.parse(_)).get
val myParsedSig = SigParserHelper.parseByteCodeFromMySig(mySig).map(ScalaSigAttributeParsers.parse(_)).get
correctParsedSig.toString === myParsedSig.toString
}
}
}
|
julianpeeters/artisanal-pickle-maker
|
src/test/scala/singleValueMember/ListSpecs/List[Long]Spec.scala
|
Scala
|
apache-2.0
| 929 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.reactiveflows
import akka.actor.{
Actor,
ActorLogging,
ActorRef,
ActorSystem,
Props,
SupervisorStrategy,
Terminated
}
import akka.cluster.Cluster
import akka.cluster.ddata.DistributedData
import akka.cluster.pubsub.DistributedPubSub
import akka.stream.ActorMaterializer
object Main {
final class Root extends Actor with ActorLogging {
override val supervisorStrategy = SupervisorStrategy.stoppingStrategy
private implicit val mat = ActorMaterializer()
private val mediator = DistributedPubSub(context.system).mediator
private val flowShardRegion = {
val config = context.system.settings.config
val shardCount = config.getInt("reactive-flows.flow.shard-count")
val passivationTimeout = config.getDuration("reactive-flows.flow.passivation-timeout")
Flow.startSharding(context.system, mediator, shardCount, passivationTimeout)
}
private val flowFacade = {
val replicator = DistributedData(context.system).replicator
context.actorOf(FlowFacade(mediator, replicator, flowShardRegion), FlowFacade.Name)
}
private val api = {
val config = context.system.settings.config
val address = config.getString("reactive-flows.api.address")
val port = config.getInt("reactive-flows.api.port")
val timeout = config.getDuration("reactive-flows.api.flow-facade-timeout")
val bufferSize = config.getInt("reactive-flows.api.event-buffer-size")
val heartbeat = config.getDuration("reactive-flows.api.event-heartbeat")
context.actorOf(Api(address, port, flowFacade, timeout, mediator, bufferSize, heartbeat),
Api.Name)
}
context.watch(flowFacade)
context.watch(api)
log.info("{} up and running", context.system.name)
override def receive = {
case Terminated(actor) =>
log.error("Terminating the system because {} terminated!", actor.path)
context.system.terminate()
}
}
// Needed to terminate the actor system on initialization errors of root, e.g. missing configuration settings!
final class Terminator(root: ActorRef) extends Actor with ActorLogging {
context.watch(root)
override def receive = {
case Terminated(`root`) =>
log.error("Terminating the system because root terminated!")
context.system.terminate()
}
}
def main(args: Array[String]): Unit = {
val system = ActorSystem("reactive-flows")
Cluster(system).registerOnMemberUp {
val root = system.actorOf(Props(new Root), "root")
system.actorOf(Props(new Terminator(root)), "terminator")
}
}
}
|
hseeberger/reactive-flows
|
src/main/scala/de/heikoseeberger/reactiveflows/Main.scala
|
Scala
|
apache-2.0
| 3,271 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.core
import akka.actor._
import akka.event.LoggingReceive.withLabel
import org.ensime.api._
import org.ensime.config.richconfig._
import org.ensime.util.FileUtils.toSourceFileInfo
import org.ensime.util.file._
class AnalyzerManager(
broadcaster: ActorRef,
analyzerCreator: List[EnsimeProjectId] => Props,
implicit val config: EnsimeConfig
) extends Actor with ActorLogging with Stash {
private val sauron = context.actorOf(analyzerCreator(config.projects.map(_.id)))
// maps the active modules to their analyzers
private var analyzers: Map[EnsimeProjectId, ActorRef] = Map.empty
private def getOrSpawnNew(optionalId: Option[EnsimeProjectId]): ActorRef =
optionalId match {
case Some(id) =>
analyzers.get(id) match {
case Some(analyzer) =>
analyzer
case None =>
val name = s"${id.project}_${id.config}"
val newAnalyzer = context.actorOf(analyzerCreator(id :: Nil), name)
analyzers += (id -> newAnalyzer)
newAnalyzer
}
case None =>
sauron
}
override def preStart(): Unit = {
// for legacy clients on startup
broadcaster ! Broadcaster.Persist(AnalyzerReadyEvent)
broadcaster ! Broadcaster.Persist(FullTypeCheckCompleteEvent)
}
override def receive: Receive = ready
private def ready: Receive = withLabel("ready") {
case req @ RestartScalaCompilerReq(id, _) =>
if (analyzers.isEmpty)
broadcaster ! AnalyzerReadyEvent
else
id match {
case Some(projectId) =>
analyzers.get(projectId).foreach(_ forward req)
case None =>
analyzers.values foreach (_ forward req)
}
case req @ UnloadAllReq =>
analyzers.foreach {
case (_, analyzer) => analyzer forward req
}
case req @ TypecheckModule(moduleId) =>
getOrSpawnNew(Some(moduleId)) forward req
case req @ RemoveFileReq(file: File) =>
val fileInfo = SourceFileInfo(RawFile(file.toPath), None, None)
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ TypecheckFileReq(fileInfo) =>
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ TypecheckFilesReq(files) =>
val original = sender
val filesPerProject = files.groupBy(config.findProject(_))
context.actorOf(Props(new Actor {
private var remaining = filesPerProject.size
private var aggregate: List[String] = List.empty
override def preStart: Unit =
for ((optionalModuleId, list) <- filesPerProject)
getOrSpawnNew(optionalModuleId) ! TypecheckFilesReq(list)
override def receive = {
case res: RpcResponse if remaining > 1 =>
aggregate = addResponse(res, aggregate)
remaining -= 1
case res: RpcResponse =>
aggregate = addResponse(res, aggregate)
original ! combine(aggregate)
context.stop(self)
}
def addResponse(res: RpcResponse, agg: List[String]) = res match {
case EnsimeServerError(desc) =>
desc :: aggregate
case _ =>
aggregate
}
def combine(errors: List[String]): RpcResponse =
if (aggregate.isEmpty) // had no errors; return a VoidResponse
VoidResponse
else // return the cumulative error
EnsimeServerError(aggregate mkString ", ")
}))
case req @ RefactorReq(_, _, _) =>
val original = sender
context.actorOf(Props(new Actor {
override def preStart(): Unit = {
context.actorOf(analyzerCreator(config.projects.map(_.id))) ! req
}
override def receive = {
case res: RpcResponse =>
original ! res
context.stop(self)
}
}))
case req @ CompletionsReq(fileInfo, _, _, _, _) =>
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ FqnOfSymbolAtPointReq(file, point) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ FqnOfTypeAtPointReq(file, point) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ SymbolAtPointReq(file, point: Int) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ DocUriAtPointReq(file, range: OffsetRange) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ TypeAtPointReq(file, range: OffsetRange) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ SymbolDesignationsReq(file, start, end, _) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ ImplicitInfoReq(file, range: OffsetRange) =>
getOrSpawnNew(config.findProject(file)) forward req
case req @ ExpandSelectionReq(file, start: Int, stop: Int) =>
val fileInfo = SourceFileInfo(RawFile(file.toPath), None, None)
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ StructureViewReq(fileInfo: SourceFileInfo) =>
getOrSpawnNew(config.findProject(fileInfo)) forward req
case req @ UnloadFileReq(file) =>
getOrSpawnNew(config.findProject(file)) forward req
}
}
object AnalyzerManager {
def apply(
broadcaster: ActorRef,
creator: List[EnsimeProjectId] => Props
)(
implicit
config: EnsimeConfig
) = Props(new AnalyzerManager(broadcaster, creator, config))
}
|
VlachJosef/ensime-server
|
core/src/main/scala/org/ensime/core/AnalyzerManager.scala
|
Scala
|
gpl-3.0
| 5,553 |
package com.cloudera.hue.livy.server
import javax.servlet.ServletContext
import com.cloudera.hue.livy.{Utils, Logging, LivyConf, WebServer}
import org.scalatra._
import org.scalatra.servlet.ScalatraListener
object Main {
val SESSION_KIND = "livy-server.session.kind"
val THREAD_SESSION = "thread"
val PROCESS_SESSION = "process"
val YARN_SESSION = "yarn"
def main(args: Array[String]): Unit = {
val livyConf = new LivyConf()
Utils.loadDefaultLivyProperties(livyConf)
val host = livyConf.get("livy.server.host", "0.0.0.0")
val port = livyConf.getInt("livy.server.port", 8998)
val server = new WebServer(host, port)
server.context.setResourceBase("src/main/com/cloudera/hue/livy/server")
server.context.setInitParameter(ScalatraListener.LifeCycleKey, classOf[ScalatraBootstrap].getCanonicalName)
server.context.addEventListener(new ScalatraListener)
server.start()
try {
System.setProperty("livy.server.callback-url", f"http://${server.host}:${server.port}")
} finally {
server.join()
server.stop()
// Make sure to close all our outstanding http requests.
dispatch.Http.shutdown()
}
}
}
class ScalatraBootstrap extends LifeCycle with Logging {
var sessionManager: SessionManager = null
override def init(context: ServletContext): Unit = {
val livyConf = new LivyConf()
val sessionFactoryKind = livyConf.get("livy.server.session.factory", "process")
info(f"Using $sessionFactoryKind sessions")
val sessionFactory = sessionFactoryKind match {
case "thread" => new ThreadSessionFactory(livyConf)
case "process" => new ProcessSessionFactory(livyConf)
case "yarn" => new YarnSessionFactory(livyConf)
case _ =>
println(f"Unknown session factory: $sessionFactoryKind}")
sys.exit(1)
}
sessionManager = new SessionManager(sessionFactory)
context.mount(new WebApp(sessionManager), "/*")
}
override def destroy(context: ServletContext): Unit = {
if (sessionManager != null) {
sessionManager.shutdown()
}
}
}
|
nvoron23/hue
|
apps/spark/java/livy-server/src/main/scala/com/cloudera/hue/livy/server/Main.scala
|
Scala
|
apache-2.0
| 2,097 |
package techex.cases
import org.http4s.dsl._
import org.joda.time.{Instant, DateTime}
import techex._
import techex.data._
import techex.domain._
import scalaz.stream.async.mutable.Topic
object endSession {
def restApi(topic: Topic[InputMessage]): WebHandler = {
case req@POST -> Root / "sessions" / "end" / sessionId => {
for {
_ <- topic.publishOne(EndEntry(ScId(sessionId),Instant.now()))
ok <- Ok()
} yield ok
}
/*To avoid CORS crap*/
case req@GET -> Root / "sessions" / "end" / sessionId => {
for {
_ <- topic.publishOne(EndEntry(ScId(sessionId),Instant.now()))
ok <- Ok()
} yield ok
}
}
}
|
kantega/tech-ex-2015
|
backend/src/main/scala/techex/cases/endSession.scala
|
Scala
|
mit
| 683 |
package com.github.xubo245.gcdss.utils
/**
* Created by xubo on 2017/4/9.
*/
object Constants {
var debug=false
}
|
xubo245/GCDSS
|
src/main/scala/com/github/xubo245/gcdss/utils/Constants.scala
|
Scala
|
gpl-2.0
| 119 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.message
import java.nio._
import scala.math._
import kafka.utils._
import org.apache.kafka.common.utils.Utils
/**
* Constants related to messages
*/
object Message {
/**
* The current offset and size for all the fixed-length fields
*/
val CrcOffset = 0
val CrcLength = 4
val MagicOffset = CrcOffset + CrcLength
val MagicLength = 1
val AttributesOffset = MagicOffset + MagicLength
val AttributesLength = 1
val KeySizeOffset = AttributesOffset + AttributesLength
val KeySizeLength = 4
val KeyOffset = KeySizeOffset + KeySizeLength
val ValueSizeLength = 4
/** The amount of overhead bytes in a message */
val MessageOverhead = KeyOffset + ValueSizeLength
/**
* The minimum valid size for the message header
*/
val MinHeaderSize = CrcLength + MagicLength + AttributesLength + KeySizeLength + ValueSizeLength
/**
* The current "magic" value
*/
val CurrentMagicValue: Byte = 0
/**
* Specifies the mask for the compression code. 3 bits to hold the compression codec.
* 0 is reserved to indicate no compression
*/
val CompressionCodeMask: Int = 0x07
/**
* Compression code for uncompressed messages
*/
val NoCompression: Int = 0
}
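// Worked example (added for illustration, not part of the original file): with the
// constants above a serialized message is laid out as
//   bytes 0-3  crc         (CrcOffset = 0, CrcLength = 4)
//   byte  4    magic       (MagicOffset = 4)
//   byte  5    attributes  (AttributesOffset = 5)
//   bytes 6-9  key size K  (KeySizeOffset = 6)
//   bytes 10.. key, then a 4-byte value size and the payload,
// giving MessageOverhead = KeyOffset + ValueSizeLength = 10 + 4 = 14 bytes.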
/**
* A message. The format of an N byte message is the following:
*
* 1. 4 byte CRC32 of the message
* 2. 1 byte "magic" identifier to allow format changes, value is 0 currently
* 3. 1 byte "attributes" identifier to allow annotations on the message independent of the version (e.g. compression enabled, type of codec used)
* 4. 4 byte key length, containing length K
* 5. K byte key
* 6. 4 byte payload length, containing length V
* 7. V byte payload
*
* Default constructor wraps an existing ByteBuffer with the Message object with no change to the contents.
*/
class Message(val buffer: ByteBuffer) {
import kafka.message.Message._
/**
* A constructor to create a Message
* @param bytes The payload of the message
* @param codec The compression codec used on the contents of the message (if any)
* @param key The key of the message (null, if none)
* @param payloadOffset The offset into the payload array used to extract payload
* @param payloadSize The size of the payload to use
*/
def this(bytes: Array[Byte],
key: Array[Byte],
codec: CompressionCodec,
payloadOffset: Int,
payloadSize: Int) = {
this(ByteBuffer.allocate(Message.CrcLength +
Message.MagicLength +
Message.AttributesLength +
Message.KeySizeLength +
(if(key == null) 0 else key.length) +
Message.ValueSizeLength +
(if(bytes == null) 0
else if(payloadSize >= 0) payloadSize
else bytes.length - payloadOffset)))
// skip crc, we will fill that in at the end
buffer.position(MagicOffset)
buffer.put(CurrentMagicValue)
var attributes: Byte = 0
if (codec.codec > 0)
attributes = (attributes | (CompressionCodeMask & codec.codec)).toByte
buffer.put(attributes)
if(key == null) {
buffer.putInt(-1)
} else {
buffer.putInt(key.length)
buffer.put(key, 0, key.length)
}
val size = if(bytes == null) -1
else if(payloadSize >= 0) payloadSize
else bytes.length - payloadOffset
buffer.putInt(size)
if(bytes != null)
buffer.put(bytes, payloadOffset, size)
buffer.rewind()
// now compute the checksum and fill it in
Utils.writeUnsignedInt(buffer, CrcOffset, computeChecksum)
}
def this(bytes: Array[Byte], key: Array[Byte], codec: CompressionCodec) =
this(bytes = bytes, key = key, codec = codec, payloadOffset = 0, payloadSize = -1)
def this(bytes: Array[Byte], codec: CompressionCodec) =
this(bytes = bytes, key = null, codec = codec)
def this(bytes: Array[Byte], key: Array[Byte]) =
this(bytes = bytes, key = key, codec = NoCompressionCodec)
def this(bytes: Array[Byte]) =
this(bytes = bytes, key = null, codec = NoCompressionCodec)
/**
* Compute the checksum of the message from the message contents
*/
def computeChecksum(): Long =
CoreUtils.crc32(buffer.array, buffer.arrayOffset + MagicOffset, buffer.limit - MagicOffset)
/**
* Retrieve the previously computed CRC for this message
*/
def checksum: Long = Utils.readUnsignedInt(buffer, CrcOffset)
/**
* Returns true if the crc stored with the message matches the crc computed off the message contents
*/
def isValid: Boolean = checksum == computeChecksum
/**
* Throw an InvalidMessageException if isValid is false for this message
*/
def ensureValid() {
if(!isValid)
throw new InvalidMessageException("Message is corrupt (stored crc = " + checksum + ", computed crc = " + computeChecksum() + ")")
}
/**
* The complete serialized size of this message in bytes (including crc, header attributes, etc)
*/
def size: Int = buffer.limit
/**
* The length of the key in bytes
*/
def keySize: Int = buffer.getInt(Message.KeySizeOffset)
/**
* Does the message have a key?
*/
def hasKey: Boolean = keySize >= 0
/**
* The position where the payload size is stored
*/
private def payloadSizeOffset = Message.KeyOffset + max(0, keySize)
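  // Illustrative note (added, not in the original source): KeyOffset is 10, so the
  // payload size starts at byte 10 for a key-less message (keySize == -1) and at
  // byte 10 + 3 = 13 for a 3-byte key.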
/**
* The length of the message value in bytes
*/
def payloadSize: Int = buffer.getInt(payloadSizeOffset)
/**
* Is the payload of this message null
*/
def isNull(): Boolean = payloadSize < 0
/**
* The magic version of this message
*/
def magic: Byte = buffer.get(MagicOffset)
/**
* The attributes stored with this message
*/
def attributes: Byte = buffer.get(AttributesOffset)
/**
* The compression codec used with this message
*/
def compressionCodec: CompressionCodec =
CompressionCodec.getCompressionCodec(buffer.get(AttributesOffset) & CompressionCodeMask)
/**
* A ByteBuffer containing the content of the message
*/
def payload: ByteBuffer = sliceDelimited(payloadSizeOffset)
/**
* A ByteBuffer containing the message key
*/
def key: ByteBuffer = sliceDelimited(KeySizeOffset)
/**
* Read a size-delimited byte buffer starting at the given offset
*/
private def sliceDelimited(start: Int): ByteBuffer = {
val size = buffer.getInt(start)
if(size < 0) {
null
} else {
var b = buffer.duplicate
b.position(start + 4)
b = b.slice()
b.limit(size)
b.rewind
b
}
}
override def toString(): String =
"Message(magic = %d, attributes = %d, crc = %d, key = %s, payload = %s)".format(magic, attributes, checksum, key, payload)
override def equals(any: Any): Boolean = {
any match {
case that: Message => this.buffer.equals(that.buffer)
case _ => false
}
}
override def hashCode(): Int = buffer.hashCode
}
|
usakey/kafka
|
core/src/main/scala/kafka/message/Message.scala
|
Scala
|
apache-2.0
| 7,922 |
package mr.merc.map.pathfind
import org.scalatest.funsuite.AnyFunSuite
import mr.merc.map.hex.HexField
import mr.merc.map.hex.Hex
class MercPossibleMovesFinderTest extends AnyFunSuite {
val finder = PossibleMovesFinder
test("sanity check") {
val grid = new TestGrid(new HexField[Hex](5, 5, Hex.hexInit))
val from = grid.hex(0, 1)
val result = finder.findPossibleMoves(grid, from, 2, false)
import grid.hex
assert(result === Set(hex(0, 1), hex(0, 0), hex(1, 0), hex(1, 1), hex(0, 2),
hex(2, 0), hex(2, 1), hex(2, 2), hex(1, 2), hex(0, 3)))
}
test("when longest way is more effective") {
val grid = new TestGrid(new HexField[Hex](5, 5, Hex.hexInit)) {
override def price(from: Hex, h: Hex) = if (h.x == 0 && h.y == 1) 1000 else 1
}
val from = grid.hex(0, 0)
val result = finder.findPossibleMoves(grid, from, 3, false)
import grid.hex
assert(result === Set(hex(0, 0), hex(1, 0), hex(2, 0), hex(2, 1),
hex(1, 1), hex(3, 0), hex(3, 1), hex(2, 2), hex(1, 2), hex(0, 2)))
}
test("when we need to stop") {
val grid = new TestGrid(new HexField[Hex](5, 5, Hex.hexInit)) {
override def cellWhereMovementMustBeStopped(h: Hex) = h.x == 0 && h.y == 1
}
val from = grid.hex(0, 0)
val result = finder.findPossibleMoves(grid, from, 3, false)
import grid.hex
assert(result === Set(hex(0, 0), hex(1, 0), hex(0, 1), hex(2, 0), hex(2, 1),
hex(1, 1), hex(3, 0), hex(3, 1), hex(2, 2), hex(1, 2), hex(0, 2)))
}
}
|
RenualdMarch/merc
|
src/test/scala/mr/merc/map/pathfind/PossibleMovesFinderTest.scala
|
Scala
|
gpl-3.0
| 1,506 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.system.kafka
import kafka.common.TopicAndPartition
import org.apache.samza.util.Logging
import kafka.message.MessageAndOffset
import org.apache.samza.Partition
import kafka.utils.Utils
import org.apache.samza.util.Clock
import java.util.UUID
import kafka.serializer.DefaultDecoder
import kafka.serializer.Decoder
import org.apache.samza.util.BlockingEnvelopeMap
import org.apache.samza.system.SystemStreamPartition
import org.apache.samza.system.IncomingMessageEnvelope
import kafka.consumer.ConsumerConfig
import org.apache.samza.util.ExponentialSleepStrategy
import org.apache.samza.SamzaException
import org.apache.samza.util.TopicMetadataStore
import kafka.api.TopicMetadata
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConversions._
import org.apache.samza.system.SystemAdmin
object KafkaSystemConsumer {
def toTopicAndPartition(systemStreamPartition: SystemStreamPartition) = {
val topic = systemStreamPartition.getStream
val partitionId = systemStreamPartition.getPartition.getPartitionId
TopicAndPartition(topic, partitionId)
}
}
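// Example (added for illustration, not part of the original file): the helper above
// simply drops the system name, e.g.
//   KafkaSystemConsumer.toTopicAndPartition(
//     new SystemStreamPartition("kafka", "events", new Partition(2)))
//   // == TopicAndPartition("events", 2)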
/**
* Maintain a cache of BrokerProxies, returning the appropriate one for the
* requested topic and partition.
*/
private[kafka] class KafkaSystemConsumer(
systemName: String,
systemAdmin: SystemAdmin,
metrics: KafkaSystemConsumerMetrics,
metadataStore: TopicMetadataStore,
clientId: String = "undefined-client-id-%s" format UUID.randomUUID.toString,
timeout: Int = ConsumerConfig.ConsumerTimeoutMs,
bufferSize: Int = ConsumerConfig.SocketBufferSize,
fetchSize: StreamFetchSizes = new StreamFetchSizes,
consumerMinSize: Int = ConsumerConfig.MinFetchBytes,
consumerMaxWait: Int = ConsumerConfig.MaxFetchWaitMs,
/**
* Defines a low water mark for how many messages we buffer before we start
* executing fetch requests against brokers to get more messages. This value
* is divided equally among all registered SystemStreamPartitions. For
* example, if fetchThreshold is set to 50000, and there are 50
* SystemStreamPartitions registered, then the per-partition threshold is
* 1000. As soon as a SystemStreamPartition's buffered message count drops
* below 1000, a fetch request will be executed to get more data for it.
*
* Increasing this parameter will decrease the latency between when a queue
* is drained of messages and when new messages are enqueued, but also leads
* to an increase in memory usage since more messages will be held in memory.
*/
fetchThreshold: Int = 50000,
offsetGetter: GetOffset = new GetOffset("fail"),
deserializer: Decoder[Object] = new DefaultDecoder().asInstanceOf[Decoder[Object]],
keyDeserializer: Decoder[Object] = new DefaultDecoder().asInstanceOf[Decoder[Object]],
retryBackoff: ExponentialSleepStrategy = new ExponentialSleepStrategy,
clock: () => Long = { System.currentTimeMillis }) extends BlockingEnvelopeMap(metrics.registry, new Clock {
def currentTimeMillis = clock()
}, classOf[KafkaSystemConsumerMetrics].getName) with Toss with Logging {
type HostPort = (String, Int)
val brokerProxies = scala.collection.mutable.Map[HostPort, BrokerProxy]()
val topicPartitionsAndOffsets: scala.collection.concurrent.Map[TopicAndPartition, String] = new ConcurrentHashMap[TopicAndPartition, String]()
var perPartitionFetchThreshold = fetchThreshold
def start() {
if (topicPartitionsAndOffsets.size > 0) {
perPartitionFetchThreshold = fetchThreshold / topicPartitionsAndOffsets.size
}
refreshBrokers
}
override def register(systemStreamPartition: SystemStreamPartition, offset: String) {
super.register(systemStreamPartition, offset)
val topicAndPartition = KafkaSystemConsumer.toTopicAndPartition(systemStreamPartition)
val existingOffset = topicPartitionsAndOffsets.getOrElseUpdate(topicAndPartition, offset)
// register the older offset in the consumer
if (systemAdmin.offsetComparator(existingOffset, offset) >= 0) {
topicPartitionsAndOffsets.replace(topicAndPartition, offset)
}
metrics.registerTopicAndPartition(KafkaSystemConsumer.toTopicAndPartition(systemStreamPartition))
}
def stop() {
brokerProxies.values.foreach(_.stop)
}
protected def createBrokerProxy(host: String, port: Int): BrokerProxy = {
new BrokerProxy(host, port, systemName, clientId, metrics, sink, timeout, bufferSize, fetchSize, consumerMinSize, consumerMaxWait, offsetGetter)
}
protected def getHostPort(topicMetadata: TopicMetadata, partition: Int): Option[(String, Int)] = {
// Whatever we do, we can't say Broker, even though we're
// manipulating it here. Broker is a private type and Scala doesn't seem
// to care about that as long as you don't explicitly declare its type.
val brokerOption = topicMetadata
.partitionsMetadata
.find(_.partitionId == partition)
.flatMap(_.leader)
brokerOption match {
case Some(broker) => Some(broker.host, broker.port)
case _ => None
}
}
def refreshBrokers {
var tpToRefresh = topicPartitionsAndOffsets.keySet.toList
info("Refreshing brokers for: %s" format topicPartitionsAndOffsets)
retryBackoff.run(
loop => {
val topics = tpToRefresh.map(_.topic).toSet
val topicMetadata = TopicMetadataCache.getTopicMetadata(topics, systemName, (topics: Set[String]) => metadataStore.getTopicInfo(topics))
// addTopicPartition one at a time, leaving the to-be-done list intact in case of exceptions.
// This avoids trying to re-add the same topic partition repeatedly
def refresh(tp: List[TopicAndPartition]) = {
val head :: rest = tpToRefresh
// refreshBrokers can be called from abdicate and refreshDropped,
// both of which are triggered from BrokerProxy threads. To prevent
// accidentally creating multiple objects for the same broker, or
// accidentally not updating the topicPartitionsAndOffsets variable,
// we need to lock.
this.synchronized {
// Check if we still need this TopicAndPartition inside the
// critical section. If we don't, then skip it.
topicPartitionsAndOffsets.get(head) match {
case Some(nextOffset) =>
getHostPort(topicMetadata(head.topic), head.partition) match {
case Some((host, port)) =>
val brokerProxy = brokerProxies.getOrElseUpdate((host, port), createBrokerProxy(host, port))
brokerProxy.addTopicPartition(head, Option(nextOffset))
brokerProxy.start
debug("Claimed topic-partition (%s) for (%s)".format(head, brokerProxy))
topicPartitionsAndOffsets -= head
case None => info("No metadata available for: %s. Will try to refresh and add to a consumer thread later." format head)
}
case _ => debug("Ignoring refresh for %s because we already added it from another thread." format head)
}
}
rest
}
while (!tpToRefresh.isEmpty) {
tpToRefresh = refresh(tpToRefresh)
}
loop.done
},
(exception, loop) => {
warn("While refreshing brokers for %s: %s. Retrying." format (tpToRefresh.head, exception))
debug("Exception detail:", exception)
})
}
val sink = new MessageSink {
var lastDroppedRefresh = clock()
def refreshDropped() {
if (topicPartitionsAndOffsets.size > 0 && clock() - lastDroppedRefresh > 10000) {
refreshBrokers
lastDroppedRefresh = clock()
}
}
def setIsAtHighWatermark(tp: TopicAndPartition, isAtHighWatermark: Boolean) {
setIsAtHead(toSystemStreamPartition(tp), isAtHighWatermark)
}
def needsMoreMessages(tp: TopicAndPartition) = {
getNumMessagesInQueue(toSystemStreamPartition(tp)) <= perPartitionFetchThreshold
}
def addMessage(tp: TopicAndPartition, msg: MessageAndOffset, highWatermark: Long) = {
trace("Incoming message %s: %s." format (tp, msg))
val systemStreamPartition = toSystemStreamPartition(tp)
val isAtHead = highWatermark == msg.offset
val offset = msg.offset.toString
val key = if (msg.message.key != null) {
keyDeserializer.fromBytes(Utils.readBytes(msg.message.key))
} else {
null
}
val message = if (!msg.message.isNull) {
deserializer.fromBytes(Utils.readBytes(msg.message.payload))
} else {
null
}
put(systemStreamPartition, new IncomingMessageEnvelope(systemStreamPartition, offset, key, message))
setIsAtHead(systemStreamPartition, isAtHead)
}
def abdicate(tp: TopicAndPartition, nextOffset: Long) {
info("Abdicating for %s" format (tp))
topicPartitionsAndOffsets += tp -> nextOffset.toString
refreshBrokers
}
private def toSystemStreamPartition(tp: TopicAndPartition) = {
new SystemStreamPartition(systemName, tp.topic, new Partition(tp.partition))
}
}
}
|
davidzchen/samza
|
samza-kafka/src/main/scala/org/apache/samza/system/kafka/KafkaSystemConsumer.scala
|
Scala
|
apache-2.0
| 10,032 |
package io.gatling.amqp.infra
import java.util.concurrent.atomic._
import akka.actor.Props
import com.rabbitmq.client._
import io.gatling.amqp.config._
import io.gatling.amqp.data._
import io.gatling.amqp.event._
import io.gatling.core.result.writer.StatsEngine
import io.gatling.core.session.Session
import io.gatling.core.util.TimeHelper.nowMillis
import scala.util._
class AmqpPublisher(actorName: String)(implicit amqp: AmqpProtocol) extends AmqpActor {
private val nacker = amqp.nacker
private val isConfirmMode = amqp.isConfirmMode
private def sendEvent(event: AmqpEvent): Unit = nacker ! event
// private def sendEvent(event: AmqpEvent): Unit = amqp.event.publish(event)
override def preStart(): Unit = {
super.preStart()
if (isConfirmMode) {
channel.confirmSelect()
channel.addConfirmListener(new ConfirmListener() {
def handleAck (no: Long, multi: Boolean): Unit = {
sendEvent(AmqpPublishAcked (actorName, no.toInt, multi, nowMillis))
}
def handleNack(no: Long, multi: Boolean): Unit =
sendEvent(AmqpPublishNacked(actorName, no.toInt, multi, nowMillis))
})
}
}
private val localPublishSeqNoCounter = new AtomicInteger(1)
private def getNextPublishSeqNo: Int = {
if (isConfirmMode)
channel.getNextPublishSeqNo.toInt
else
localPublishSeqNoCounter.getAndIncrement
}
override def receive = {
case AmqpPublishRequest(req, session) if isConfirmMode =>
publishAsync(req, session)
case AmqpPublishRequest(req, session) =>
publishSync(req, session)
}
protected def publishSync(req: PublishRequest, session: Session): Unit = {
import req._
val startedAt = nowMillis
val no: Int = getNextPublishSeqNo
val event = AmqpPublishing(actorName, no, nowMillis, req, session)
Try {
channel.basicPublish(exchange.name, routingKey, props, bytes)
} match {
case Success(_) =>
sendEvent(AmqpPublished(actorName, no, nowMillis, event))
case Failure(e) =>
sendEvent(AmqpPublishFailed(actorName, no, nowMillis, e))
log.error(s"basicPublish($exchange) failed", e)
}
}
protected def publishAsync(req: PublishRequest, session: Session): Unit = {
import req._
val no: Int = getNextPublishSeqNo
sendEvent(AmqpPublishing(actorName, no, nowMillis, req, session))
try {
channel.basicPublish(exchange.name, routingKey, props, bytes)
} catch {
case e: Exception =>
sendEvent(AmqpPublishFailed(actorName, no, nowMillis, e))
log.error(s"basicPublish($exchange) failed", e)
}
}
}
object AmqpPublisher {
def props(name: String, amqp: AmqpProtocol) = Props(classOf[AmqpPublisher], name, amqp)
}
|
maiha/gatling-amqp
|
src/main/scala/io/gatling/amqp/infra/AmqpPublisher.scala
|
Scala
|
mit
| 2,742 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.util.sparql
import de.fuberlin.wiwiss.silk.util.Uri
import java.util.logging.Logger
import de.fuberlin.wiwiss.silk.entity.{SparqlRestriction, ForwardOperator, Path}
/**
* Retrieves the most frequent paths of a number of random sample entities.
*
* It is typically faster than SparqlAggregatePathsCollector but also less precise.
*
* The limitations of the current implementation are:
* - It does only return forward paths of length 1
* - It returns a maximum of 100 paths
*/
object SparqlSamplePathsCollector extends SparqlPathsCollector {
/**Number of sample entities */
private val maxEntities = 100
/**The minimum frequency of a property to be considered relevant */
private val MinFrequency = 0.01
private implicit val logger = Logger.getLogger(SparqlSamplePathsCollector.getClass.getName)
def apply(endpoint: SparqlEndpoint, restrictions: SparqlRestriction, limit: Option[Int]): Traversable[(Path, Double)] = {
val sampleEntities = {
if (restrictions.isEmpty)
getAllEntities(endpoint)
else
getEntities(endpoint, restrictions)
}
getEntitiesPaths(endpoint, sampleEntities, restrictions.variable, limit.getOrElse(100))
}
private def getAllEntities(endpoint: SparqlEndpoint): Traversable[String] = {
val sparql = "SELECT ?s WHERE { ?s ?p ?o }"
val results = endpoint.query(sparql, maxEntities)
results.map(_("s").value)
}
private def getEntities(endpoint: SparqlEndpoint, restrictions: SparqlRestriction): Traversable[String] = {
val sparql = "SELECT ?" + restrictions.variable + " WHERE { " + restrictions.toSparql + " }"
val results = endpoint.query(sparql, maxEntities)
results.map(_(restrictions.variable).value)
}
private def getEntitiesPaths(endpoint: SparqlEndpoint, entities: Traversable[String], variable: String, limit: Int): Traversable[(Path, Double)] = {
logger.info("Searching for relevant properties in " + endpoint)
val entityArray = entities.toArray
//Get all properties
val properties = entityArray.flatMap(entity => getEntityProperties(endpoint, entity, variable, limit))
//Compute the frequency of each property
val propertyFrequencies = properties.groupBy(x => x).mapValues(_.size.toDouble / entityArray.size).toList
//Choose the relevant properties
val relevantProperties = propertyFrequencies.filter { case (uri, frequency) => frequency > MinFrequency }
.sortWith(_._2 > _._2).take(limit)
logger.info("Found " + relevantProperties.size + " relevant properties in " + endpoint)
relevantProperties
}
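  // Worked example (added for illustration, not part of the original file): with the
  // defaults above, sampling 100 entities and seeing a property on 40 of them yields a
  // frequency of 40.0 / 100 = 0.4, which exceeds MinFrequency (0.01), so that path is kept.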
private def getEntityProperties(endpoint: SparqlEndpoint, entityUri: String, variable: String, limit: Int): Traversable[Path] = {
var sparql = ""
sparql += "SELECT DISTINCT ?p \\n"
sparql += "WHERE {\\n"
sparql += " <" + entityUri + "> ?p ?o\\n"
sparql += "}"
for (result <- endpoint.query(sparql, limit); binding <- result.values) yield
Path(variable, ForwardOperator(Uri.fromURI(binding.value)) :: Nil)
}
}
|
fusepoolP3/p3-silk
|
silk-core/src/main/scala/de/fuberlin/wiwiss/silk/util/sparql/SparqlSamplePathsCollector.scala
|
Scala
|
apache-2.0
| 3,665 |
package com.datastax.spark.connector.mapper
import com.datastax.driver.core.ProtocolVersion
import com.datastax.spark.connector.ColumnRef
import com.datastax.spark.connector.cql.{StructDef, TableDef}
import scala.reflect.runtime.universe._
/** Produces [[ColumnMapForReading]] or [[ColumnMapForWriting]] objects that map
* class `T` properties to columns in a given Cassandra table.
*
* You can associate a custom `ColumnMapper` object with any of your classes by
* providing an implicit `ColumnMapper` in the companion object of the mapped class:
* {{{
* CREATE TABLE kv(key int primary key, value text);
* }}}
* {{{
* case class KeyValue(k: Int, v: String)
*
* object KeyValue {
* implicit val columnMapper =
* new DefaultColumnMapper[KeyValue](Map("k" -> "key", "v" -> "value"))
* }
* }}}
*/
trait ColumnMapper[T] {
/** Provides a mapping between given table or UDT and properties of type `T`,
* useful for creating objects of type `T`. Throws [[IllegalArgumentException]] if
* `selectedColumns` does not provide some columns needed to instantiate object of type `T`*/
def columnMapForReading(struct: StructDef, selectedColumns: IndexedSeq[ColumnRef]): ColumnMapForReading
/** Provides a mapping between given table or UDT and properties of type `T`,
* useful for reading property values of type `T` and writing them to Cassandra.
* Throws [[IllegalArgumentException]] if `selectedColumns` contains some columns that
* don't have matching getters. */
def columnMapForWriting(struct: StructDef, selectedColumns: IndexedSeq[ColumnRef]): ColumnMapForWriting
/** Provides a definition of the table that class `T` could be saved to. */
def newTable(
keyspaceName: String,
tableName: String,
protocolVersion: ProtocolVersion = ProtocolVersion.NEWEST_SUPPORTED): TableDef
}
/** Provides implicit [[ColumnMapper]] used for mapping all non-tuple classes. */
trait LowPriorityColumnMapper {
implicit def defaultColumnMapper[T : TypeTag]: ColumnMapper[T] =
new DefaultColumnMapper[T]
}
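// Illustrative note (added, not part of the original file): defining defaultColumnMapper
// in a separate low-priority trait lets the more specific tuple mappers below win
// implicit search for tuple types, while everything else falls back to DefaultColumnMapper:
//   implicitly[ColumnMapper[(Int, String)]]  // resolves to tuple2ColumnMapper
//   implicitly[ColumnMapper[MyCaseClass]]    // resolves to defaultColumnMapper (MyCaseClass is hypothetical)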
/** Provides implicit [[ColumnMapper]] objects used for mapping tuples. */
object ColumnMapper extends LowPriorityColumnMapper {
implicit def tuple1ColumnMapper[A1 : TypeTag] =
new TupleColumnMapper[Tuple1[A1]]
implicit def tuple2ColumnMapper[A1 : TypeTag, A2 : TypeTag] =
new TupleColumnMapper[(A1, A2)]
implicit def tuple3ColumnMapper[A1 : TypeTag, A2 : TypeTag, A3 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3)]
implicit def tuple4ColumnMapper[A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4)]
implicit def tuple5ColumnMapper[A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5)]
implicit def tuple6ColumnMapper[A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6)]
implicit def tuple7ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7)]
implicit def tuple8ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8)]
implicit def tuple9ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9)]
implicit def tuple10ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10)]
implicit def tuple11ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11)]
implicit def tuple12ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12)]
implicit def tuple13ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13)]
implicit def tuple14ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag, A14 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14)]
implicit def tuple15ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag, A14 : TypeTag, A15 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15)]
implicit def tuple16ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag, A14 : TypeTag, A15 : TypeTag, A16 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16)]
implicit def tuple17ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag, A14 : TypeTag, A15 : TypeTag, A16 : TypeTag,
A17 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17)]
implicit def tuple18ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag, A14 : TypeTag, A15 : TypeTag, A16 : TypeTag,
A17 : TypeTag, A18 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18)]
implicit def tuple19ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag, A14 : TypeTag, A15 : TypeTag, A16 : TypeTag,
A17 : TypeTag, A18 : TypeTag, A19 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19)]
implicit def tuple20ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag, A14 : TypeTag, A15 : TypeTag, A16 : TypeTag,
A17 : TypeTag, A18 : TypeTag, A19 : TypeTag, A20 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20)]
implicit def tuple21ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag, A14 : TypeTag, A15 : TypeTag, A16 : TypeTag,
A17 : TypeTag, A18 : TypeTag, A19 : TypeTag, A20 : TypeTag, A21 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21)]
implicit def tuple22ColumnMapper[
A1 : TypeTag, A2 : TypeTag, A3 : TypeTag, A4 : TypeTag, A5 : TypeTag, A6 : TypeTag,
A7 : TypeTag, A8 : TypeTag, A9 : TypeTag, A10 : TypeTag, A11 : TypeTag,
A12 : TypeTag, A13 : TypeTag, A14 : TypeTag, A15 : TypeTag, A16 : TypeTag,
A17 : TypeTag, A18 : TypeTag, A19 : TypeTag, A20 : TypeTag, A21 : TypeTag, A22 : TypeTag] =
new TupleColumnMapper[(A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A11, A12, A13, A14, A15, A16, A17, A18, A19, A20, A21, A22)]
}
|
shashwat7/spark-cassandra-connector
|
spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/mapper/ColumnMapper.scala
|
Scala
|
apache-2.0
| 8,519 |
package ws.vinta.albedo.utils
import org.apache.spark.sql.types._
object SchemaUtils {
def equalsIgnoreNullability(left: DataType, right: DataType): Boolean = {
(left, right) match {
case (ArrayType(leftElementType, _), ArrayType(rightElementType, _)) =>
equalsIgnoreNullability(leftElementType, rightElementType)
case (MapType(leftKeyType, leftValueType, _), MapType(rightKeyType, rightValueType, _)) =>
equalsIgnoreNullability(leftKeyType, rightKeyType) && equalsIgnoreNullability(leftValueType, rightValueType)
case (StructType(leftFields), StructType(rightFields)) =>
leftFields.length == rightFields.length && leftFields.zip(rightFields).forall { case (l, r) =>
l.name == r.name && equalsIgnoreNullability(l.dataType, r.dataType)
}
case (l, r) => l == r
}
}
def checkColumnType(schema: StructType, colName: String, expectedDataType: DataType): Unit = {
val actualDataType = schema(colName).dataType
require(actualDataType == expectedDataType, s"Column $colName must be of type $expectedDataType but was actually $actualDataType.")
}
}
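// Hedged usage sketch (added by the editor, not part of the original file):
// exercises the two helpers above. The schemas below are illustrative
// assumptions, not types used elsewhere in this project.
object SchemaUtilsExample {
  def main(args: Array[String]): Unit = {
    // Same element type, different nullability: considered equal.
    val left = ArrayType(IntegerType, containsNull = true)
    val right = ArrayType(IntegerType, containsNull = false)
    assert(SchemaUtils.equalsIgnoreNullability(left, right))
    // checkColumnType passes silently on a match and throws
    // IllegalArgumentException (via require) on a mismatch.
    val schema = StructType(Seq(StructField("features", DoubleType, nullable = false)))
    SchemaUtils.checkColumnType(schema, "features", DoubleType)
  }
}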
|
vinta/albedo
|
src/main/scala/ws/vinta/albedo/utils/SchemaUtils.scala
|
Scala
|
mit
| 1,132 |
package io.iohk.ethereum.logger
import java.util.concurrent.atomic.AtomicInteger
import akka.actor.{ActorRef, ActorSystem}
import akka.dispatch._
import akka.event.Logging
import com.typesafe.config.Config
/**
 * Logs the mailbox size when it exceeds the configured limit, at most once per
 * second, as messages are enqueued and dequeued.
*
* Configuration:
* <pre>
* akka.actor.default-mailbox {
 *   mailbox-type = io.iohk.ethereum.logger.LoggingMailboxType
* size-limit = 20
* }
* </pre>
*/
class LoggingMailboxType(settings: ActorSystem.Settings, config: Config)
extends MailboxType
with ProducesMessageQueue[UnboundedMailbox.MessageQueue] {
override def create(owner: Option[ActorRef], system: Option[ActorSystem]): LoggingMailbox = (owner, system) match {
case (Some(o), Some(s)) =>
val sizeLimit = config.getInt("size-limit")
val mailbox = new LoggingMailbox(o, s, sizeLimit)
mailbox
case _ => throw new IllegalArgumentException("no mailbox owner or system given")
}
}
class LoggingMailbox(owner: ActorRef, system: ActorSystem, sizeLimit: Int) extends UnboundedMailbox.MessageQueue {
private val interval = 1000000000L // 1 s, in nanoseconds
private lazy val log = Logging(system, classOf[LoggingMailbox])
private val path = owner.path.toString
@volatile private var logTime: Long = System.nanoTime()
private val queueSize = new AtomicInteger
private val dequeueCount = new AtomicInteger
override def dequeue(): Envelope = {
val x = super.dequeue()
if (x ne null) {
val size = queueSize.decrementAndGet()
dequeueCount.incrementAndGet()
logSize(size)
}
x
}
override def enqueue(receiver: ActorRef, handle: Envelope): Unit = {
super.enqueue(receiver, handle)
val size = queueSize.incrementAndGet()
logSize(size)
}
def logSize(size: Int): Unit =
if (size >= sizeLimit) {
val now = System.nanoTime()
if (now - logTime > interval) {
val msgPerSecond = dequeueCount.get.toDouble / ((now - logTime).toDouble / interval)
val actorName = owner.path.name
logTime = now
dequeueCount.set(0)
log.info("Mailbox size for [{}] is [{}], processing [{}] msg/s", owner, size, f"$msgPerSecond%2.2f")
}
}
override def numberOfMessages: Int = queueSize.get
override def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = {
super.cleanUp(owner, deadLetters)
}
}
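// Hedged usage sketch (added by the editor, not part of the original file):
// wires the mailbox above to a single actor through configuration. The
// mailbox id "logging-mailbox", the size limit and the Worker actor are
// illustrative assumptions.
object LoggingMailboxExample extends App {
  import akka.actor.{Actor, Props}
  import com.typesafe.config.ConfigFactory
  private val config = ConfigFactory.parseString(
    """
      |logging-mailbox {
      |  mailbox-type = "io.iohk.ethereum.logger.LoggingMailboxType"
      |  size-limit = 1000
      |}
    """.stripMargin)
  class Worker extends Actor {
    def receive: Receive = { case _ => Thread.sleep(1) } // deliberately slow consumer
  }
  private val system = ActorSystem("logging-mailbox-example", config)
  // Props.withMailbox points the actor at the configured mailbox section above.
  private val worker = system.actorOf(Props(new Worker).withMailbox("logging-mailbox"), "worker")
  (1 to 10000).foreach(worker ! _) // once the queue exceeds 1000, sizes are logged at most once per second
  system.terminate()
}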
|
input-output-hk/etc-client
|
src/main/scala/io/iohk/ethereum/logger/LoggingMailbox.scala
|
Scala
|
mit
| 2,477 |
package lables.pimpmylib
import lables.model._
object LabelPrinter {
  // Function using view-bound syntactic sugar
def printLabelSmall[T <% CanBeLabeled](from : T): Unit = {
val toPrint = LabelMaker.createLabel(from)
println(s"Small Label: $toPrint")
}
  // Function taking the conversion to CanBeLabeled as a curried implicit parameter
def printLabelLarge[T](from : T)(implicit convert: T => CanBeLabeled): Unit = {
val toPrint = LabelMaker.createLabel(from)
println(s"Large Label: $toPrint")
}
}
object Main extends App {
implicit def addressToLabel(from: Address) : CanBeLabeled = new CanBeLabeled {
    override def label: String = s"[local] ${from.plain}"
}
implicit def productToLabel(from: Product) : CanBeLabeled = new CanBeLabeled {
    override def label: String = s"[local] ${from.name}"
}
val printer = LabelPrinter
val address = Address("My Address on Earth is...")
printer.printLabelSmall(address)
printer.printLabelLarge(address)
val product = Product("Headphones")
printer.printLabelSmall(product)
printer.printLabelLarge(product)
}
|
tupol/scala-patterns-tc-pml
|
src/main/scala/lables/pimpmylib/LabelPrinter.scala
|
Scala
|
apache-2.0
| 1,088 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.jobs
import java.io.File
import org.apache.hadoop.conf.Configuration
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.classpath.ClassPathUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class JobUtilsTest extends Specification {
"JobUtils" should {
"configure libjars based on search paths" in {
val testFolder = new File(getClass.getClassLoader.getResource("fakejars").getFile)
val conf = new Configuration()
val search = Seq("jar1", "jar3")
val paths = Iterator(() => ClassPathUtils.loadJarsFromFolder(testFolder))
JobUtils.setLibJars(conf, search, paths)
val libjars = conf.get("tmpjars")
libjars must contain("fakejars/jar1.jar")
libjars must contain("fakejars/nested/jar3.jar")
}
}
}
|
elahrvivaz/geomesa
|
geomesa-jobs/src/test/scala/org/locationtech/geomesa/jobs/JobUtilsTest.scala
|
Scala
|
apache-2.0
| 1,337 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.features.avro
import org.apache.avro.file.{DataFileStream, DataFileWriter}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
* AvroDataFiles are binary Avro files (see https://avro.apache.org/) that encode
* SimpleFeatures using a custom avro schema per SimpleFeatureType. AvroDataFiles
* are meant to:
* 1. Provide binary longer-term storage in filesystems for SimpleFeatures
* 2. Carry the SimpleFeatureType and feature name along with the data
* using avro metadata
* 3. Be self-describing outside of Geotools as much as possible
*
* You may want to consider gzipping your avro data file for better compression
*
* Version 3 supports Bytes as a type in the SFT
*/
object AvroDataFile {
val SftNameKey = "sft.name"
val SftSpecKey = "sft.spec"
val VersionKey = "version"
private[avro] val Version: Long = 3L
def setMetaData(dfw: DataFileWriter[SimpleFeature], sft: SimpleFeatureType): Unit = {
dfw.setMeta(VersionKey, Version)
dfw.setMeta(SftNameKey, sft.getTypeName)
dfw.setMeta(SftSpecKey, SimpleFeatureTypes.encodeType(sft))
}
/**
* Backwards compatible...Version 2 can parse v1
* @param dfs
* @return
*/
def canParse(dfs: DataFileStream[_ <: SimpleFeature]): Boolean = {
dfs.getMetaKeys.contains(VersionKey) &&
dfs.getMetaLong(VersionKey) <= Version &&
dfs.getMetaString(SftNameKey) != null &&
dfs.getMetaString(SftSpecKey) != null
}
def getSft(dfs: DataFileStream[_ <: SimpleFeature]): SimpleFeatureType = {
val sftName = dfs.getMetaString(SftNameKey)
val sftString = dfs.getMetaString(SftSpecKey)
SimpleFeatureTypes.createType(sftName, sftString)
}
}
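// Hedged usage sketch (added by the editor, not part of the original file):
// shows how a reader is expected to use the metadata helpers above once it
// has opened a DataFileStream over serialized SimpleFeatures.
object AvroDataFileExample {
  def describe(dfs: DataFileStream[_ <: SimpleFeature]): Unit = {
    if (AvroDataFile.canParse(dfs)) {
      val sft = AvroDataFile.getSft(dfs)
      println(s"AvroDataFile v${dfs.getMetaLong(AvroDataFile.VersionKey)}: " +
        s"type ${sft.getTypeName} with ${sft.getAttributeCount} attributes")
    } else {
      println("Stream is missing the expected AvroDataFile metadata")
    }
  }
}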
|
nagavallia/geomesa
|
geomesa-features/geomesa-feature-avro/src/main/scala/org/locationtech/geomesa/features/avro/AvroDataFile.scala
|
Scala
|
apache-2.0
| 2,258 |
package se.marcuslonnberg.stark.site
import org.scalatest.{FreeSpec, Matchers}
import se.marcuslonnberg.stark.site.Implicits._
import spray.http.Uri
import spray.http.Uri.Path
class RelativePathSpec extends FreeSpec with Matchers {
implicit class PathString(request: String) {
def shouldBeRelativeTo(site: String): Unit = {
registerTest(s"'$site' should match request '$request'") {
assert(Uri.Path(request).isRelativeTo(Uri.Path(site)))
}
}
def shouldNotBeRelativeTo(site: String): Unit = {
registerTest(s"'$site' should not match request '$request'") {
assert(!Uri.Path(request).isRelativeTo(Uri.Path(site)))
}
}
}
"isRelativeTo" - {
"" shouldBeRelativeTo ""
"/" shouldBeRelativeTo ""
"/" shouldBeRelativeTo "/"
"//" shouldBeRelativeTo "/"
"/abc" shouldBeRelativeTo "/"
"/abc" shouldBeRelativeTo "/abc"
"/abc/" shouldBeRelativeTo "/abc"
"/abc/" shouldBeRelativeTo "/abc/"
"/abc/xyz" shouldBeRelativeTo "/abc"
"/abc/xyz" shouldBeRelativeTo "/abc/"
"" shouldNotBeRelativeTo "/"
"/" shouldNotBeRelativeTo "//"
"" shouldNotBeRelativeTo "/abc"
"/xyz" shouldNotBeRelativeTo "/abc"
"/abcxyz" shouldNotBeRelativeTo "/abc"
"/abc" shouldNotBeRelativeTo "/abc/"
"/abc" shouldNotBeRelativeTo "/abc/xyz"
}
"relativizeTo" - {
Path("").relativizeTo(Path("")) shouldEqual Some(Path(""))
Path("/").relativizeTo(Path("")) shouldEqual Some(Path("/"))
Path("/abc").relativizeTo(Path("")) shouldEqual Some(Path("/abc"))
Path("/abc").relativizeTo(Path("/")) shouldEqual Some(Path("/abc"))
}
}
|
FredrikWendt/stark
|
src/test/scala/se/marcuslonnberg/stark/site/RelativePathSpec.scala
|
Scala
|
mit
| 1,632 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.lewuathe.dllib.example
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.SparkContext
import org.apache.spark.sql.{DataFrame, SQLContext}
import com.lewuathe.dllib.graph.Graph
import com.lewuathe.dllib.layer.{AffineLayer, SoftmaxLayer}
import com.lewuathe.dllib.model.InMemoryModel
import com.lewuathe.dllib.network.Network
import com.lewuathe.dllib.solver.MultiLayerPerceptron
object XORApp {
case class Sample(label: Double, features: Vector)
def createTrainingData(sqlContext: SQLContext): DataFrame = {
import sqlContext.implicits._
val sc = sqlContext.sparkContext
val data = sc
.parallelize(
Seq(
(1.0, Array(1.0, 0.0)),
(1.0, Array(0.0, 1.0)),
(0.0, Array(0.0, 0.0)),
(0.0, Array(1.0, 1.0))
))
.map({
case (label, features) => Sample(label, Vectors.dense(features))
})
data.toDF()
}
var miniBatchFraction = 1.0
var numIterations = 100
var learningRate = 0.7
def submit(sc: SparkContext): Unit = {
val sqlContext = new SQLContext(sc)
val df = createTrainingData(sqlContext)
val nn3Graph = new Graph(
Array(
new AffineLayer(2, 2),
new SoftmaxLayer(2, 2)
))
val nn3Model = InMemoryModel(nn3Graph)
val nn3 = Network(nn3Model, nn3Graph)
val multilayerPerceptron = new MultiLayerPerceptron("XOR", nn3)
multilayerPerceptron.setNumIterations(numIterations)
multilayerPerceptron.miniBatchFraction = miniBatchFraction
multilayerPerceptron.learningRate = learningRate
val model = multilayerPerceptron.fit(createTrainingData(sqlContext))
val testData = Seq(
Array(1.0, 0.0),
Array(0.0, 1.0),
Array(0.0, 0.0),
Array(1.0, 1.0)
)
val result = model.transform(createTrainingData(sqlContext))
result.show()
}
def main(args: Array[String]) {}
}
|
Lewuathe/neurallib
|
src/main/scala/com/lewuathe/dllib/example/XORApp.scala
|
Scala
|
mit
| 2,731 |
package mesosphere.marathon.api.v1.json
import org.junit.Test
import org.junit.Assert._
import com.fasterxml.jackson.databind.ObjectMapper
import mesosphere.marathon.Protos.Constraint
import mesosphere.marathon.api.v2.json.MarathonModule
/**
* @author Tobi Knaup
*/
class ConstraintTest {
@Test
def testDeserialize() {
val mapper = new ObjectMapper
mapper.registerModule(new MarathonModule)
def shouldMatch(json: String, field: String, operator: Constraint.Operator, value: String = "") {
val constraint = mapper.readValue(json, classOf[Constraint])
assertEquals(field, constraint.getField)
assertEquals(operator, constraint.getOperator)
assertEquals(value, constraint.getValue)
}
shouldMatch("""["hostname","UNIQUE"]""", "hostname", Constraint.Operator.UNIQUE)
shouldMatch("""["rackid","GROUP_BY","1"]""", "rackid", Constraint.Operator.GROUP_BY, "1")
shouldMatch("""["jdk","LIKE","7"]""", "jdk", Constraint.Operator.LIKE, "7")
}
@Test
def testSerialize() {
val mapper = new ObjectMapper
mapper.registerModule(new MarathonModule)
def shouldMatch(expected: String, constraint: Constraint) {
val actual = mapper.writeValueAsString(constraint)
assertEquals(expected, actual)
}
shouldMatch("""["hostname","UNIQUE"]""", Constraint.newBuilder.setField("hostname")
.setOperator(Constraint.Operator.UNIQUE).build)
shouldMatch("""["rackid","GROUP_BY","1"]""", Constraint.newBuilder.setField("rackid")
.setOperator(Constraint.Operator.GROUP_BY).setValue("1").build)
shouldMatch("""["jdk","LIKE","7"]""", Constraint.newBuilder.setField("jdk")
.setOperator(Constraint.Operator.LIKE).setValue("7").build)
}
}
|
MiLk/marathon
|
src/test/scala/mesosphere/marathon/api/v1/json/ConstraintTest.scala
|
Scala
|
apache-2.0
| 1,729 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package com.github.nscala_time.time
import org.joda.time._
import com.github.nscala_time.PimpedType
class RichInt(val underlying: Int) extends AnyVal with PimpedType[Int] {
// These units of time can build durations or periods.
// At most we lose a leap second. (Unless someone adopts
// leap minutes).
def millis = DurationBuilder(Period.millis(underlying))
def seconds = DurationBuilder(Period.seconds(underlying))
def minutes = DurationBuilder(Period.minutes(underlying))
def hours = DurationBuilder(Period.hours(underlying))
// These units of time can only be periods. At this
// point if we made durations automatically we'd start
// getting into trouble with daylight savings time,
// monthly differences, leap years, etc.
def days = Period.days(underlying)
def weeks = Period.weeks(underlying)
def months = Period.months(underlying)
def years = Period.years(underlying)
// See above.
def milli = DurationBuilder(Period.millis(underlying))
def second = DurationBuilder(Period.seconds(underlying))
def minute = DurationBuilder(Period.minutes(underlying))
def hour = DurationBuilder(Period.hours(underlying))
// See above.
def day = Period.days(underlying)
def week = Period.weeks(underlying)
def month = Period.months(underlying)
def year = Period.years(underlying)
def *(period: Period) = period.multipliedBy(underlying)
}
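// Hedged usage sketch (added by the editor, not part of the original file):
// assumes the standard nscala-time imports, which bring the implicit
// conversion from Int to RichInt into scope.
object RichIntExample {
  import com.github.nscala_time.time.Imports._
  // hours/minutes build exact durations; weeks/months stay calendar periods.
  val meeting = DateTime.now + 2.hours + 45.minutes
  val quarter: Period = 3.months
  val sprint: Period = 2.weeks
}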
|
xuwei-k/nscala-time
|
src/main/scala/com/github/nscala_time/time/RichInt.scala
|
Scala
|
apache-2.0
| 2,003 |
package streams
import common._
/**
* This trait represents the layout and building blocks of the game
*/
trait GameDef {
/**
* The case class `Pos` encodes positions in the terrain.
*
* IMPORTANT NOTE
* - The `row` coordinate denotes the position on the vertical axis
* - The `col` coordinate is used for the horizontal axis
* - The coordinates increase when moving down and right
*
* Illustration:
*
* 0 1 2 3 <- col axis
* 0 o o o o
* 1 o o o o
* 2 o # o o # is at position Pos(2, 1)
* 3 o o o o
*
* ^
* |
*
* row axis
*/
case class Pos(row: Int, col: Int) {
/** The position obtained by changing the `row` coordinate by `d` */
def deltaRow(d: Int): Pos = copy(row = row + d)
/** The position obtained by changing the `col` coordinate by `d` */
def deltaCol(d: Int): Pos = copy(col = col + d)
}
/**
* The position where the block is located initially.
*
* This value is left abstract, it will be defined in concrete
* instances of the game.
*/
val startPos: Pos
/**
* The target position where the block has to go.
* This value is left abstract.
*/
val goal: Pos
/**
* The terrain is represented as a function from positions to
* booleans. The function returns `true` for every position that
* is inside the terrain.
*
* As explained in the documentation of class `Pos`, the `row` axis
* is the vertical one and increases from top to bottom.
*/
type Terrain = Pos => Boolean
/**
* The terrain of this game. This value is left abstract.
*/
val terrain: Terrain
/**
* In Bloxorz, we can move left, right, Up or down.
* These moves are encoded as case objects.
*/
sealed abstract class Move
case object Left extends Move
case object Right extends Move
case object Up extends Move
case object Down extends Move
/**
* This function returns the block at the start position of
* the game.
*/
def startBlock: Block = Block(startPos, startPos)
/**
* A block is represented by the position of the two cubes that
* it consists of. We make sure that `b1` is lexicographically
* smaller than `b2`.
*/
case class Block(b1: Pos, b2: Pos) {
// checks the requirement mentioned above
require(b1.row <= b2.row && b1.col <= b2.col, "Invalid block position: b1=" + b1 + ", b2=" + b2)
/**
* Returns a block where the `row` coordinates of `b1` and `b2` are
* changed by `d1` and `d2`, respectively.
*/
def deltaRow(d1: Int, d2: Int) = Block(b1.deltaRow(d1), b2.deltaRow(d2))
/**
* Returns a block where the `col` coordinates of `b1` and `b2` are
* changed by `d1` and `d2`, respectively.
*/
def deltaCol(d1: Int, d2: Int) = Block(b1.deltaCol(d1), b2.deltaCol(d2))
/** The block obtained by moving left */
def left = if (isStanding) deltaCol(-2, -1)
else if (b1.row == b2.row) deltaCol(-1, -2)
else deltaCol(-1, -1)
/** The block obtained by moving right */
def right = if (isStanding) deltaCol(1, 2)
else if (b1.row == b2.row) deltaCol(2, 1)
else deltaCol(1, 1)
/** The block obtained by moving up */
def up = if (isStanding) deltaRow(-2, -1)
else if (b1.row == b2.row) deltaRow(-1, -1)
else deltaRow(-1, -2)
/** The block obtained by moving down */
def down = if (isStanding) deltaRow(1, 2)
else if (b1.row == b2.row) deltaRow(1, 1)
else deltaRow(2, 1)
/**
* Returns the list of blocks that can be obtained by moving
* the current block, together with the corresponding move.
*/
def neighbors: List[(Block, Move)] =
(left, Left) :: (right, Right) :: (up, Up) :: (down, Down) :: Nil
/**
* Returns the list of positions reachable from the current block
* which are inside the terrain.
*/
def legalNeighbors: List[(Block, Move)] = neighbors filter (_._1.isLegal)
/**
* Returns `true` if the block is standing.
*/
def isStanding: Boolean = b1 == b2
/**
* Returns `true` if the block is entirely inside the terrain.
*/
def isLegal: Boolean = terrain(b1) && terrain(b2)
}
}
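// Hedged usage sketch (added by the editor, not part of the original file):
// a tiny concrete game definition illustrating how the abstract members fit
// together. The 3x4 board, start and goal positions are made-up values.
object TinyGame extends GameDef {
  private val board = Vector(
    "oooo",
    "oSo-", // '-' marks a hole outside the terrain
    "ooTo")
  val startPos: Pos = Pos(1, 1) // the 'S' cell
  val goal: Pos = Pos(2, 2) // the 'T' cell
  val terrain: Terrain = pos =>
    pos.row >= 0 && pos.row < board.length &&
      pos.col >= 0 && pos.col < board(pos.row).length &&
      board(pos.row)(pos.col) != '-'
  // startBlock.legalNeighbors enumerates the first legal moves of the game.
}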
|
yurii-khomenko/fpScalaSpec
|
c2w2streams/src/main/scala/streams/GameDef.scala
|
Scala
|
gpl-3.0
| 4,450 |
package com.benkolera.slick.pg
import scala.slick.jdbc.{PositionedResult,GetResult}
trait DecodePgObject[A] extends GetResult[A] {
def apply(pr:PositionedResult) =
stringDecode( pr.nextString )
def stringDecode(s:String): A
}
object DecodePgObject extends DecodePgObjects {
def apply[A](f:String => A): DecodePgObject[A] =
new DecodePgObject[A]{
def stringDecode(s:String) = f(s)
}
}
trait DecodePgObjects {
implicit def decodeSqlOption[A:DecodePgObject]: GetResult[Option[A]] =
GetResult{ (pr:PositionedResult) =>
pr.nextStringOption.map(
implicitly[DecodePgObject[A]].stringDecode(_)
)
}
implicit def decodeSqlList[A:DecodePgObject]: GetResult[List[A]] =
DecodePgObject{ (s:String) =>
s.tail.init.split(",").toList match {
case ""::Nil => Nil
case x => x.map( implicitly[DecodePgObject[A]].stringDecode(_) )
}
}
}
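// Hedged usage sketch (added by the editor, not part of the original file):
// defines a decoder for a small wrapper type; the PersonId type is an
// illustrative assumption. With the implicits from DecodePgObjects imported
// (e.g. via DecodePgObject._), GetResult instances for Option[PersonId] and
// List[PersonId] are derived automatically, so a text column such as
// "{1,2,3}" can decode to a List[PersonId].
object DecodePgObjectExample {
  final case class PersonId(value: Int)
  implicit val personIdDecoder: DecodePgObject[PersonId] =
    DecodePgObject(s => PersonId(s.trim.toInt))
}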
|
benkolera/scala-slick-extras
|
src/main/scala/pg/DecodePgObject.scala
|
Scala
|
mit
| 922 |
package net.revenj.database.postgres.converters
import net.revenj.database.postgres.PostgresWriter
object NumberConverter {
private val NUMBERS = {
val arr = new Array[Int](100)
for (i <- arr.indices) {
val first = ((i / 10) + '0').asInstanceOf[Char]
val second = ((i % 10) + '0').asInstanceOf[Char]
val offset = if (i < 10) 1 else 0
arr(i) = (offset << 24) + (first << 8) + second
}
arr
}
private[converters] def write2(number: Int, buffer: Array[Char], start: Int): Unit = {
val pair = NUMBERS(number)
buffer(start) = (pair >> 8).toChar
buffer(start + 1) = pair.toByte.toChar
}
private[converters] def write2(number: Int, sw: PostgresWriter): Unit = {
val pair = NUMBERS(number)
sw.write((pair >> 8).toChar)
sw.write(pair.toByte)
}
private[converters] def write3(number: Int, buffer: Array[Char], start: Int): Unit = {
val div = number / 100
buffer(start) = (div + '0').toChar
val rem = number - div * 100
val pair2 = NUMBERS(rem)
buffer(start + 1) = (pair2 >> 8).toChar
buffer(start + 2) = pair2.toByte.toChar
}
private[converters] def write4(number: Int, buffer: Array[Char], start: Int): Unit = {
val div = number / 100
val pair1 = NUMBERS(div)
buffer(start) = (pair1 >> 8).toChar
buffer(start + 1) = pair1.toByte.toChar
val rem = number - div * 100
val pair2 = NUMBERS(rem)
buffer(start + 2) = (pair2 >> 8).toChar
buffer(start + 3) = pair2.toByte.toChar
}
private[converters] def write4(number: Int, sw: PostgresWriter): Unit = {
val div = number / 100
val pair1 = NUMBERS(div)
sw.write((pair1 >> 8).toChar)
sw.write(pair1.toByte.toChar)
val rem = number - div * 100
val pair2 = NUMBERS(rem)
sw.write((pair2 >> 8).toChar)
sw.write(pair2.toByte)
}
private[converters] def read2(source: Array[Char], start: Int): Int = {
val first = source(start) - '0'
(first << 3) + (first << 1) + source(start + 1) - '0'
}
private[converters] def read4(source: Array[Char], start: Int): Int = {
val first = source(start) - '0'
val second = source(start + 1) - '0'
val third = source(start + 2) - '0'
first * 1000 + second * 100 + (third << 3) + (third << 1) + source(start + 3) - '0'
}
def tryParsePositiveInt(number: String): Option[Int] = {
if (number.length == 0 || number.charAt(0) < '0' || number.charAt(0) > '9') {
None
} else {
var value = 0
var i = 0
while (i < number.length) {
value = (value << 3) + (value << 1) + number.charAt(i) - '0'
i += 1
}
Some(value)
}
}
def parseLong(number: String): Long = {
var value = 0L
if (number.charAt(0) == '-') {
var i = 1
while (i < number.length) {
value = (value << 3) + (value << 1) - number.charAt(i) + '0'
i += 1
}
} else {
var i = 0
while (i < number.length) {
value = (value << 3) + (value << 1) + number.charAt(i) - '0'
i += 1
}
}
value
}
def serialize(value: Short, buf: Array[Char]): Int = {
if (value == Short.MinValue) {
"-32768".getChars(0, 6, buf, 0)
0
} else if (value == 0) {
buf(5) = '0'
5
} else {
var q = 0
var r = 0
var charPos = 5
var offset = 0
var i = 0
if (value < 0) {
i = -value
offset = 0
} else {
i = value.toInt
offset = 1
}
var v = 0
while (charPos > 0 && i != 0) {
q = i / 100
r = i - ((q << 6) + (q << 5) + (q << 2))
i = q
v = NUMBERS(r)
buf(charPos) = v.toByte.toChar
charPos -= 1
buf(charPos) = (v >> 8).toChar
charPos -= 1
}
val zeroBased = v >> 24
buf(charPos + zeroBased) = '-'
charPos + offset + zeroBased
}
}
def serialize(value: Int, buf: Array[Char]): Int = {
if (value == Int.MinValue) {
"-2147483648".getChars(0, 11, buf, 0)
0
} else if (value == 0) {
buf(10) = '0'
10
} else {
var q = 0
var r = 0
var charPos = 10
var offset = 0
var i = 0
if (value < 0) {
i = -value
offset = 0
} else {
i = value
offset = 1
}
var v = 0
while (charPos > 0 && i != 0) {
q = i / 100
r = i - ((q << 6) + (q << 5) + (q << 2))
i = q
v = NUMBERS(r)
buf(charPos) = v.toByte.toChar
charPos -= 1
buf(charPos) = (v >> 8).toChar
charPos -= 1
}
val zeroBased = v >> 24
buf(charPos + zeroBased) = '-'
charPos + offset + zeroBased
}
}
def serialize(value: Long, buf: Array[Char]): Int = {
if (value == Long.MinValue) {
"-9223372036854775808".getChars(0, 20, buf, 1)
1
} else if (value == 0L) {
buf(20) = '0'
20
} else {
var q = 0L
var r = 0
var charPos = 20
var offset = 0
var i = 0L
if (value < 0) {
i = -value
offset = 0
} else {
i = value
offset = 1
}
var v = 0
while (charPos > 0 && i != 0) {
q = i / 100
r = (i - ((q << 6) + (q << 5) + (q << 2))).toInt
i = q
v = NUMBERS(r)
buf(charPos) = v.toByte.toChar
charPos -= 1
buf(charPos) = (v >> 8).toChar
charPos -= 1
}
val zeroBased = v >> 24
buf(charPos + zeroBased) = '-'
charPos + offset + zeroBased
}
}
def parsePositive(source: Array[Char], start: Int, end: Int): Int = {
var res = 0
var i = start
while (i < source.length && i != end) {
res = res * 10 + (source(i) - '0')
i += 1
}
res
}
}
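// Hedged usage sketch (added by the editor, not part of the original file):
// the serialize helpers write right-aligned into a caller-provided buffer and
// return the index of the first meaningful character. Buffer sizes mirror the
// offsets used above (6 chars for Short, 11 for Int, 21 for Long).
object NumberConverterExample {
  def main(args: Array[String]): Unit = {
    val buf = new Array[Char](11)
    val start = NumberConverter.serialize(-12345, buf)
    println(new String(buf, start, buf.length - start)) // prints -12345
    println(NumberConverter.tryParsePositiveInt("42"))  // Some(42)
    println(NumberConverter.tryParsePositiveInt("-42")) // None (sign not accepted)
    println(NumberConverter.parseLong("-9001"))         // -9001
  }
}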
|
ngs-doo/revenj
|
scala/revenj-core/src/main/scala/net/revenj/database/postgres/converters/NumberConverter.scala
|
Scala
|
bsd-3-clause
| 5,786 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.sql.{Connection, Date, Timestamp}
import java.util.Properties
import java.math.BigDecimal
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.tags.DockerTest
/**
 * This integration suite was created to test this patch against the Oracle docker image.
 * The ojdbc6-11.2.0.2.0.jar driver is not available in the Maven repository, so it was
 * downloaded manually from the Oracle site and installed into the local repository for
 * testing. For SparkQA test runs, the ojdbc jar therefore has to be placed manually in the
 * local Maven repository (com/oracle/ojdbc6/11.2.0.2.0).
 *
 * Steps to run this suite:
 * 1. Pull the Oracle 11g image - docker pull wnameless/oracle-xe-11g
 * 2. Start docker - sudo service docker start
 * 3. Download the Oracle 11g driver jar and put it in the local Maven repo:
 *    (com/oracle/ojdbc6/11.2.0.2.0/ojdbc6-11.2.0.2.0.jar)
 * 4. Increase the timeout and interval parameters for the Oracle test from 60,1 to higher
 *    values in DockerJDBCIntegrationSuite.scala (locally tested with 200,200 and executed
 *    successfully).
 * 5. Run the Spark test - ./build/sbt "test-only org.apache.spark.sql.jdbc.OracleIntegrationSuite"
 *
 * All tests in this suite are ignored because of the dependency on the Oracle jar, which is
 * not available in the Maven repository.
*/
@DockerTest
class OracleIntegrationSuite extends DockerJDBCIntegrationSuite with SharedSQLContext {
import testImplicits._
override val db = new DatabaseOnDocker {
override val imageName = "wnameless/oracle-xe-11g:14.04.4"
override val env = Map(
"ORACLE_ROOT_PASSWORD" -> "oracle"
)
override val usesIpc = false
override val jdbcPort: Int = 1521
override def getJdbcUrl(ip: String, port: Int): String =
s"jdbc:oracle:thin:system/oracle@//$ip:$port/xe"
override def getStartupProcessName: Option[String] = None
}
override def dataPreparation(conn: Connection): Unit = {
conn.prepareStatement("CREATE TABLE numerics (b DECIMAL(1), f DECIMAL(3, 2), i DECIMAL(10))").executeUpdate();
conn.prepareStatement(
"INSERT INTO numerics VALUES (4, 1.23, 9999999999)").executeUpdate();
conn.commit();
}
test("SPARK-16625 : Importing Oracle numeric types") {
val df = sqlContext.read.jdbc(jdbcUrl, "numerics", new Properties);
val rows = df.collect()
assert(rows.size == 1)
val row = rows(0)
// The main point of the below assertions is not to make sure that these Oracle types are
// mapped to decimal types, but to make sure that the returned values are correct.
// A value > 1 from DECIMAL(1) is correct:
assert(row.getDecimal(0).compareTo(BigDecimal.valueOf(4)) == 0)
// A value with fractions from DECIMAL(3, 2) is correct:
assert(row.getDecimal(1).compareTo(BigDecimal.valueOf(1.23)) == 0)
// A value > Int.MaxValue from DECIMAL(10) is correct:
assert(row.getDecimal(2).compareTo(BigDecimal.valueOf(9999999999l)) == 0)
}
test("SPARK-12941: String datatypes to be mapped to Varchar in Oracle") {
// create a sample dataframe with string type
val df1 = sparkContext.parallelize(Seq(("foo"))).toDF("x")
// write the dataframe to the oracle table tbl
df1.write.jdbc(jdbcUrl, "tbl2", new Properties)
// read the table from the oracle
val dfRead = sqlContext.read.jdbc(jdbcUrl, "tbl2", new Properties)
// get the rows
val rows = dfRead.collect()
// verify the data type is inserted
val types = rows(0).toSeq.map(x => x.getClass.toString)
assert(types(0).equals("class java.lang.String"))
// verify the value is the inserted correct or not
assert(rows(0).getString(0).equals("foo"))
}
test("SPARK-16625: General data types to be mapped to Oracle") {
val props = new Properties()
props.put("oracle.jdbc.mapDateToTimestamp", "false")
val schema = StructType(Seq(
StructField("boolean_type", BooleanType, true),
StructField("integer_type", IntegerType, true),
StructField("long_type", LongType, true),
StructField("float_Type", FloatType, true),
StructField("double_type", DoubleType, true),
StructField("byte_type", ByteType, true),
StructField("short_type", ShortType, true),
StructField("string_type", StringType, true),
StructField("binary_type", BinaryType, true),
StructField("date_type", DateType, true),
StructField("timestamp_type", TimestampType, true)
))
val tableName = "test_oracle_general_types"
val booleanVal = true
val integerVal = 1
val longVal = 2L
val floatVal = 3.0f
val doubleVal = 4.0
val byteVal = 2.toByte
val shortVal = 5.toShort
val stringVal = "string"
val binaryVal = Array[Byte](6, 7, 8)
val dateVal = Date.valueOf("2016-07-26")
val timestampVal = Timestamp.valueOf("2016-07-26 11:49:45")
val data = spark.sparkContext.parallelize(Seq(
Row(
booleanVal, integerVal, longVal, floatVal, doubleVal, byteVal, shortVal, stringVal,
binaryVal, dateVal, timestampVal
)))
val dfWrite = spark.createDataFrame(data, schema)
dfWrite.write.jdbc(jdbcUrl, tableName, props)
val dfRead = spark.read.jdbc(jdbcUrl, tableName, props)
val rows = dfRead.collect()
// verify the data type is inserted
val types = dfRead.schema.map(field => field.dataType)
assert(types(0).equals(DecimalType(1, 0)))
assert(types(1).equals(DecimalType(10, 0)))
assert(types(2).equals(DecimalType(19, 0)))
assert(types(3).equals(DecimalType(19, 4)))
assert(types(4).equals(DecimalType(19, 4)))
assert(types(5).equals(DecimalType(3, 0)))
assert(types(6).equals(DecimalType(5, 0)))
assert(types(7).equals(StringType))
assert(types(8).equals(BinaryType))
assert(types(9).equals(DateType))
assert(types(10).equals(TimestampType))
// verify the value is the inserted correct or not
val values = rows(0)
assert(values.getDecimal(0).compareTo(BigDecimal.valueOf(1)) == 0)
assert(values.getDecimal(1).compareTo(BigDecimal.valueOf(integerVal)) == 0)
assert(values.getDecimal(2).compareTo(BigDecimal.valueOf(longVal)) == 0)
assert(values.getDecimal(3).compareTo(BigDecimal.valueOf(floatVal)) == 0)
assert(values.getDecimal(4).compareTo(BigDecimal.valueOf(doubleVal)) == 0)
assert(values.getDecimal(5).compareTo(BigDecimal.valueOf(byteVal)) == 0)
assert(values.getDecimal(6).compareTo(BigDecimal.valueOf(shortVal)) == 0)
assert(values.getString(7).equals(stringVal))
assert(values.getAs[Array[Byte]](8).mkString.equals("678"))
assert(values.getDate(9).equals(dateVal))
assert(values.getTimestamp(10).equals(timestampVal))
}
}
|
spark0001/spark2.1.1
|
external/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/OracleIntegrationSuite.scala
|
Scala
|
apache-2.0
| 7,662 |
package scoverage
import org.scalatest.{FreeSpec, Matchers}
class LocationTest extends FreeSpec with Matchers {
"location function" - {
"should correctly process top level types" - {
"for classes" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.test\nclass Sammy")
val loc = compiler.locations.result().find(_._1 == "Template").get._2
loc.packageName shouldBe "com.test"
loc.className shouldBe "Sammy"
loc.fullClassName shouldBe "com.test.Sammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Class
loc.sourcePath should endWith(".scala")
}
"for objects" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.test\nobject Bammy { def foo = Symbol(\"boo\") } ")
val loc = compiler.locations.result().find(_._1 == "Template").get._2
loc.packageName shouldBe "com.test"
loc.className shouldBe "Bammy"
loc.fullClassName shouldBe "com.test.Bammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Object
loc.sourcePath should endWith(".scala")
}
"for traits" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.test\ntrait Gammy { def goo = Symbol(\"hoo\") } ")
val loc = compiler.locations.result().find(_._1 == "Template").get._2
loc.packageName shouldBe "com.test"
loc.className shouldBe "Gammy"
loc.fullClassName shouldBe "com.test.Gammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Trait
loc.sourcePath should endWith(".scala")
}
}
"should correctly process methods" in {
val compiler = ScoverageCompiler.locationCompiler
      compiler.compile("package com.methodtest \n class Hammy { def foo = Symbol(\"boo\") } ")
val loc = compiler.locations.result().find(_._2.method == "foo").get._2
loc.packageName shouldBe "com.methodtest"
loc.className shouldBe "Hammy"
loc.fullClassName shouldBe "com.methodtest.Hammy"
loc.classType shouldBe ClassType.Class
loc.sourcePath should endWith(".scala")
}
"should correctly process nested methods" in {
val compiler = ScoverageCompiler.locationCompiler
      compiler.compile("package com.methodtest \n class Hammy { def foo = { def goo = { getClass; 3 }; goo } } ")
val loc = compiler.locations.result().find(_._2.method == "goo").get._2
loc.packageName shouldBe "com.methodtest"
loc.className shouldBe "Hammy"
loc.fullClassName shouldBe "com.methodtest.Hammy"
loc.classType shouldBe ClassType.Class
loc.sourcePath should endWith(".scala")
}
"should process anon functions as inside the enclosing method" in {
val compiler = ScoverageCompiler.locationCompiler
      compiler.compile("package com.methodtest \n class Jammy { def moo = { Option(\"bat\").map(_.length) } } ")
val loc = compiler.locations.result().find(_._1 == "Function").get._2
loc.packageName shouldBe "com.methodtest"
loc.className shouldBe "Jammy"
loc.fullClassName shouldBe "com.methodtest.Jammy"
loc.method shouldBe "moo"
loc.classType shouldBe ClassType.Class
loc.sourcePath should endWith(".scala")
}
"should use outer package" - {
"for nested classes" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.methodtest \n class Jammy { class Pammy } ")
val loc = compiler.locations.result().find(_._2.className == "Pammy").get._2
loc.packageName shouldBe "com.methodtest"
loc.className shouldBe "Pammy"
loc.fullClassName shouldBe "com.methodtest.Jammy.Pammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Class
loc.sourcePath should endWith(".scala")
}
"for nested objects" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.methodtest \n class Jammy { object Zammy } ")
val loc = compiler.locations.result().find(_._2.className == "Zammy").get._2
loc.packageName shouldBe "com.methodtest"
loc.className shouldBe "Zammy"
loc.fullClassName shouldBe "com.methodtest.Jammy.Zammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Object
loc.sourcePath should endWith(".scala")
}
"for nested traits" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.methodtest \n class Jammy { trait Mammy } ")
val loc = compiler.locations.result().find(_._2.className == "Mammy").get._2
loc.packageName shouldBe "com.methodtest"
loc.className shouldBe "Mammy"
loc.fullClassName shouldBe "com.methodtest.Jammy.Mammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Trait
loc.sourcePath should endWith(".scala")
}
}
"should support nested packages" - {
"for classes" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.a \n " +
          "package b \n" +
"class Kammy ")
val loc = compiler.locations.result().find(_._1 == "Template").get._2
loc.packageName shouldBe "com.a.b"
loc.className shouldBe "Kammy"
loc.fullClassName shouldBe "com.a.b.Kammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Class
loc.sourcePath should endWith(".scala")
}
"for objects" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.a \n " +
          "package b \n" +
"object Kammy ")
val loc = compiler.locations.result().find(_._1 == "Template").get._2
loc.packageName shouldBe "com.a.b"
loc.className shouldBe "Kammy"
loc.fullClassName shouldBe "com.a.b.Kammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Object
loc.sourcePath should endWith(".scala")
}
"for traits" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.a \n " +
          "package b \n" +
"trait Kammy ")
val loc = compiler.locations.result().find(_._1 == "Template").get._2
loc.packageName shouldBe "com.a.b"
loc.className shouldBe "Kammy"
loc.fullClassName shouldBe "com.a.b.Kammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Trait
loc.sourcePath should endWith(".scala")
}
}
"should use <none> method name" - {
"for class constructor body" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.b \n class Tammy { val name = Symbol(\"sam\") } ")
val loc = compiler.locations.result().find(_._1 == "ValDef").get._2
loc.packageName shouldBe "com.b"
loc.className shouldBe "Tammy"
loc.fullClassName shouldBe "com.b.Tammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Class
loc.sourcePath should endWith(".scala")
}
"for object constructor body" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.b \n object Yammy { val name = Symbol(\"sam\") } ")
val loc = compiler.locations.result().find(_._1 == "ValDef").get._2
loc.packageName shouldBe "com.b"
loc.className shouldBe "Yammy"
loc.fullClassName shouldBe "com.b.Yammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Object
loc.sourcePath should endWith(".scala")
}
"for trait constructor body" in {
val compiler = ScoverageCompiler.locationCompiler
        compiler.compile("package com.b \n trait Wammy { val name = Symbol(\"sam\") } ")
val loc = compiler.locations.result().find(_._1 == "ValDef").get._2
loc.packageName shouldBe "com.b"
loc.className shouldBe "Wammy"
loc.fullClassName shouldBe "com.b.Wammy"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Trait
loc.sourcePath should endWith(".scala")
}
}
"anon class should report enclosing class" in {
val compiler = ScoverageCompiler.locationCompiler
compiler
.compile(
"package com.a; object A { def foo(b : B) : Unit = b.invoke }; trait B { def invoke : Unit }; class C { A.foo(new B { def invoke = () }) }")
val loc = compiler.locations.result().filter(_._1 == "Template").last._2
loc.packageName shouldBe "com.a"
loc.className shouldBe "C"
loc.fullClassName shouldBe "com.a.C"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Class
loc.sourcePath should endWith(".scala")
}
"anon class implemented method should report enclosing method" in {
val compiler = ScoverageCompiler.locationCompiler
compiler.compile(
"package com.a; object A { def foo(b : B) : Unit = b.invoke }; trait B { def invoke : Unit }; class C { A.foo(new B { def invoke = () }) }")
val loc = compiler.locations.result().filter(_._1 == "DefDef").last._2
loc.packageName shouldBe "com.a"
loc.className shouldBe "C"
loc.fullClassName shouldBe "com.a.C"
loc.method shouldBe "invoke"
loc.classType shouldBe ClassType.Class
loc.sourcePath should endWith(".scala")
}
"doubly nested classes should report correct fullClassName" in {
val compiler = ScoverageCompiler.locationCompiler
      compiler.compile("package com.a \n object Foo { object Boo { object Moo { val name = Symbol(\"sam\") } } }")
val loc = compiler.locations.result().find(_._1 == "ValDef").get._2
loc.packageName shouldBe "com.a"
loc.className shouldBe "Moo"
loc.fullClassName shouldBe "com.a.Foo.Boo.Moo"
loc.method shouldBe "<none>"
loc.classType shouldBe ClassType.Object
loc.sourcePath should endWith(".scala")
}
}
}
|
gslowikowski/scalac-scoverage-plugin
|
scalac-scoverage-plugin/src/test/scala/scoverage/LocationTest.scala
|
Scala
|
apache-2.0
| 10,191 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.ner
import cc.factorie.app.nlp.lemma.{LowercaseTokenLemma, LowercaseLemmatizer}
import cc.factorie.app.nlp.lexicon.{LexiconsProvider, StaticLexicons}
import cc.factorie.util._
import java.io._
import cc.factorie._
import cc.factorie.app.chain._
import cc.factorie.app.nlp._
import cc.factorie.app.nlp.embeddings._
import cc.factorie.app.strings._
import cc.factorie.model.DotFamilyWithStatistics2
import cc.factorie.optimize.{AdaGrad, ParameterAveraging}
import cc.factorie.util.{BinarySerializer, CmdOptions, HyperparameterMain, JavaHashMap}
import cc.factorie.variable._
import cc.factorie.optimize.Trainer
import cc.factorie.la.WeightsMapAccumulator
import scala.reflect.{ClassTag, classTag}
import scala.collection.mutable.ListBuffer
import scala.io._
import scala.math.round
class TokenSequence[T<:NerTag](token: Token)(implicit m: ClassTag[T]) extends collection.mutable.ArrayBuffer[Token] {
this.prepend(token)
val label : String = token.attr[T].categoryValue.split("-")(1)
def key = this.mkString("-")
}
abstract class StackedChainNer[L<:NerTag](labelDomain: CategoricalDomain[String],
newLabel: (Token, String) => L,
labelToToken: L => Token,
embeddingMap: SkipGramEmbedding,
embeddingDim: Int,
scale: Double,
useOffsetEmbedding: Boolean,
modelIs: InputStream=null,
nerLexiconFeatures: NerLexiconFeatures)(implicit m: ClassTag[L]) extends DocumentAnnotator with Serializable {
val FEATURE_PREFIX_REGEX = "^[^@]*$".r
val ALPHA_REGEX = "[A-Za-z]+".r
object NERModelOpts {
val argsList = new scala.collection.mutable.HashMap[String, String]()
argsList += ("scale" -> scale.toString)
argsList += ("embeddingDim" -> embeddingDim.toString)
}
def process(document:Document) =
if(document.tokenCount > 0) {
if (!document.tokens.head.attr.contains(m.runtimeClass))
document.tokens.map(token => token.attr += newLabel(token, "O"))
if (!document.tokens.head.attr.contains(classOf[ChainNerFeatures])) {
document.tokens.map(token => {token.attr += new ChainNerFeatures(token)})
initFeatures(document,(t:Token)=>t.attr[ChainNerFeatures])
}
process(document, useModel2 = false)
if (!document.tokens.head.attr.contains(classOf[ChainNer2Features])) {
document.tokens.map(token => token.attr += new ChainNer2Features(token))
initFeatures(document,(t:Token)=>t.attr[ChainNer2Features])
initSecondaryFeatures(document)
}
process(document,useModel2 = true)
for (token <- document.tokens) {
token.attr.remove[ChainNerFeatures]
token.attr.remove[ChainNer2Features]
}
document
} else {
document
}
val prereqAttrs = Seq(classOf[Sentence])
val postAttrs = Seq(m.runtimeClass)
def tokenAnnotationString(token: Token) = token.attr[NerTag].categoryValue
object ChainNer2FeaturesDomain extends CategoricalVectorDomain[String]
class ChainNer2Features(val token:Token) extends BinaryFeatureVectorVariable[String] {
def domain = ChainNer2FeaturesDomain
override def skipNonCategories = true
}
object ChainNerFeaturesDomain extends CategoricalVectorDomain[String]
class ChainNerFeatures(val token:Token) extends BinaryFeatureVectorVariable[String] {
def domain = ChainNerFeaturesDomain
override def skipNonCategories = true
}
class StackedChainNereModel[Features <: CategoricalVectorVar[String]:ClassTag](featuresDomain1:CategoricalVectorDomain[String],
labelToFeatures1:L=>Features,
labelToToken1:L=>Token,
tokenToLabel1:Token=>L)
extends ChainModel(labelDomain, featuresDomain1, labelToFeatures1, labelToToken1, tokenToLabel1) with Parameters {
// Factor for embedding of observed token
val embedding = new DotFamilyWithStatistics2[L, EmbeddingVariable] {
val weights = Weights(new la.DenseTensor2(labelDomain.size, embeddingDim))
}
val embeddingPrev = new DotFamilyWithStatistics2[L, EmbeddingVariable] {
val weights = Weights(new la.DenseTensor2(labelDomain.size, embeddingDim))
}
val embeddingNext = new DotFamilyWithStatistics2[L, EmbeddingVariable] {
val weights = Weights(new la.DenseTensor2(labelDomain.size, embeddingDim))
}
override def factors(variables:Iterable[Var]): Iterable[Factor] = {
val result = new ListBuffer[Factor]
variables match {
case labels: Iterable[L] if variables.forall(v => classTag[L].runtimeClass.isAssignableFrom(v.getClass)) =>
var prevLabel: L = null.asInstanceOf[L]
for (label <- labels) {
result += bias.Factor(label)
result += obs.Factor(labelToFeatures(label), label)
if (prevLabel ne null) {
result += markov.Factor(prevLabel, label)
if (useObsMarkov) result += obsmarkov.Factor(prevLabel, label, labelToFeatures(label))
}
val scale = NERModelOpts.argsList("scale").toDouble
if (embeddingMap != null ) {
if (embeddingMap.contains(labelToToken(label).string)) result += embedding.Factor(label, new EmbeddingVariable(embeddingMap(labelToToken(label).string) * scale))
if (useOffsetEmbedding && labelToToken(label).sentenceHasPrev && embeddingMap.contains(labelToToken(label).prev.string)) result += embeddingPrev.Factor(label, new EmbeddingVariable(embeddingMap(labelToToken(label).prev.string) * scale))
if (useOffsetEmbedding && labelToToken(label).sentenceHasNext && embeddingMap.contains(labelToToken(label).next.string)) result += embeddingNext.Factor(label, new EmbeddingVariable(embeddingMap(labelToToken(label).next.string) * scale))
}
prevLabel = label
}
}
result
}
override def getLocalScores(varying: Seq[L]): Array[DenseTensor1] = {
val biasScores = bias.weights.value
val obsWeights = obs.weights.value
val a = Array.fill[DenseTensor1](varying.size)(null)
var i = 0
while (i < varying.length) {
val scores = obsWeights.leftMultiply(labelToFeatures(varying(i)).value.asInstanceOf[Tensor1]).asInstanceOf[DenseTensor1]
scores += biasScores
if (embeddingMap != null) {
if (embeddingMap.contains(labelToToken(varying(i)).string)) scores += embedding.weights.value * embeddingMap(labelToToken(varying(i)).string)
if (i >= 1 && embeddingMap.contains(labelToToken(varying(i-1)).string)) scores += embeddingPrev.weights.value * embeddingMap(labelToToken(varying(i-1)).string)
if (i < varying.length-1 && embeddingMap.contains(labelToToken(varying(i+1)).string)) scores += embeddingNext.weights.value * embeddingMap(labelToToken(varying(i+1)).string)
}
a(i) = scores
i += 1
}
a
}
override def accumulateExtraObsGradients(gradient: WeightsMapAccumulator, obs: Tensor1, position: Int, labels: Seq[L]): Unit = {
if (embeddingMap ne null) {
if (embeddingMap.contains(labelToToken(labels(position)).string)) gradient.accumulate(embedding.weights, obs outer embeddingMap(labelToToken(labels(position)).string))
if (position >= 1 && embeddingMap.contains(labelToToken(labels(position-1)).string)) gradient.accumulate(embeddingPrev.weights, obs outer embeddingMap(labelToToken(labels(position-1)).string))
if (position < labels.length-1 && embeddingMap.contains(labelToToken(labels(position+1)).string)) gradient.accumulate(embeddingNext.weights, obs outer embeddingMap(labelToToken(labels(position+1)).string))
}
}
}
val model = new StackedChainNereModel[ChainNerFeatures](ChainNerFeaturesDomain, l => labelToToken(l).attr[ChainNerFeatures], labelToToken, t => t.attr[L])
val model2 = new StackedChainNereModel[ChainNer2Features](ChainNer2FeaturesDomain, l => labelToToken(l).attr[ChainNer2Features], labelToToken, t => t.attr[L])
val objective = cc.factorie.variable.HammingObjective //new HammingTemplate[LabeledMutableDiscreteVar]()
if (modelIs != null) {
deSerialize(modelIs)
// freeze!
ChainNerFeaturesDomain.freeze()
ChainNer2FeaturesDomain.freeze()
println("Found model")
}
else {
println("model not found")
}
println("Model info: scale= "+ NERModelOpts.argsList("scale").toDouble)
def serialize(stream: OutputStream) {
import cc.factorie.util.CubbieConversions._
val is = new DataOutputStream(new BufferedOutputStream(stream))
BinarySerializer.serialize(ChainNerFeaturesDomain.dimensionDomain, is)
BinarySerializer.serialize(ChainNer2FeaturesDomain.dimensionDomain, is)
BinarySerializer.serialize(NERModelOpts.argsList, is)
BinarySerializer.serialize(model, is)
BinarySerializer.serialize(model2, is)
is.close()
}
def deSerialize(stream: InputStream) {
import cc.factorie.util.CubbieConversions._
val is = new DataInputStream(new BufferedInputStream(stream))
BinarySerializer.deserialize(ChainNerFeaturesDomain.dimensionDomain, is)
BinarySerializer.deserialize(ChainNer2FeaturesDomain.dimensionDomain, is)
BinarySerializer.deserialize(NERModelOpts.argsList, is)
BinarySerializer.deserialize(model, is)
BinarySerializer.deserialize(model2, is)
is.close()
}
var aggregate = false
var twoStage = false
val clusters = new scala.collection.mutable.HashMap[String,String]
var count = 0
var didagg = false
var bP = false
var ss = 10.0
def prefix( prefixSize : Int, cluster : String ) : String = if(cluster.length > prefixSize) cluster.substring(0, prefixSize) else cluster
def addContextFeatures[A<:Observation[A]](t : Token, from : Token, vf:Token=>CategoricalVectorVar[String]) : Unit = {
val prevWindow = from.prevWindow(2).zipWithIndex
val nextWindow = from.nextWindow(2).zipWithIndex
vf(t) ++= prevWindow.map { case (t2, idx) =>
if (clusters.contains(t2.string)) vf(t) += ("CONTEXTPATH="+prefix(4, clusters(t2.string)) + ("@-" + idx.toString))
"CONTEXT="+simplifyDigits(t2.string).toLowerCase + "@-" + idx
}
vf(t) ++= nextWindow.map { case (t2, idx) =>
if (clusters.contains(t2.string)) vf(t) += ("CONTEXTPATH="+prefix(4, clusters(t2.string)) + ("@" + idx.toString))
"CONTEXT="+simplifyDigits(t2.string).toLowerCase + "@" + idx
}
}
def aggregateContext[A<:Observation[A]](token : Token, vf:Token=>CategoricalVectorVar[String]) : Unit = {
var count = 0
var compareToken : Token = token
while(count < 200 && compareToken.hasPrev) {
count += 1
compareToken = compareToken.prev
if(token.string.toLowerCase == compareToken.string.toLowerCase)
addContextFeatures(token, compareToken, vf)
}
count = 0
compareToken = token
while(count < 200 && compareToken.hasNext) {
count += 1
compareToken = compareToken.next
if(token.string.toLowerCase == compareToken.string.toLowerCase)
addContextFeatures(token, compareToken, vf)
}
}
// val bos: BufferedOutputStream = new BufferedOutputStream(new FileOutputStream("features.txt"), 10000)
// val out: PrintStream = new PrintStream(bos, true)
//
// for (token <- document.tokens) {
// val features: ChainNerFeatures = token.attr[ChainNerFeatures]
// if(features != null && features.activeCategories.size > 0) {
// val feats: Seq[String] = features.activeCategories.sortWith(_ < _)
// out.println(document.name+":"+token.position+"="+feats.mkString(", "))
// }
// }
def initFeatures(document:Document, vf:Token=>CategoricalVectorVar[String]): Unit = {
count=count+1
val tokenSequence = document.tokens.toIndexedSeq
    // One pass of lemmatising; this should be the same lemmatiser as the one used to construct the lexicon.
LowercaseLemmatizer.process(document)
nerLexiconFeatures.addLexiconFeatures(tokenSequence, vf)
import cc.factorie.app.strings.simplifyDigits
for (token <- document.tokens) {
val features = vf(token)
val rawWord = token.string
val word = simplifyDigits(rawWord).toLowerCase
features += "W="+word
//if (token.isCapitalized) features += "CAPITALIZED"
//else features += "NOTCAPITALIZED"
features += "SHAPE="+cc.factorie.app.strings.stringShape(rawWord, 2)
if (word.length > 5) { features += "P="+cc.factorie.app.strings.prefix(word, 4); features += "S="+cc.factorie.app.strings.suffix(word, 4) }
if (token.isPunctuation) features += "PUNCTUATION"
if (clusters.nonEmpty && clusters.contains(rawWord)) {
features += "CLUS="+prefix(4,clusters(rawWord))
features += "CLUS="+prefix(6,clusters(rawWord))
features += "CLUS="+prefix(10,clusters(rawWord))
features += "CLUS="+prefix(20,clusters(rawWord))
}
}
for (sentence <- document.sentences) {
cc.factorie.app.chain.Observations.addNeighboringFeatures(sentence.tokens,vf,FEATURE_PREFIX_REGEX,-2,2)
}
val tokenBuffer = new CircularBuffer[CategoricalVectorVar[String]](4)
val stringBuffer = new CircularBuffer[String](4)
// This is a separate iteration as combining them would be semantically different due to addNeighbouringFeatures().
for (token <- document.tokens) {
val tokenStr = token.string
val tokenFeatures = vf(token)
if (ALPHA_REGEX.findFirstIn(tokenStr).nonEmpty) {
tokenFeatures ++= token.charNGrams(2,5).map(n => "NGRAM="+n)
}
val simpleLowerStr = simplifyDigits(tokenStr).toLowerCase()
val nextStr = "NEXTWINDOW="+simpleLowerStr
// Add features from window of 4 words before and after
var i = 0
while (i < 4) {
val curTok = tokenBuffer(i)
if (curTok != null) {
curTok += nextStr // add next window feature to the token history
}
val prevStr = stringBuffer(i)
if (prevStr != null) {
tokenFeatures += prevStr // add previous window feature to the current token
}
i += 1
}
tokenBuffer += vf(token)
stringBuffer += "PREVWINDOW="+simpleLowerStr
}
if(aggregate) document.tokens.foreach( aggregateContext(_, vf) )
}
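  // Majority vote: returns the most frequent string in the list (the empty string for an empty list).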
def mode(list : List[String]) : String = {
val domainCount = new collection.mutable.HashMap[String, Int]
for(item <- list) {
if(domainCount.contains(item)) domainCount(item) = domainCount(item) + 1
else domainCount(item) = 1
}
var maxDomain = ""
var maxCount = 0
for(domain <- domainCount.keys) {
if(domainCount(domain) > maxCount) {
maxCount = domainCount(domain)
maxDomain = domain
}
}
maxDomain
}
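  // Extracts the BILOU-labelled token spans (predicted entity mentions) from a document.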
def getSequences(document : Document) : List[TokenSequence[L]] = {
var sequences = List[TokenSequence[L]]()
var seq : TokenSequence[L] = null
for(token <- document.tokens) {
val categoryVal = token.attr[L].categoryValue
if(categoryVal.length() > 0) {
categoryVal.substring(0,1) match {
case "B" => seq = new TokenSequence[L](token)
case "I" => if (seq != null) seq.append(token) else seq = new TokenSequence[L](token)
case "U" => seq = new TokenSequence[L](token)
case "L" => if (seq != null) seq.append(token) else seq = new TokenSequence[L](token)
case _ => null
}
        if(categoryVal.matches("(L|U)-\\D+")) sequences = seq :: sequences
}
}
sequences
}
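  // Returns every contiguous token subsequence of `seq` up to the given length, as space-prefixed strings.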
def allSubstrings(seq: TokenSequence[L], length : Int) : List[String] = {
if(length == 0) return List[String]()
var list = List[String]()
for(i <- 0 to seq.length-length) {
var sub = ""
for(k <- i until i+length) {
sub += " " + seq(k).string
}
list = sub :: list
}
allSubstrings(seq, length-1) ::: list
}
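  // Second-stage (stacked) features: derived from the labels predicted by the first-stage model,
  // e.g. label history, token- and sequence-level label votes, and label+Brown-cluster conjunctions.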
def initSecondaryFeatures(document:Document, extraFeatures : Boolean = false): Unit = {
for(t <- document.tokens) {
val tokenPrevWindow = t.prevWindow(2)
t.attr[ChainNer2Features] ++= tokenPrevWindow.zipWithIndex.map(t2 => "PREVLABEL" + t2._2 + "="+t2._1.attr[L].categoryValue)
if (t.hasPrev) {
t.attr[ChainNer2Features] += "PREVLABELCON=" + t.prev.attr[L].categoryValue + "&" + t.string
}
if (t.sentenceHasPrev) {
t.attr[ChainNer2Features] ++= tokenPrevWindow.map(t2 => "PREVLABELLCON=" + t.sentencePrev.attr[L].categoryValue + "&" + t2.string)
t.attr[ChainNer2Features] ++= t.nextWindow(2).map(t2 => "PREVLABELLCON=" + t.sentencePrev.attr[L].categoryValue + "&" + t2.string)
}
}
if(extraFeatures) {
val sequences = getSequences(document)
val tokenToLabelMap = JavaHashMap[String,List[String]]()
val sequenceToLabelMap = JavaHashMap[String,List[String]]()
val subsequencesToLabelMap = JavaHashMap[String,List[String]]()
for (token <- document.tokens) {
if(tokenToLabelMap.contains(token.string))
tokenToLabelMap(token.string) = tokenToLabelMap(token.string) ++ List(token.attr[L].categoryValue)
else
tokenToLabelMap(token.string) = List(token.attr[L].categoryValue)
}
for (seq <- sequences) {
if(sequenceToLabelMap.contains(seq.key))
sequenceToLabelMap(seq.key) = sequenceToLabelMap(seq.key) ++ List(seq.label)
else
sequenceToLabelMap(seq.key) = List(seq.label)
}
for (seq <- sequences) {
for(subseq <- allSubstrings(seq, seq.length)) {
if(subsequencesToLabelMap.contains(subseq))
subsequencesToLabelMap(subseq) = subsequencesToLabelMap(subseq) ++ List(seq.label)
else
subsequencesToLabelMap(seq.key) = List(seq.label)
}
}
for (token <- document.tokens) {
val tokenVote = tokenToLabelMap(token.string)
token.attr[ChainNer2Features] += "CLASSIFIERLABEL="+mode(tokenVote)
}
for(seq <- sequences) {
val seqVote = sequenceToLabelMap(seq.key)
val seqLabelMode = mode(seqVote)
val subSeqVote = subsequencesToLabelMap(seq.key)
val subSeqLabelMode = mode(subSeqVote)
for(token <- seq) {
token.attr[ChainNer2Features] += "SEQUENCELABEL="+seqLabelMode
token.attr[ChainNer2Features] += "SUBSEQUENCELABEL="+subSeqLabelMode
}
}
}
val extendedPrediction = JavaHashMap[String, collection.mutable.Map[String,Int]]()
val surfaceFormCount = JavaHashMap[String,Int]()
for(token <- document.tokens) {
val tokenStr = token.string
if(extendedPrediction.contains(tokenStr)) {
labelDomain.categories.foreach(str => token.attr[ChainNer2Features] += str + "=" + history(extendedPrediction(token.string).getOrElse(str,0), surfaceFormCount.getOrElse(tokenStr,0)) )
val map = extendedPrediction(tokenStr)
val count = map.getOrElse(token.attr[L].categoryValue,0) + 1
map.put(token.attr[L].categoryValue,count)
surfaceFormCount.put(tokenStr,surfaceFormCount.getOrElse(tokenStr,0) + 1)
} else {
val map = JavaHashMap[String,Int]()
map.put(token.attr[L].categoryValue,1)
extendedPrediction.put(tokenStr,map)
surfaceFormCount.put(tokenStr,1)
}
}
if (clusters.nonEmpty) {
for(token <- document.tokens) {
val rawWord = token.string
if(token.hasPrev) {
if(clusters.contains(rawWord))
token.attr[ChainNer2Features] ++= List(4,6,10,20).map("BROWNCON="+token.prev.attr[L].categoryValue + "&" + prefix(_,clusters(rawWord)))
if(token.hasNext) {
var nextRawWord = token.next.string
if(clusters.contains(nextRawWord))
token.attr[ChainNer2Features] ++= List(4,6,10,20).map("BROWNCON="+token.prev.attr[L].categoryValue + "&" + prefix(_,clusters(nextRawWord)))
if(token.next.hasNext && clusters.contains(token.next.next.string)) {
nextRawWord = token.next.next.string
token.attr[ChainNer2Features] ++= List(4,6,10,20).map("BROWNCON="+token.prev.attr[L].categoryValue + "&" + prefix(_,clusters(nextRawWord)))
}
}
var prevRawWord = token.prev.string
if(clusters.contains(prevRawWord))
token.attr[ChainNer2Features] ++= List(4,6,10,20).map("BROWNCON="+token.prev.attr[L].categoryValue + "&" + prefix(_,clusters(prevRawWord)))
if(token.prev.hasPrev && clusters.contains(token.prev.prev.string)) {
prevRawWord = token.prev.prev.string
token.attr[ChainNer2Features] ++= List(4,6,10,20).map("BROWNCON="+token.prev.attr[L].categoryValue + "&" + prefix(_,clusters(prevRawWord)))
}
}
}
}
}
object EmbeddingDomain extends DiscreteDomain(NERModelOpts.argsList("embeddingDim").toInt)
class EmbeddingVariable(t:la.Tensor1) extends VectorVariable(t) { def domain = EmbeddingDomain }
object EmbeddingDomain2 extends DiscreteDomain(EmbeddingDomain.size * EmbeddingDomain.size)
class EmbeddingVariable2(t:la.Tensor1) extends VectorVariable(t) { def domain = EmbeddingDomain2 }
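  // Bucketed label-history feature value: the fraction of occurrences labelled with `category`,
  // divided by 3 and rounded to one decimal place (e.g. 6 of 12 occurrences -> "0.2").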
def history(list : List[String], category : String) : String = {
(round( 10.0 * ((list.count(_ == category).toDouble / list.length.toDouble)/3)) / 10.0).toString
}
def history(count : Int, total : Int) : String = {
(round( 10.0 * ((count.toDouble / total)/3.0)) / 10.0).toString
}
def train(trainDocuments: Seq[Document],testDocuments: Seq[Document], rate: Double, delta: Double): Double = {
implicit val random = new scala.util.Random(0)
// Read in the data
// Add features for NER \\
println("Initializing training features")
(trainDocuments ++ testDocuments).foreach(_.tokens.map(token => token.attr += new ChainNerFeatures(token)))
trainDocuments.foreach(initFeatures(_,(t:Token)=>t.attr[ChainNerFeatures]))
ChainNerFeaturesDomain.freeze()
println("Initializing testing features")
testDocuments.foreach(initFeatures(_,(t:Token)=>t.attr[ChainNerFeatures]))
if (embeddingMap != null) println("StackedChainNer #tokens with no embedding %d/%d".format(trainDocuments.flatMap(_.tokens.filter(t => !embeddingMap.contains(t.string))).size, trainDocuments.map(_.tokens.size).sum))
println("StackedChainNer #tokens with no brown clusters assigned %d/%d".format(trainDocuments.flatMap(_.tokens.filter(t => !clusters.contains(t.string))).size, trainDocuments.map(_.tokens.size).sum))
val trainLabels = trainDocuments.flatMap(_.tokens).map(_.attr[L with LabeledMutableDiscreteVar]) //.take(100)
val testLabels = testDocuments.flatMap(_.tokens).map(_.attr[L with LabeledMutableDiscreteVar]) //.take(20)
val vars = for(td <- trainDocuments; sentence <- td.sentences if sentence.length > 1) yield sentence.tokens.map(_.attr[L with LabeledMutableDiscreteVar])
val examples = vars.map(v => new model.ChainLikelihoodExample(v.toSeq))
println("Training with " + examples.length + " examples")
Trainer.onlineTrain(model.parameters, examples, optimizer=new AdaGrad(rate=rate, delta=delta) with ParameterAveraging, useParallelTrainer=false)
trainDocuments.foreach(process(_, useModel2=false))
testDocuments.foreach(process(_, useModel2=false))
printEvaluation(trainDocuments, testDocuments, "FINAL 1")
(trainDocuments ++ testDocuments).foreach( _.tokens.map(token => token.attr += new ChainNer2Features(token)))
for(document <- trainDocuments) initFeatures(document, (t:Token)=>t.attr[ChainNer2Features])
for(document <- trainDocuments) initSecondaryFeatures(document)
ChainNer2FeaturesDomain.freeze()
for(document <- testDocuments) initFeatures(document, (t:Token)=>t.attr[ChainNer2Features])
for(document <- testDocuments) initSecondaryFeatures(document)
    //println(trainDocuments(3).tokens.map(token => token.nerTag.target.categoryValue + " "+token.string+" "+token.attr[ChainNer2Features].toString).mkString("\n"))
    //println("Example Test Token features")
    //println(testDocuments(1).tokens.map(token => token.nerTag.baseCategoryValue+" "+token.string+" "+token.attr[ChainNer2Features].toString).mkString("\n"))
(trainLabels ++ testLabels).foreach(_.setRandomly)
val vars2 = for(td <- trainDocuments; sentence <- td.sentences if sentence.length > 1) yield sentence.tokens.map(_.attr[L with LabeledMutableDiscreteVar])
val examples2 = vars2.map(v => new model2.ChainLikelihoodExample(v.toSeq))
Trainer.onlineTrain(model2.parameters, examples2, optimizer=new AdaGrad(rate=rate, delta=delta) with ParameterAveraging, useParallelTrainer=false)
trainDocuments.foreach(process)
testDocuments.foreach(process)
printEvaluation(trainDocuments, testDocuments, "FINAL")
}
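  // Tags the given documents and reports (sentences/sec, tokens/sec, segment F1).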
def test(testDocs: Seq[Document]): (Double, Double, Double) = {
var tokenTotal = 0.0
var sentenceTotal = 0.0
val t0 = System.currentTimeMillis()
val segmentEvaluation = new cc.factorie.app.chain.SegmentEvaluation[L with LabeledMutableCategoricalVar[String]](labelDomain.categories.filter(_.length > 2).map(_.substring(2)), "(B|U)-", "(I|L)-")
testDocs.foreach(doc => {
process(doc)
for(sentence <- doc.sentences) segmentEvaluation += sentence.tokens.map(_.attr[L with LabeledMutableCategoricalVar[String]])
sentenceTotal += doc.sentenceCount
tokenTotal += doc.tokenCount
})
val totalTime = System.currentTimeMillis() - t0
val sentencesPerSecond = (sentenceTotal / totalTime) * 1000.0
val tokensPerSecond = (tokenTotal / totalTime) * 1000.0
(sentencesPerSecond, tokensPerSecond, segmentEvaluation.f1)
}
def printEvaluation(testDocuments:Iterable[Document]): Double = {
val test = evaluationString(testDocuments)
println(test)
test
}
def printEvaluation(trainDocuments:Iterable[Document], testDocuments:Iterable[Document], iteration:String): Double = {
println("TRAIN")
println(evaluationString(trainDocuments))
println("TEST")
val test = evaluationString(testDocuments)
println(test)
test
}
def evaluationString(documents: Iterable[Document]): Double = {
val buf = new StringBuffer
buf.append(new LabeledDiscreteEvaluation(documents.flatMap(_.tokens.map(_.attr[L with LabeledMutableDiscreteVar]))))
val segmentEvaluation = new cc.factorie.app.chain.SegmentEvaluation[L with LabeledMutableCategoricalVar[String]](labelDomain.categories.filter(_.length > 2).map(_.substring(2)), "(B|U)-", "(I|L)-")
for (doc <- documents; sentence <- doc.sentences) segmentEvaluation += sentence.tokens.map(_.attr[L with LabeledMutableCategoricalVar[String]])
println("Segment evaluation")
println(segmentEvaluation)
segmentEvaluation.f1
}
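  // Inference: maximizes the label variables of each sentence under the first- or second-stage chain model.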
def process(document:Document, useModel2 : Boolean): Unit = {
if (document.tokenCount == 0) return
for(sentence <- document.sentences if sentence.tokens.nonEmpty) {
val vars = sentence.tokens.map(_.attr[L]).toSeq
(if (useModel2) model2 else model).maximize(vars)(null)
}
}
}
class ConllStackedChainNer(embeddingMap: SkipGramEmbedding,
embeddingDim: Int,
scale: Double,
useOffsetEmbedding: Boolean)(implicit mp:ModelProvider[ConllStackedChainNer], nerLexiconFeatures:NerLexiconFeatures)
extends StackedChainNer[BilouConllNerTag](
BilouConllNerDomain,
(t, s) => new BilouConllNerTag(t, s),
l => l.token,
embeddingMap,
embeddingDim,
scale,
useOffsetEmbedding,
mp.provide, nerLexiconFeatures)
//object ConllStackedChainNer extends ConllStackedChainNer(SkipGramEmbedding, 100, 1.0, true, ClasspathURL[ConllStackedChainNer](".factorie"))
class NoEmbeddingsConllStackedChainNer()(implicit mp:ModelProvider[NoEmbeddingsConllStackedChainNer], nerLexiconFeatures:NerLexiconFeatures) extends ConllStackedChainNer(null, 0, 0.0, false)(mp, nerLexiconFeatures) with Serializable
object NoEmbeddingsConllStackedChainNer extends NoEmbeddingsConllStackedChainNer()(ModelProvider.classpath(), StaticLexiconFeatures()) with Serializable
class OntonotesStackedChainNer(embeddingMap: SkipGramEmbedding,
embeddingDim: Int,
scale: Double,
useOffsetEmbedding: Boolean)(implicit mp:ModelProvider[OntonotesStackedChainNer], nerLexiconFeatures:NerLexiconFeatures)
extends StackedChainNer[BilouOntonotesNerTag](
BilouOntonotesNerDomain,
(t, s) => new BilouOntonotesNerTag(t, s),
l => l.token,
embeddingMap,
embeddingDim,
scale,
useOffsetEmbedding,
mp.provide, nerLexiconFeatures)
class NoEmbeddingsOntonotesStackedChainNer()(implicit mp:ModelProvider[NoEmbeddingsOntonotesStackedChainNer], nerLexiconFeatures: NerLexiconFeatures) extends OntonotesStackedChainNer(null, 0, 0.0, false)(mp, nerLexiconFeatures) with Serializable
object NoEmbeddingsOntonotesStackedChainNer extends NoEmbeddingsOntonotesStackedChainNer()(ModelProvider.classpath(), StaticLexiconFeatures()) with Serializable
class StackedChainNerOpts extends CmdOptions with SharedNLPCmdOptions{
val trainFile = new CmdOption("train", "eng.train", "FILE", "CoNLL formatted training file.")
val testFile = new CmdOption("test", "eng.testb", "FILE", "CoNLL formatted test file.")
val dataLoader = new CmdOption("data-loader", "conll2003", "STRING", "Data loader for this format.")
val encoding = new CmdOption("encoding", "UTF-8", "STRING", "Encoding of input files.")
val modelDir = new CmdOption[File]("model", new File("StackedNER.factorie"), "FILE", "File for saving or loading model.")
val runXmlDir = new CmdOption("run-xml", "xml", "DIR", "Directory for reading NYTimes XML data on which to run saved model.")
val brownClusFile = new CmdOption("brown", "", "FILE", "File containing brown clusters.")
val aggregateTokens = new CmdOption("aggregate", true, "BOOLEAN", "Turn on context aggregation feature.")
val rate = new CmdOption("rate", 0.18, "DOUBLE", "Learning rate")
val delta = new CmdOption("delta", 0.066, "DOUBLE", "Learning delta")
val saveModel = new CmdOption("save-model", false, "BOOLEAN", "Whether to save the model")
val runOnlyHere = new CmdOption("runOnlyHere", false, "BOOLEAN", "Run Experiments only on this machine")
val embeddingDir = new CmdOption("embeddingDir", "", "STRING", "location of embedding file")
val embeddingDim = new CmdOption("embeddingDim", 100, "INT", "embedding dimension")
val embeddingScale = new CmdOption("embeddingScale", 10.0, "FLOAT", "The scale of the embeddings")
val useOffsetEmbedding = new CmdOption("useOffsetEmbeddings", true, "BOOLEAN", "Whether to use offset embeddings")
val lang = new CmdOption("language", "en", "STRING", "Lexicons language.")
}
object ConllStackedChainNerTester extends App {
val opts = new StackedChainNerOpts
opts.parse(args)
val ner =
if(opts.modelDir.wasInvoked)
new ConllStackedChainNer(null: SkipGramEmbedding, opts.embeddingDim.value, opts.embeddingScale.value, opts.useOffsetEmbedding.value)(opts.modelDir.value.toURI.toURL, StaticLexiconFeatures(opts.lang.value))
else NoEmbeddingsConllStackedChainNer
val testPortionToTake = if(opts.testPortion.wasInvoked) opts.testPortion.value else 1.0
val dataLoader = opts.dataLoader.value match {
case "conll2003" => load.LoadConll2003(BILOU=true)
case "conll2002" => load.LoadConll2002(BILOU=true)
}
val testDocsFull = dataLoader.fromFilename(opts.testFile.value, encoding = opts.encoding.value)
val testDocs = testDocsFull.take((testDocsFull.length*testPortionToTake).floor.toInt)
println(ner.test(testDocs))
}
object ConllStackedChainNerTrainer extends HyperparameterMain {
def evaluateParameters(args: Array[String]): Double = {
// Parse command-line
val opts = new StackedChainNerOpts
opts.parse(args)
val skipgram = if (opts.embeddingDir.wasInvoked)
new SkipGramEmbedding(opts.embeddingDir.value, opts.embeddingDim.value)
else
null
val ner = new ConllStackedChainNer(skipgram: SkipGramEmbedding, opts.embeddingDim.value, opts.embeddingScale.value, opts.useOffsetEmbedding.value)(ModelProvider.empty, StaticLexiconFeatures(opts.lang.value))
ner.aggregate = opts.aggregateTokens.wasInvoked
if (opts.brownClusFile.wasInvoked) {
println("Reading brown cluster file " + opts.brownClusFile.value)
for(line <- Source.fromFile(opts.brownClusFile.value).getLines()){
        val splitLine = line.split("\t")
ner.clusters(splitLine(1)) = splitLine(0)
}
}
val trainPortionToTake = if(opts.trainPortion.wasInvoked) opts.trainPortion.value else 1.0
val testPortionToTake = if(opts.testPortion.wasInvoked) opts.testPortion.value else 1.0
val dataLoader = opts.dataLoader.value match {
case "conll2003" => load.LoadConll2003(BILOU=true)
case "conll2002" => load.LoadConll2002(BILOU=true)
}
val trainDocsFull = dataLoader.fromFilename(opts.trainFile.value, encoding = opts.encoding.value)
val testDocsFull = dataLoader.fromFilename(opts.testFile.value, encoding = opts.encoding.value)
val trainDocs = trainDocsFull.take((trainDocsFull.length*trainPortionToTake).floor.toInt)
val testDocs = testDocsFull.take((testDocsFull.length*testPortionToTake).floor.toInt)
val result = ner.train(trainDocs,testDocs, opts.rate.value, opts.delta.value)
if (opts.saveModel.value) {
ner.serialize(new FileOutputStream(opts.modelDir.value))
}
if(opts.targetAccuracy.wasInvoked) cc.factorie.assertMinimalAccuracy(result,opts.targetAccuracy.value.toDouble)
result
}
}
object ConllStackedChainNerOptimizer {
def main(args: Array[String]) {
val opts = new StackedChainNerOpts
opts.parse(args)
opts.saveModel.setValue(false)
if (opts.runOnlyHere.value) {
opts.saveModel.setValue(true)
val result = ConllStackedChainNerTrainer.evaluateParameters(args)
println("result: "+ result)
}
else {
val rate = cc.factorie.util.HyperParameter(opts.rate, new cc.factorie.util.LogUniformDoubleSampler(1e-4, 1e4))
val delta = cc.factorie.util.HyperParameter(opts.delta, new cc.factorie.util.LogUniformDoubleSampler(1e-4, 1e4))
/*
val ssh = new cc.factorie.util.SSHActorExecutor("apassos",
Seq("avon1", "avon2"),
"/home/apassos/canvas/factorie-test",
"try-log/",
"cc.factorie.app.nlp.parse.DepParser2",
10, 5)
*/
val qs = new cc.factorie.util.QSubExecutor(60, "cc.factorie.app.nlp.ner.ConllStackedChainNerTrainer")
val optimizer = new cc.factorie.util.HyperParameterSearcher(opts, Seq(rate, delta), qs.execute, 200, 180, 60)
val result = optimizer.optimize()
println("Got results: " + result.mkString(" "))
opts.saveModel.setValue(true)
println("Running best configuration...")
import scala.concurrent.Await
import scala.concurrent.duration._
Await.result(qs.execute(opts.values.flatMap(_.unParse).toArray), 5.hours)
println("Done")
}
}
}
|
Craigacp/factorie
|
src/main/scala/cc/factorie/app/nlp/ner/StackedChainNer.scala
|
Scala
|
apache-2.0
| 36,237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
/**
* Test suite for resolving Uuid expressions.
*/
class ResolvedUuidExpressionsSuite extends AnalysisTest {
private lazy val a = 'a.int
private lazy val r = LocalRelation(a)
private lazy val uuid1 = Uuid().as('_uuid1)
private lazy val uuid2 = Uuid().as('_uuid2)
private lazy val uuid3 = Uuid().as('_uuid3)
private lazy val uuid1Ref = uuid1.toAttribute
private val analyzer = getAnalyzer(caseSensitive = true)
private def getUuidExpressions(plan: LogicalPlan): Seq[Uuid] = {
plan.flatMap {
case p =>
p.expressions.flatMap(_.collect {
case u: Uuid => u
})
}
}
test("analyzed plan sets random seed for Uuid expression") {
val plan = r.select(a, uuid1)
val resolvedPlan = analyzer.executeAndCheck(plan)
getUuidExpressions(resolvedPlan).foreach { u =>
assert(u.resolved)
assert(u.randomSeed.isDefined)
}
}
test("Uuid expressions should have different random seeds") {
val plan = r.select(a, uuid1).groupBy(uuid1Ref)(uuid2, uuid3)
val resolvedPlan = analyzer.executeAndCheck(plan)
assert(getUuidExpressions(resolvedPlan).map(_.randomSeed.get).distinct.length == 3)
}
test("Different analyzed plans should have different random seeds in Uuids") {
val plan = r.select(a, uuid1).groupBy(uuid1Ref)(uuid2, uuid3)
val resolvedPlan1 = analyzer.executeAndCheck(plan)
val resolvedPlan2 = analyzer.executeAndCheck(plan)
val uuids1 = getUuidExpressions(resolvedPlan1)
val uuids2 = getUuidExpressions(resolvedPlan2)
assert(uuids1.distinct.length == 3)
assert(uuids2.distinct.length == 3)
assert(uuids1.intersect(uuids2).length == 0)
}
}
|
bravo-zhang/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolvedUuidExpressionsSuite.scala
|
Scala
|
apache-2.0
| 2,757 |
package org.scalameta.collections
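// Marker traits (with implicit instances) used as extra implicit parameters to distinguish
// overloaded methods that would otherwise erase to the same JVM signature.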
trait OverloadHack1
object OverloadHack1 { implicit object Instance extends OverloadHack1 }
trait OverloadHack2
object OverloadHack2 { implicit object Instance extends OverloadHack2 }
trait OverloadHack3
object OverloadHack3 { implicit object Instance extends OverloadHack3 }
trait OverloadHack4
object OverloadHack4 { implicit object Instance extends OverloadHack4 }
|
beni55/scalameta
|
foundation/src/main/scala/org/scalameta/collections/OverloadHack.scala
|
Scala
|
bsd-3-clause
| 406 |
import java.io.{BufferedInputStream, File}
import java.nio.file.{Files, Path, Paths}
import java.time._
import java.util.TimeZone
import java.util.concurrent.atomic.AtomicBoolean
import burnup._
import cli.CliUtil._
import github.{Issue, IssueEvent, Milestone, GitHubClient}
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsArray, JsValue, Json}
import scala.collection.JavaConversions._
import scala.collection.{mutable, Set}
object Main extends App {
val logger = LoggerFactory.getLogger(this.getClass)
// parse the command line options
parser.parse(args, cli.CliArgs()) match {
case Some(config) =>
logger.info(s"Generate milestone burnup charts of https://github.com/${config.owner}/${config.repo}")
logger.info(s"Time Zone: ${config.timeZone.getID}")
// initialize GitHubClient
val gitHubClient = config
.accessToken.map(new GitHubClient(_))
.orElse(for (u <- config.username; p <- config.password) yield new GitHubClient(u, p))
.get
val since = config.since.map(date => ZonedDateTime.of(date, LocalTime.of(0, 0), config.timeZone.toZoneId))
logger.info(s"Fetching issue events${since.map(" since " + _.toString).getOrElse("")} ...")
val issueEventsPath = fetchGitHubIssueEvents(gitHubClient, config.owner, config.repo, since)
// val issueEventsPath = Paths.get("/tmp/xxx") // read local cache
val milestones = fetchGitHubMilestones(gitHubClient, config.owner, config.repo)
logger.info(s"Finished fetching data from GitHub")
// analyze GitHub issue events
val eventProcessor = analyzeIssueEvents(issueEventsPath, milestones)
// set milestone details
val milestonesHistory = eventProcessor.milestoneHistory.toMap
milestonesHistory.foreach { case (title, history) =>
history.milestone = milestones.find(_.title == title)
}
// generate a chart per milestone
generateBurnupCharts(milestonesHistory, config.timeZone, config.outputDir)
case None =>
}
/**
* Fetch /repos/:owner/:repo/issue/events and save into a file
* @return path to the file containing issue events in JSON
*/
def fetchGitHubIssueEvents(
githubClient: GitHubClient, owner: String, repo: String,
till: Option[ZonedDateTime] = Option.empty,
outFile: Path = Files.createTempFile("github-issues", ".json")
): Path = {
val stream = Files.newBufferedWriter(outFile)
try {
val isFirstElement = new AtomicBoolean(true)
stream.write("[")
githubClient
.pagedGet(s"/repos/${owner}/${repo}/issues/events")
.flatMap(_.value)
        .takeWhile(j => till.map(_.isBefore(ZonedDateTime.parse((j \ "created_at").as[String]))).getOrElse(true))
.foreach { json =>
// write ',' if not the first element. note you need to synchronize this block when using parallel collection.
if (isFirstElement.getAndSet(false) == false)
stream.write(",")
stream.write(Json.prettyPrint(json))
}
stream.write("]")
logger.debug(s"Wrote GitHub issue events in ${outFile.toAbsolutePath} temporarily")
outFile
} finally {
stream.close()
}
}
/**
* Fetch /repos/:owner/:repo/milestones?state=all
* @return list of milestones
*/
def fetchGitHubMilestones(githubClient: GitHubClient, owner: String, repo: String): Seq[Milestone] = {
githubClient
.pagedGet(s"/repos/$owner/$repo/milestones", Map("state" -> "all"))
.flatMap(_.value)
.map(new Milestone(_))
.toSeq
}
/**
* @param issueEventsPath path of a file containing the response of 'GET /repos/:owner/:repo/issues/events'
* in the order of the creation
*/
def analyzeIssueEvents(issueEventsPath: Path, milestones: Seq[Milestone]): IssueEventProcessor = {
logger.info(s"Analyzing GitHub issue events...")
val stream = new BufferedInputStream(Files.newInputStream(issueEventsPath))
try {
analyzeIssueEvents(Json.parse(stream).as[JsArray].value)
} finally {
logger.info(s"Finished analyzing GitHub issue events")
stream.close()
}
}
/**
* @param issueEvents list of issue events in reverse chronological order
*/
def analyzeIssueEvents(issueEvents: Seq[JsValue]): IssueEventProcessor = {
// Reverse the order of issue events
val events = issueEvents.map(new IssueEvent(_))
.filter(_.issue.isPullRequest == false)
.reverse
    // To find out whether milestones were renamed, check the current milestone of each issue.
    // Each issue event has a 'milestone' property holding the *current* milestone the issue
    // is in; if the milestone was renamed, this property reflects the new name.
val issueNumberToCurrentMilestoneTitle = events
.flatMap(e => e.issue.milestone map (e.issue.number -> _.title))
.toMap
// Process the events. The result keeps the name of the *last* milestone each issue
// was put into. The name is the one when the issue was milestoned - it has nothing
// to do with the current name if the milestone was renamed.
logger.debug(s"Processing the issue events.")
val preProcessor = new IssueEventProcessor
events.foreach(preProcessor.process(_))
    // If an issue is currently milestoned and the milestone's present name differs from
    // its name at the time the issue was milestoned, then the milestone has been renamed
    // since the issue was milestoned.
val oldToNewMilestoneTitles = (for (
key <- issueNumberToCurrentMilestoneTitle.keys ++ preProcessor.issueNumberToMilestoneTitle.keys;
currentName <- issueNumberToCurrentMilestoneTitle.get(key);
lastName <- preProcessor.issueNumberToMilestoneTitle.get(key);
if lastName != currentName
) yield (lastName, currentName)) toMap
// If no milestone was renamed, it's done. Otherwise, do processing again.
oldToNewMilestoneTitles match {
case map if map.isEmpty => preProcessor
case map =>
logger.debug(s"Milestones renamed: ${oldToNewMilestoneTitles}")
// Do process all events again. In this path, consider milestones renamed.
logger.debug(s"Processing the issue events again with renamed milestones considered.")
val processor = new IssueEventProcessor(oldToNewMilestoneTitles)
events.foreach(processor.process(_))
processor
}
}
/**
* Generate a burnup chart for each milestone
*/
def generateBurnupCharts(milestonesHistory: Map[String, MilestoneHistory], timeZone: TimeZone, outputDir: File) {
// add two days margin to x-axis
val marginDays = 2
milestonesHistory.map { case (milestoneTitle, milestoneHistory) =>
// if the milestone is open, set max of x-axis to cover today
val isOpen = milestoneHistory.milestone.map(_.closedAt.isEmpty).getOrElse(false)
val max = (milestoneHistory.eventTimestamps ++ (if (isOpen) Set(ZonedDateTime.now) else Nil)).lastOption.map(_.plusDays(marginDays))
val min = milestoneHistory.from.map(_.minusDays(marginDays))
// find all weekends in this milestone
val saturdays = (for (from <- min; to <- max)
yield Util.dayOfWeekBetween(
DayOfWeek.SATURDAY,
from.withZoneSameInstant(timeZone.toZoneId), to.withZoneSameInstant(timeZone.toZoneId)
)
).getOrElse(Nil)
def numberOfIssuesOverTime(issues: java.util.TreeMap[ZonedDateTime, Set[Long]]): Map[ZonedDateTime, Int] = {
val dataPoints = issues.map(d => (d._1 -> d._2.size)).toMap
val lastNumOfIssues = issues.lastOption.map(_._2.size).getOrElse(0)
      // if the milestone is open, add today as a new data point
dataPoints ++ (if (isOpen) Map(ZonedDateTime.now() -> lastNumOfIssues) else Nil)
}
val output = OutputTemplate.chart(
milestoneTitle,
numberOfIssuesOverTime(milestoneHistory.milestonedIssueNumbers),
numberOfIssuesOverTime(milestoneHistory.closedIssueNumbers),
milestoneHistory.milestone.flatMap(_.due),
saturdays,
min,
max,
timeZone
)
(milestoneTitle, output)
}.foreach { case (milestoneTitle, output) =>
val escapedFilename = Util.escape(milestoneTitle)
val path = Paths.get(s"$outputDir/$escapedFilename.html")
Files.write(path, output.getBytes)
logger.info(s"Generated ${path.toAbsolutePath}")
}
}
}
|
shuwada/github-burnup-chart
|
src/main/scala/Main.scala
|
Scala
|
apache-2.0
| 8,462 |
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.reactive.subjects
import monifu.reactive.Subscriber
/**
* A `PublishSubject` emits to a subscriber only those items that are
* emitted by the source subsequent to the time of the subscription
*
* If the source terminates with an error, the `PublishSubject` will not emit any
* items to subsequent subscribers, but will simply pass along the error
* notification from the source Observable.
*
* @see [[monifu.reactive.Subject]]
*/
final class PublishSubject[T] private () extends GenericSubject[T] {
protected type LiftedSubscriber = Subscriber[T]
protected def cacheOrIgnore(elem: T): Unit = ()
protected def liftSubscriber(ref: Subscriber[T]) = ref
protected def onSubscribeContinue(lifted: Subscriber[T], s: Subscriber[T]): Unit = ()
protected def onSubscribeCompleted(subscriber: Subscriber[T], errorThrown: Throwable): Unit = {
if (errorThrown == null)
subscriber.onComplete()
else
subscriber.onError(errorThrown)
}
}
object PublishSubject {
/** Builder for [[PublishSubject]] */
def apply[T](): PublishSubject[T] =
new PublishSubject[T]()
}
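// A minimal usage sketch (illustrative only; assumes the standard Subject interface where a
// PublishSubject is both an Observer, via onNext/onComplete, and an Observable, via subscribe):
//   val subject = PublishSubject[Int]()
//   subject.onNext(1)               // no subscribers yet, so this item is not delivered to anyone
//   subject.subscribe(someSubscriber)
//   subject.onNext(2)               // delivered to someSubscriber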
|
sergius/monifu
|
monifu/shared/src/main/scala/monifu/reactive/subjects/PublishSubject.scala
|
Scala
|
apache-2.0
| 1,801 |
package stormlantern.consul.client
import java.net.URL
import scala.concurrent.duration._
import scala.concurrent._
import akka.actor._
import akka.util.Timeout
import akka.pattern.ask
import stormlantern.consul.client.dao._
import stormlantern.consul.client.dao.akka.AkkaHttpConsulClient
import stormlantern.consul.client.discovery._
import stormlantern.consul.client.election.LeaderInfo
import stormlantern.consul.client.loadbalancers.LoadBalancerActor
import stormlantern.consul.client.util._
class ServiceBroker(serviceBrokerActor: ActorRef, consulClient: ConsulHttpClient)(implicit ec: ExecutionContext) extends RetryPolicy with Logging {
private[this] implicit val timeout = Timeout(10.seconds)
  def withService[A, B](name: String)(f: A ⇒ Future[B]): Future[B] = {
    logger.info(s"Trying to get connection for service $name")
    serviceBrokerActor.ask(ServiceBrokerActor.GetServiceConnection(name)).mapTo[ConnectionHolder].flatMap { connectionHolder ⇒
      logger.info(s"Received connectionholder $connectionHolder")
      try {
        connectionHolder.connection.flatMap(c ⇒ f(c.asInstanceOf[A]))
} finally {
connectionHolder.loadBalancer ! LoadBalancerActor.ReturnConnection(connectionHolder)
}
}
}
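  // Illustrative usage sketch (the service name and connection type below are assumptions, not part of this file):
  //   broker.withService[MyDbConnection, Int]("my-database") { conn => conn.runQuery(...) }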
def registerService(registration: ServiceRegistration): Future[Unit] = {
    consulClient.putService(registration).map { serviceId ⇒
// Add shutdown hook
val deregisterService = new Runnable {
override def run(): Unit = consulClient.deleteService(serviceId)
}
Runtime.getRuntime.addShutdownHook(new Thread(deregisterService))
}
}
  def withLeader[A](key: String)(f: Option[LeaderInfo] ⇒ Future[A]): Future[A] = {
???
}
def joinElection(key: String): Future[Unit] = {
???
}
}
object ServiceBroker {
def apply(rootActor: ActorSystem, httpClient: ConsulHttpClient, services: Set[ConnectionStrategy]): ServiceBroker = {
implicit val ec = ExecutionContext.Implicits.global
    val serviceAvailabilityActorFactory = (factory: ActorRefFactory, service: ServiceDefinition, listener: ActorRef) ⇒
factory.actorOf(ServiceAvailabilityActor.props(httpClient, service, listener))
val actorRef = rootActor.actorOf(ServiceBrokerActor.props(services, serviceAvailabilityActorFactory), "ServiceBroker")
new ServiceBroker(actorRef, httpClient)
}
def apply(consulAddress: URL, services: Set[ConnectionStrategy]): ServiceBroker = {
implicit val rootActor = ActorSystem("reactive-consul")
val httpClient = new AkkaHttpConsulClient(consulAddress)
ServiceBroker(rootActor, httpClient, services)
}
}
case class ServiceUnavailableException(service: String) extends RuntimeException(s"$service service unavailable")
|
dlouwers/reactive-consul
|
client/src/main/scala/stormlantern/consul/client/ServiceBroker.scala
|
Scala
|
mit
| 2,749 |
package io.console.commands
import io.console.ConsoleState
object Blank extends Command {
override val stringRep: String = "blank"
override val execute = (state: ConsoleState) => state
override val description: String = ""
}
|
akwanashie/constraints-dsl
|
src/main/scala/io/console/commands/Blank.scala
|
Scala
|
mit
| 233 |
package scwebapp.servlet.bootstrap
import jakarta.servlet.*
import scutil.lang.*
import scwebapp.*
import scwebapp.servlet.extensions.*
/** make an object extending this and annotate it with jakarta.servlet.annotation.WebListener */
trait HttpHandlerServletContextListener extends BootstrapServletContextListener {
protected final def application(sc:ServletContext):IoResource[Unit] =
for {
handler <- httpHandler(sc.initParameters firstString _)
_ <- IoResource delay {
sc.mount(
name = "HttpHandlerServlet",
handler = handler,
mappings = Vector("/*"),
loadOnStartup = Some(100),
multipartConfig = None
)
}
}
yield ()
protected def httpHandler(props:String=>Option[String]):IoResource[HttpHandler]
}
|
ritschwumm/scwebapp
|
modules/servlet/src/main/scala/scwebapp/servlet/bootstrap/HttpHandlerServletContextListener.scala
|
Scala
|
bsd-2-clause
| 784 |
package im.actor.server.persist.push
import scala.concurrent.ExecutionContext
import slick.driver.PostgresDriver.api._
import im.actor.server.models
class ApplePushCredentialsTable(tag: Tag) extends Table[models.push.ApplePushCredentials](tag, "apple_push_credentials") {
def authId = column[Long]("auth_id", O.PrimaryKey)
def apnsKey = column[Int]("apns_key")
def token = column[Array[Byte]]("token")
def * = (authId, apnsKey, token) <> (models.push.ApplePushCredentials.tupled, models.push.ApplePushCredentials.unapply)
}
object ApplePushCredentials {
val creds = TableQuery[ApplePushCredentialsTable]
def createOrUpdate(authId: Long, apnsKey: Int, token: Array[Byte])(implicit ec: ExecutionContext) = {
for {
      _ ← creds.filterNot(_.authId === authId).filter(c ⇒ c.apnsKey === apnsKey && c.token === token).delete
      r ← creds.insertOrUpdate(models.push.ApplePushCredentials(authId, apnsKey, token))
} yield r
}
def createOrUpdate(c: models.push.ApplePushCredentials) =
creds.insertOrUpdate(c)
def find(authId: Long) =
creds.filter(_.authId === authId).result.headOption
def delete(authId: Long) =
creds.filter(_.authId === authId).delete
def deleteByToken(token: Array[Byte]) =
creds.filter(_.token === token).delete
}
|
boneyao/actor-platform
|
actor-server/actor-persist/src/main/scala/im/actor/server/persist/push/ApplePushCredentials.scala
|
Scala
|
mit
| 1,294 |
/*§
===========================================================================
KnapScal
===========================================================================
Copyright (C) 2015-2017 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.knapscal.app
import javafx.event.ActionEvent
import javafx.fxml.FXML
import javafx.scene.control.{TextArea, TextField}
import info.gianlucacosta.helios.apps.AppInfo
import info.gianlucacosta.helios.fx.about.AboutBox
import info.gianlucacosta.knapscal.app.branchbound.strategies.{DantzigStrategy, MartelloTothStrategy, OptimizedDantzigStrategy}
import info.gianlucacosta.knapscal.knapsack.dynamic.full.DynamicProgrammingSolver
import info.gianlucacosta.knapscal.knapsack.dynamic.optimized.OptimizedDynamicProgrammingSolver
import info.gianlucacosta.knapscal.knapsack.{ItemsFormatter, ItemsParser, Problem}
import scalafx.scene.control.Alert.AlertType
import scalafx.scene.control.{Alert, ChoiceDialog}
private class MainSceneController {
private var aboutBox: AboutBox = _
def setup(appInfo: AppInfo): Unit = {
aboutBox = new AboutBox(appInfo)
}
private val itemsParser = new ItemsParser
@FXML
private var capacityField: TextField = null
@FXML
private var itemsArea: TextArea = null
@FXML
private def runBranchBound(event: ActionEvent): Unit = {
val problem = prepareProblem()
if (problem.isEmpty) {
return
}
val choices = List(
new DantzigStrategy,
new OptimizedDantzigStrategy,
new MartelloTothStrategy
)
val choiceDialog = new ChoiceDialog(choices(0), choices) {
title = "Branch & Bound - Upper bound strategy"
headerText = "Choose an algorithm for the upper bound:"
}
val chosenStrategy = choiceDialog.showAndWait()
if (chosenStrategy.isEmpty) {
return
}
chosenStrategy.get.run(problem.get)
}
private def prepareProblem(): Option[Problem] = {
try {
val items = itemsParser.parse(itemsArea.getText)
val capacity = capacityField.getText.toInt
return Some(Problem(items, capacity))
} catch {
case e: IllegalArgumentException => {
val illegalInputAlert = new Alert(AlertType.Warning) {
headerText = "Invalid input"
contentText = e.getMessage
dialogPane().setPrefWidth(500)
}
illegalInputAlert.showAndWait()
return None
}
}
}
@FXML
private def runDynamicProgramming(event: ActionEvent): Unit = {
val problem = prepareProblem()
if (problem.isEmpty) {
return
}
val solver = new DynamicProgrammingSolver()
val solution = solver.solve(problem.get)
val solutionArea = new scalafx.scene.control.TextArea {
prefWidth = 800
prefHeight = 480
editable = false
text =
s"""Ordered problem items: ${ItemsFormatter.format(problem.get.items)}
|
|${solution.toString()}
""".stripMargin
}
val solutionAlert = new Alert(AlertType.Information) {
title = "Knapsack - Dynamic Programming"
headerText = "Solution"
contentText = solution.value.toString
dialogPane().setContent(solutionArea)
}
solutionAlert.showAndWait()
}
@FXML
private def runOptimizedDynamicProgramming(event: ActionEvent): Unit = {
val problem = prepareProblem()
if (problem.isEmpty) {
return
}
val solver = new OptimizedDynamicProgrammingSolver()
val solution = solver.solve(problem.get)
val alert = new Alert(AlertType.Information) {
title = "Knapsack Dynamic Programming - Optimized"
headerText = None
contentText = solution.toString()
}
alert.showAndWait()
}
@FXML
private def showAboutBox(event: ActionEvent): Unit = {
aboutBox.show()
}
}
|
giancosta86/KnapScal
|
src/main/scala/info/gianlucacosta/knapscal/app/MainSceneController.scala
|
Scala
|
apache-2.0
| 4,498 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5
import org.apache.toree.kernel.BuildInfo
object SparkKernelInfo {
/**
* Represents the protocol version (IPython) supported by this kernel.
*/
val protocolVersion = "5.0"
/**
* Represents what the kernel implements.
*/
val implementation = "spark"
/**
* Represents the kernel version.
*/
val implementationVersion = BuildInfo.version
/**
* Represents the language supported by the kernel.
*/
val language_info = Map("name" -> "scala", "version" -> BuildInfo.scalaVersion)
/**
* Represents the displayed name of the kernel.
*/
val banner = "IBM Spark Kernel"
/**
* Represents the name of the user who started the kernel process.
*/
val username = System.getProperty("user.name")
/**
* Represents the unique session id used by this instance of the kernel.
*/
val session = java.util.UUID.randomUUID.toString
}
|
poplav/incubator-toree
|
protocol/src/main/scala/org/apache/toree/kernel/protocol/v5/SparkKernelInfo.scala
|
Scala
|
apache-2.0
| 1,819 |
/*
* Copyright (c) 2016. <[email protected]>
*
* VariableField.scala is part of marc4scala.
*
* marc4scala is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* marc4scala is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with marc4scala; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package org.marc4scala
/**
* Created by jason on 2016-02-20.
*/
trait VariableField{
/**
* Sets the identifier.
* The purpose of this identifier is to provide an identifier
* for persistency.
*
* @param id
* the identifier
*/
def setId(id: Long)
/**
* Returns the identifier.
*
* @return Long - the identifier
*/
def getId: Long
/**
* Returns the tag name.
*
* @return String - the tag name
*/
def getTag: String
/**
* Sets the tag name.
*
* @param tag
* the tag name
*/
def setTag(tag: String)
/**
* Returns true if the given regular expression matches a subsequence of a
* data element within the variable field.
*
* @param pattern
* the regular expression
* @return true if the pattern matches, false othewise
*/
def find(pattern: String): Boolean
}
|
jasonzou/marc4scala
|
src/main/scala/org/marc4scala/VariableField.scala
|
Scala
|
gpl-3.0
| 1,798 |
import java.io.{File}
import scalaxb.compiler.{Config}
import scalaxb.compiler.ConfigEntry._
class NullNamespaceTest extends TestBase {
val inFile = new File("integration/src/test/resources/nullnamespace.xsd")
val config = Config.default.update(PackageNames(Map[Option[String], Option[String]]())).
update(Outdir(tmp))
lazy val generated = module.process(inFile, config)
"nullnamespace.scala file must compile so that Foo can be used" in {
(List("scalaxb.fromXML[Foo](<foo><bar>a</bar><baz>b</baz></foo>).toString"),
generated) must evaluateTo("Foo(a,b)", outdir = "./tmp")
}
}
|
eed3si9n/scalaxb
|
integration/src/test/scala/NullNamespaceTest.scala
|
Scala
|
mit
| 606 |
package io.buoyant.namerd.storage.kubernetes
import com.fasterxml.jackson.annotation.JsonProperty
import io.buoyant.k8s.{KubeList, ObjectMeta}
case class DtabList(
@JsonProperty("items") items: Seq[Dtab],
kind: Option[String] = None,
metadata: Option[ObjectMeta] = None,
apiVersion: Option[String] = None
) extends KubeList[Dtab]
|
denverwilliams/linkerd
|
namerd/storage/k8s/src/main/scala/io/buoyant/namerd/storage/kubernetes/DtabList.scala
|
Scala
|
apache-2.0
| 340 |
package com.outr.arango.api.model
import io.circe.Json
case class GeneralGraphEdgeCreateHttpExamplesRc400(error: Boolean,
code: Option[Int] = None,
errorMessage: Option[String] = None,
errorNum: Option[Int] = None)
|
outr/arangodb-scala
|
api/src/main/scala/com/outr/arango/api/model/GeneralGraphEdgeCreateHttpExamplesRc400.scala
|
Scala
|
mit
| 370 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.utils.TestUtils
import org.apache.kafka.common.errors.UnsupportedVersionException
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.MetadataRequest
import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertThrows, assertTrue}
import org.junit.jupiter.api.{BeforeEach, Test}
import scala.collection.Seq
import scala.jdk.CollectionConverters._
class MetadataRequestWithForwardingTest extends AbstractMetadataRequestTest {
@BeforeEach
override def setUp(): Unit = {
doSetup(createOffsetsTopic = false)
}
override def enableForwarding: Boolean = true
@Test
def testAutoTopicCreation(): Unit = {
val topic1 = "t1"
val topic2 = "t2"
val topic3 = "t3"
val topic4 = "t4"
val topic5 = "t5"
createTopic(topic1)
val response1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1, topic2).asJava, true).build())
assertNull(response1.errors.get(topic1))
checkAutoCreatedTopic(topic2, response1)
// The default behavior in old versions of the metadata API is to allow topic creation, so
// protocol downgrades should happen gracefully when auto-creation is explicitly requested.
val response2 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic3).asJava, true).build(1))
checkAutoCreatedTopic(topic3, response2)
// V3 doesn't support a configurable allowAutoTopicCreation, so disabling auto-creation is not supported
assertThrows(classOf[UnsupportedVersionException], () => sendMetadataRequest(new MetadataRequest(requestData(List(topic4), false), 3.toShort)))
// V4 and higher support a configurable allowAutoTopicCreation
val response3 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic4, topic5).asJava, false, 4.toShort).build)
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response3.errors.get(topic4))
assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response3.errors.get(topic5))
assertEquals(None, zkClient.getTopicPartitionCount(topic5))
}
@Test
def testAutoCreateTopicWithInvalidReplicationFactor(): Unit = {
// Shutdown all but one broker so that the number of brokers is less than the default replication factor
servers.tail.foreach(_.shutdown())
servers.tail.foreach(_.awaitShutdown())
val topic1 = "testAutoCreateTopic"
val response1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1).asJava, true).build)
assertEquals(1, response1.topicMetadata.size)
val topicMetadata = response1.topicMetadata.asScala.head
assertEquals(Errors.INVALID_REPLICATION_FACTOR, topicMetadata.error)
assertEquals(topic1, topicMetadata.topic)
assertEquals(0, topicMetadata.partitionMetadata.size)
}
@Test
def testAutoCreateOfCollidingTopics(): Unit = {
val topic1 = "testAutoCreate.Topic"
val topic2 = "testAutoCreate_Topic"
val response1 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topic1, topic2).asJava, true).build)
assertEquals(2, response1.topicMetadata.size)
val responseMap = response1.topicMetadata.asScala.map(metadata => (metadata.topic(), metadata.error)).toMap
assertEquals(Set(topic1, topic2), responseMap.keySet)
// The topic creation will be delayed, and the name collision error will be swallowed.
assertEquals(Set(Errors.LEADER_NOT_AVAILABLE, Errors.INVALID_TOPIC_EXCEPTION), responseMap.values.toSet)
val topicCreated = responseMap.head._1
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topicCreated, 0)
TestUtils.waitForPartitionMetadata(servers, topicCreated, 0)
// retry the metadata for the first auto created topic
val response2 = sendMetadataRequest(new MetadataRequest.Builder(Seq(topicCreated).asJava, true).build)
val topicMetadata1 = response2.topicMetadata.asScala.head
assertEquals(Errors.NONE, topicMetadata1.error)
assertEquals(Seq(Errors.NONE), topicMetadata1.partitionMetadata.asScala.map(_.error))
assertEquals(1, topicMetadata1.partitionMetadata.size)
val partitionMetadata = topicMetadata1.partitionMetadata.asScala.head
assertEquals(0, partitionMetadata.partition)
assertEquals(2, partitionMetadata.replicaIds.size)
assertTrue(partitionMetadata.leaderId.isPresent)
assertTrue(partitionMetadata.leaderId.get >= 0)
}
}
|
guozhangwang/kafka
|
core/src/test/scala/unit/kafka/server/MetadataRequestWithForwardingTest.scala
|
Scala
|
apache-2.0
| 5,131 |
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa.lang.psi.types.api
import com.intellij.openapi.project.Project
import com.intellij.psi.{JavaPsiFacade, PsiSubstitutor, _}
import com.intellij.psi.search.GlobalSearchScope
import org.argus.jawa.core.{JavaKnowledge, JawaType}
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
*/
trait JawaTypePsiTypeBridge extends TypeSystemOwner {
def toJawaType(`type`: PsiType,
project: Project,
scope: GlobalSearchScope = null): JawaType = `type` match {
case arrayType: PsiArrayType => new JawaType(JavaKnowledge.JAVA_TOPLEVEL_OBJECT, arrayType.getArrayDimensions)
case PsiType.VOID => new JawaType("void")
case PsiType.BOOLEAN => new JawaType("boolean")
case PsiType.CHAR => new JawaType("char")
case PsiType.BYTE => new JawaType("byte")
case PsiType.SHORT => new JawaType("short")
case PsiType.INT => new JawaType("int")
case PsiType.LONG => new JawaType("long")
case PsiType.FLOAT => new JawaType("float")
case PsiType.DOUBLE => new JawaType("double")
// case PsiType.NULL => new JawaType("boolean")
// case null => Any
case diamondType: PsiDiamondType =>
val types = diamondType.resolveInferredTypes().getInferredTypes
if (types.isEmpty) {
JavaKnowledge.JAVA_TOPLEVEL_OBJECT_TYPE
} else {
toJawaType(types.get(0), project, scope)
}
case _ => throw new IllegalArgumentException(s"psi type ${`type`} should not be converted to ${typeSystem.name} type")
}
def toPsiType(`type`: JawaType,
project: Project,
scope: GlobalSearchScope): PsiType = {
def javaObject = createJavaObject(project, scope)
`type` match {
case t if t == JavaKnowledge.JAVA_TOPLEVEL_OBJECT_TYPE => javaObject
case t if t.jawaName == "void" => PsiType.VOID
case t if t.jawaName == "boolean" => PsiType.BOOLEAN
case t if t.jawaName == "char" => PsiType.CHAR
case t if t.jawaName == "byte" => PsiType.BYTE
case t if t.jawaName == "short" => PsiType.SHORT
case t if t.jawaName == "int" => PsiType.INT
case t if t.jawaName == "long" => PsiType.LONG
case t if t.jawaName == "float" => PsiType.FLOAT
case t if t.jawaName == "double" => PsiType.DOUBLE
// case Null => javaObject
// case Nothing => javaObject
case t if t.isArray => new PsiArrayType(toPsiType(new JawaType(t.baseType), project, scope))
case t =>
createTypeByFqn(project, scope, t.jawaName)
// javaObject
}
}
// def extractClass(`type`: JawaType,
// project: Project = null): Option[PsiClass] =
// extractClassType(`type`, project)
//
// def extractClassType(`type`: JawaType,
// project: Project = null): Option[PsiClass] =
// `type` match {
// case jawaType: JawaType =>
// jawaType.asClass(Option(project).getOrElse(DecompilerUtil.obtainProject))
// case _ => None
// }
protected def createType(psiClass: PsiClass,
project: Project,
substitutor: PsiSubstitutor = PsiSubstitutor.EMPTY,
raw: Boolean = false): PsiType = {
val psiType = factory(project).createType(psiClass, substitutor)
if (raw) psiType.rawType
else psiType
}
protected def createJavaObject(project: Project, scope: GlobalSearchScope) = {
createTypeByFqn(project, scope, "java.lang.Object")
}
private def createTypeByFqn(project: Project, scope: GlobalSearchScope, fqn: String): PsiType = {
factory(project).createTypeByFQClassName(fqn, scope)
}
private def factory(project: Project) =
JavaPsiFacade.getInstance(project).getElementFactory
}
|
arguslab/argus-cit-intellij
|
src/main/scala/org/argus/cit/intellij/jawa/lang/psi/types/api/JawaTypePsiTypeBridge.scala
|
Scala
|
epl-1.0
| 4,119 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark.util.ListenerBus
/**
* A [[SparkListenerEvent]] bus that relays [[SparkListenerEvent]]s to its listeners
*/
private[spark] trait SparkListenerBus extends ListenerBus[SparkListener, SparkListenerEvent] {
override def onPostEvent(listener: SparkListener, event: SparkListenerEvent): Unit = {
event match {
case stageSubmitted: SparkListenerStageSubmitted =>
listener.onStageSubmitted(stageSubmitted)
case stageCompleted: SparkListenerStageCompleted =>
listener.onStageCompleted(stageCompleted)
case jobStart: SparkListenerJobStart =>
listener.onJobStart(jobStart)
case jobEnd: SparkListenerJobEnd =>
listener.onJobEnd(jobEnd)
case taskStart: SparkListenerTaskStart =>
listener.onTaskStart(taskStart)
case taskGettingResult: SparkListenerTaskGettingResult =>
listener.onTaskGettingResult(taskGettingResult)
case taskEnd: SparkListenerTaskEnd =>
listener.onTaskEnd(taskEnd)
case environmentUpdate: SparkListenerEnvironmentUpdate =>
listener.onEnvironmentUpdate(environmentUpdate)
case blockManagerAdded: SparkListenerBlockManagerAdded =>
listener.onBlockManagerAdded(blockManagerAdded)
case blockManagerRemoved: SparkListenerBlockManagerRemoved =>
listener.onBlockManagerRemoved(blockManagerRemoved)
case unpersistRDD: SparkListenerUnpersistRDD =>
listener.onUnpersistRDD(unpersistRDD)
case applicationStart: SparkListenerApplicationStart =>
listener.onApplicationStart(applicationStart)
case applicationEnd: SparkListenerApplicationEnd =>
listener.onApplicationEnd(applicationEnd)
case metricsUpdate: SparkListenerExecutorMetricsUpdate =>
listener.onExecutorMetricsUpdate(metricsUpdate)
case executorAdded: SparkListenerExecutorAdded =>
listener.onExecutorAdded(executorAdded)
case executorRemoved: SparkListenerExecutorRemoved =>
listener.onExecutorRemoved(executorRemoved)
case blockUpdated: SparkListenerBlockUpdated =>
listener.onBlockUpdated(blockUpdated)
case logStart: SparkListenerLogStart => // ignore event log metadata
}
}
}
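// Hedged sketch (illustrative only, not part of the original file): a listener that
// reacts to job lifecycle events; onPostEvent above routes the matching
// SparkListenerEvents to these callbacks. Kept as a comment because it is an example.
//
// class JobTimingListener extends SparkListener {
//   override def onJobStart(jobStart: SparkListenerJobStart): Unit =
//     println(s"job ${jobStart.jobId} started")
//   override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
//     println(s"job ${jobEnd.jobId} finished with ${jobEnd.jobResult}")
// }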
|
ArvinDevel/onlineAggregationOnSparkV2
|
core/src/main/scala/org/apache/spark/scheduler/SparkListenerBus.scala
|
Scala
|
apache-2.0
| 3,056 |
package kuaixue.scala.book.chapter2
object condition extends App{
val x = 1
def ifTest = if(x > 0) 1 else -1
val s = if(x > 0) 1 else -1
val s1 = if(x > 0) "positive" else -1
def equalsExpression(){
val s = if(x > 0) 1
val s1 = if(x > 0) 1 else ()
val s2 = if(x > 0) 1 else Unit
}
}
|
slieer/scala-tutorials
|
src/main/scala/kuaixue/scala/book/chapter2/Condition.scala
|
Scala
|
apache-2.0
| 337 |
/*
* Copyright 2012 Pellucid and Zenexity
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package datomisca
import org.scalatest.Suite
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Span, Millis, Seconds}
import scala.concurrent.ExecutionContext.Implicits.global
import java.util.UUID.randomUUID
trait SampleData {
val schema: Seq[TxData]
val txData: Seq[TxData]
}
trait DatomicFixture extends ScalaFutures
{ self: Suite =>
// globally set timeout to 10 seconds, with the future being checked every 100ms
implicit override val patienceConfig =
PatienceConfig(timeout = Span(10, Seconds), interval = Span(100, Millis))
def withDatomicDB(testCode: Connection => Any): Unit = {
val uri = s"datomic:mem://${randomUUID()}"
Datomic.createDatabase(uri)
try {
implicit val conn = Datomic.connect(uri)
testCode(conn)
()
} finally {
Datomic.deleteDatabase(uri)
()
}
}
def withSampleDatomicDB(sampleData: SampleData)(testCode: Connection => Any): Unit = {
val uri = s"datomic:mem://${randomUUID()}"
Datomic.createDatabase(uri)
try {
implicit val conn = Datomic.connect(uri)
whenReady(Datomic.transact(sampleData.schema)) { _ =>
whenReady(Datomic.transact(sampleData.txData)) { _ =>
testCode(conn)
}
}
()
} finally {
Datomic.deleteDatabase(uri)
()
}
}
}
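// Hedged usage sketch (illustrative only, not part of the original file): how a suite
// might combine this fixture with ScalaTest. `PersonSampleData` is a hypothetical
// SampleData implementation, so the sketch is kept as a comment.
//
// class PersonSpec extends org.scalatest.FunSuite with DatomicFixture {
//   test("loads the sample data") {
//     withSampleDatomicDB(PersonSampleData) { implicit conn =>
//       // query against `conn` here; the in-memory database is deleted afterwards
//     }
//   }
// }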
|
Enalmada/datomisca
|
integration/src/it/scala/datomisca/DatomicFixture.scala
|
Scala
|
apache-2.0
| 1,943 |
package agilesites.setup
import java.io.File
import agilesites.{AgileSitesConstants, Utils}
import sbt.Keys._
import sbt._
trait TomcatSettings extends Utils {
this: AutoPlugin =>
  lazy val serverStop = taskKey[Unit]("Stop Local Sites")
  lazy val serverStart = taskKey[Unit]("Start Local Sites")
def tomcatOpts(cmd: String, base: File, home: File, port: Int, classpath: Seq[File], debug: Boolean) = {
val bin = base / "bin"
val homeBin = home / "bin"
val temp = base / "temp"
val cp = (Seq(bin, homeBin) ++ classpath).map(_.getAbsolutePath).mkString(File.pathSeparator)
val debugSeq = if (debug)
"-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8000" :: Nil
else Nil
val opts = "-cp" :: cp ::
"-Djava.net.preferIPv4Stack=true" ::
"-Djava.io.tmpdir=" + (temp.getAbsolutePath) ::
"-Dfile.encoding=UTF-8" :: "-Duser.timezone=UTC" ::
"-Dorg.owasp.esapi.resources=$BASE/bin" ::
"-Xms256m" :: "-Xmx1024m" :: "-XX:MaxPermSize=256m" ::
s"-Dorg.owasp.esapi.resources=${bin.getAbsolutePath}" ::
"-Dnet.sf.ehcache.enableShutdownHook=true" ::
debugSeq
val args = Seq("agilesites.SitesServer") ++ (cmd match {
case "start" => Seq(port.toString, base.getAbsolutePath)
case "stop" => Seq("stop", port.toString)
case "status" => Seq("status", port.toString)
})
val env = Map("CATALINA_HOME" -> base.getAbsolutePath);
(opts, args, env)
}
def tomcatEmbedded(cmd: String, base: File, home: File, port: Int, classpath: Seq[File], debug: Boolean) = {
val (opts, args, env) = tomcatOpts(cmd: String, base, home, port, classpath, debug)
//println (opts)
val forkOpt = ForkOptions(
runJVMOptions = opts,
envVars = env,
workingDirectory = Some(base))
Fork.java(forkOpt, args)
}
def tomcatScript(base: File, home: File, port: Int, classpath: Seq[File], debug: Boolean, log: Logger) = {
val (opts, args, env) = tomcatOpts("start", base, home, port, classpath, debug)
val (set, ext, prefix) = if (File.pathSeparatorChar == ':')
("export", "sh", "#!/bin/sh")
else ("set", "bat", "@echo off")
val vars = env.map(x => s"${set} ${x._1}=${x._2}").mkString("", "\n", "")
val java = new File(System.getProperty("java.home")) / "bin" / "java"
val script =
s"""|${prefix}
|cd ${base.getAbsolutePath}
|${vars}
|${java.getAbsolutePath} ${opts.mkString(" ")} ${args.mkString(" ")}
""".stripMargin
//println(script)
val scriptFile = base / ("server." + ext)
writeFile(scriptFile, script, log)
println(s"+++ created ${scriptFile.getAbsolutePath}")
}
import agilesites.config.AgileSitesConfigKeys._
import agilesites.setup.AgileSitesSetupKeys._
lazy val serverTask = server := {
val args: Seq[String] = Def.spaceDelimited("<arg>").parsed
val classpath = asTomcatClasspath.value
val port = sitesPort.value.toInt
val base = sitesDirectory.value
val home = file(sitesHome.value)
val url = sitesUrl.value
val log = streams.value.log
val cs = file("webapps") / "cs"
val cas = file("webapps") / "cas"
val debug = args.size == 2 && args(1) == "debug"
val usage = "usage: start [debug]|stop|status|script [debug]"
val ftcs = file(sitesWebapp.value) / "WEB-INF" / "futuretense_cs"
if (!ftcs.exists())
println(s"Sites not installed in ${sitesWebapp.value}")
else
args.headOption match {
case None => println(usage)
case Some("status") =>
tomcatEmbedded("status", base, home, port, classpath, debug)
case Some("stop") =>
tomcatEmbedded("stop", base, home, port, classpath, debug)
case Some("start") =>
val tomcat = new Thread() {
override def run() {
try {
println(s"*** Local Sites Server starting in port ${port} ***")
val tomcatProcess = tomcatEmbedded("start", base, home, port, classpath, debug)
} catch {
case e: Throwable =>
e.printStackTrace
println(s"!!! Cannot start (Sites Server already running?)\nError: ${e.getMessage()}")
}
}
}
tomcat.start
Thread.sleep(3000);
if (tomcat.isAlive()) {
println(" *** Waiting for Local Sites Server startup to complete ***")
println(httpCallRaw(url + "/HelloCS"))
}
case Some("script") =>
tomcatScript(base, home, port, classpath, debug, log)
case Some(thing) =>
println(usage)
}
}
val tomcatSettings = Seq(serverTask,
serverStop := {
server.toTask(" stop").value
},
serverStart := {
server.toTask(" start").value
},
ivyConfigurations += config("tomcat"),
libraryDependencies ++= AgileSitesConstants.tomcatDependencies map { _ % "tomcat"},
asTomcatClasspath <<= (update) map {
report => report.select(configurationFilter("tomcat"))
}
)
}
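// Hedged usage note (illustrative only, not part of the original file): from the sbt
// shell the task accepts the arguments parsed above, for example
//   server start        - launch the embedded server on sitesPort
//   server start debug  - same, with the JDWP agent listening on port 8000
//   server stop / server status / server script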
|
agilesites/agilesites2-build
|
src/main/scala/agilesites/setup/TomcatSettings.scala
|
Scala
|
mit
| 5,093 |
// Copyright 2018 Twitter. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.twitter.heron.streamlet.scala.converter
import java.util.stream.{Collectors, StreamSupport}
import java.util.{List => JavaList}
import scala.collection.mutable.ListBuffer
import org.junit.Assert.{assertEquals, assertTrue}
import com.twitter.heron.streamlet.{
Context,
SerializableBiFunction,
SerializableBinaryOperator,
SerializableConsumer,
SerializableFunction,
SerializablePredicate,
SerializableSupplier,
SerializableTransformer,
Sink => JavaSink
}
import com.twitter.heron.streamlet.scala.{Sink, Source}
import com.twitter.heron.streamlet.scala.common.{
BaseFunSuite,
TestIncrementSerializableTransformer
}
/**
* Tests for Streamlet APIs' Scala to Java Conversion functionality
*/
class ScalaToJavaConverterTest extends BaseFunSuite {
test("ScalaToJavaConverterTest should support SerializableSupplier") {
def testFunction() = ""
val serializableSupplier =
ScalaToJavaConverter.toSerializableSupplier[String](testFunction)
assertTrue(
serializableSupplier
.isInstanceOf[SerializableSupplier[String]])
}
test("ScalaToJavaConverterTest should support SerializableFunction") {
def stringToIntFunction(number: String) = number.toInt
val serializableFunction =
ScalaToJavaConverter.toSerializableFunction[String, Int](
stringToIntFunction)
assertTrue(
serializableFunction
.isInstanceOf[SerializableFunction[String, Int]])
}
test(
"ScalaToJavaConverterTest should support SerializableFunctionWithIterable") {
def stringToListOfIntFunction(number: String) = List(number.toInt)
val serializableFunction =
ScalaToJavaConverter.toSerializableFunctionWithIterable[String, Int](
stringToListOfIntFunction)
assertTrue(
serializableFunction
.isInstanceOf[SerializableFunction[String, _]])
val iterable = serializableFunction.apply("123")
val list = StreamSupport
.stream(iterable.spliterator(), false)
.collect(Collectors.toList())
assertEquals(1, list.size())
assertTrue(list.contains(123))
}
test("ScalaToJavaConverterTest should support SerializableBiFunction") {
def numbersToStringFunction(number1: Int, number2: Long): String =
(number1.toLong + number2).toString
val serializableBiFunction =
ScalaToJavaConverter.toSerializableBiFunction[Int, Long, String](
numbersToStringFunction)
assertTrue(
serializableBiFunction
.isInstanceOf[SerializableBiFunction[Int, Long, String]])
}
test("ScalaToJavaConverterTest should support Java Sink") {
val javaSink =
ScalaToJavaConverter.toJavaSink[Int](new TestSink[Int]())
assertTrue(
javaSink
.isInstanceOf[JavaSink[Int]])
}
test("ScalaToJavaConverterTest should support Java Source") {
val javaSource =
ScalaToJavaConverter.toJavaSource[Int](new TestSource())
assertTrue(
javaSource
.isInstanceOf[com.twitter.heron.streamlet.Source[Int]])
}
test("ScalaToJavaConverterTest should support SerializablePredicate") {
def intToBooleanFunction(number: Int) = number.<(5)
val serializablePredicate =
ScalaToJavaConverter.toSerializablePredicate[Int](intToBooleanFunction)
assertTrue(
serializablePredicate
.isInstanceOf[SerializablePredicate[Int]])
}
test("ScalaToJavaConverterTest should support SerializableConsumer") {
def consumerFunction(number: Int): Unit = number * 10
val serializableConsumer =
ScalaToJavaConverter.toSerializableConsumer[Int](consumerFunction)
assertTrue(
serializableConsumer
.isInstanceOf[SerializableConsumer[Int]])
}
test("ScalaToJavaConverterTest should support SerializableBinaryOperator") {
def addNumbersFunction(number1: Int, number2: Int): Int =
number1 + number2
val serializableBinaryOperator =
ScalaToJavaConverter.toSerializableBinaryOperator[Int](addNumbersFunction)
assertTrue(
serializableBinaryOperator
.isInstanceOf[SerializableBinaryOperator[Int]])
}
test("ScalaToJavaConverterTest should support SerializableBiFunctionWithSeq") {
def numbersToSeqOfIntFunction(number1: String, number2: Int): Seq[Int] =
Seq(number1.toInt + number2)
val serializableBiFunction =
ScalaToJavaConverter.toSerializableBiFunctionWithSeq[String](
numbersToSeqOfIntFunction)
assertTrue(serializableBiFunction
.isInstanceOf[SerializableBiFunction[String, Integer, JavaList[Integer]]])
val list = serializableBiFunction.apply("12", 3)
assertEquals(1, list.size())
assertTrue(list.contains(15))
}
test("ScalaToJavaConverterTest should support SerializableTransformer") {
val serializableTransformer =
new TestIncrementSerializableTransformer(factor = 100)
val javaSerializableTransformer =
ScalaToJavaConverter.toSerializableTransformer[Int, Int](
serializableTransformer)
assertTrue(
javaSerializableTransformer
.isInstanceOf[SerializableTransformer[Int, Int]])
}
private class TestSink[T] extends Sink[T] {
override def setup(context: Context): Unit = {}
override def put(tuple: T): Unit = {}
override def cleanup(): Unit = {}
}
private class TestSource() extends Source[Int] {
private val numbers = ListBuffer[Int]()
override def setup(context: Context): Unit = {
numbers += (1, 2, 3, 4, 5)
}
override def get(): Iterable[Int] = numbers
override def cleanup(): Unit = numbers.clear()
}
}
|
lucperkins/heron
|
heron/api/tests/scala/com/twitter/heron/streamlet/scala/converter/ScalaToJavaConverterTest.scala
|
Scala
|
apache-2.0
| 6,150 |
/*
* Copyright Β© 2013 by JΓΆrg D. Weisbarth
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License 3 as published by
* the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY.
*
* See the License.txt file for more details.
*/
package sim.app.antDefenseAIs.model
import java.lang.StrictMath.min
import sim.engine.SimState
class LN_WithFetching(
override val tribeID: Int,
override val world: World,
override val behaviourConf: LN_Normal_BehaviourConf)
extends LN_Normal(tribeID, world, behaviourConf) {
import behaviourConf._
override def adaptState() {
val alpha = min(1, countFriends() / maxAggressiveness)
val aggressivenessProb = alpha * maxAggressivenessProb + (1 - alpha) * minAggressivenessProb
emotion = if (world.random.nextDouble() <= aggressivenessProb)
Emotion.aggressive
else {
setWarPhero(1.0d)
Emotion.fleeing
}
nextEmotionChange = emotionalDwellTime
}
override def step(state: SimState) {
/*
     * If the ant is deeply neutral (i.e. emotion == undecided) it adapts its state when there are at least
     * `threshold_strangers` ants of other colonies in the neighbourhood. This ensures that the ant does
     * not change its behaviour every simulation step.
*/
val threshold_strangers = 1
if (emotion == Emotion.undecided && countStrangers() >= threshold_strangers)
adaptState()
emotion match {
case Emotion.aggressive => {
if (enemyClose())
attackNearEnemy()
else if (gradientOf(warPheroOf).isDefined) {
moveTo(gradientOf(warPheroOf).get)
adaptAllPheros()
}
else
actEconomically()
}
case Emotion.fleeing => followHomeWay()
case e if e == Emotion.normal || e == Emotion.undecided => actEconomically()
}
relax()
}
}
|
joergdw/antconflictbeh
|
src/sim/app/antDefenseAIs/model/LN_WithFetching.scala
|
Scala
|
lgpl-3.0
| 2,030 |
import sbt._
import sbt.Keys._
import bintray.Plugin.bintraySettings
import ls.Plugin.lsSettings
object Publishing {
def settings = Seq(
licenses in ThisBuild := Seq(("MIT", url(s"https://github.com/softprops/zoey/blob/${version.value}/LICENSE"))),
publishArtifact in Test := false,
homepage in ThisBuild := Some(url("https://github.com/softprops/zoey")),
pomExtra in ThisBuild := (
<scm>
<url>[email protected]:softprops/zoey.git</url>
<connection>scm:git:[email protected]:softprops/zoey.git</connection>
</scm>
<developers>
<developer>
<id>softprops</id>
<name>Doug Tangren</name>
<url>https://github.com/softprops</url>
</developer>
</developers>)
) ++ bintraySettings ++ lsSettings ++ Seq(
bintray.Keys.packageLabels in bintray.Keys.bintray := Seq("zookeeper", "distributed-systems"),
ls.Plugin.LsKeys.tags in ls.Plugin.LsKeys.lsync := (bintray.Keys.packageLabels in bintray.Keys.bintray).value,
externalResolvers in ls.Plugin.LsKeys.lsync := (resolvers in bintray.Keys.bintray).value
)
}
|
softprops/zoey
|
project/Publishing.scala
|
Scala
|
mit
| 1,102 |
package TAPLcomp2.fullref
import scala.util.parsing.combinator.{ImplicitConversions, PackratParsers}
import scala.util.parsing.combinator.syntactical.StandardTokenParsers
sealed trait Ty
case class TyVar(i: String) extends Ty
case class TyArr(t1: Ty, t2: Ty) extends Ty
case object TyUnit extends Ty
case class TyRecord(els: List[(String, Ty)]) extends Ty
case class TyVariant(els: List[(String, Ty)]) extends Ty
case object TyBool extends Ty
case object TyString extends Ty
case object TyFloat extends Ty
case object TyNat extends Ty
case class TyRef(ty: Ty) extends Ty
case object TyTop extends Ty
case object TyBot extends Ty
case class TySource(ty: Ty) extends Ty
case class TySink(ty: Ty) extends Ty
sealed trait Term
case object TmTrue extends Term
case object TmFalse extends Term
case class TmIf(cond: Term, t1: Term, t2: Term) extends Term
case class TmCase(sel: Term, branches: List[(String, String, Term)]) extends Term
case class TmTag(tag: String, t: Term, ty: Ty) extends Term
case class TmVar(i: String) extends Term
case class TmAbs(v: String, ty: Ty, t: Term) extends Term
case class TmApp(t1: Term, t2: Term) extends Term
case class TmLet(l: String, t1: Term, t2: Term) extends Term
case class TmFix(t: Term) extends Term
case class TmString(s: String) extends Term
case object TmUnit extends Term
case class TmAscribe(t: Term, ty: Ty) extends Term
case class TmRecord(fields: List[(String, Term)]) extends Term
case class TmProj(t: Term, proj: String) extends Term
case object TmZero extends Term
case class TmSucc(t: Term) extends Term
case class TmPred(t: Term) extends Term
case class TmIsZero(t: Term) extends Term
case class TmInert(ty: Ty) extends Term
case class TmLoc(i: Int) extends Term
case class TmRef(t: Term) extends Term
case class TmDeref(t: Term) extends Term
case class TmAssign(t1: Term, t2: Term) extends Term
object FullRefParsers extends StandardTokenParsers with PackratParsers with ImplicitConversions {
lexical.reserved += ("Bool", "true", "false", "if", "then", "else",
"Nat", "String", "Unit", "Float", "unit", "case", "let", "in", "succ", "pred",
"as", "of", "fix", "iszero", "Top", "Bot", "Ref", "Source", "Sink", "ref", "inert")
lexical.delimiters += ("\\", "(", ")", ";", "/", ".", ":", "->", "=", "<", ">", "{", "}", "=>", ",", "|",
"!", ":=", "[", "]")
// lower-case identifier
lazy val lcid: PackratParser[String] = ident ^? { case id if id.charAt(0).isLower => id }
// upper-case identifier
lazy val ucid: PackratParser[String] = ident ^? { case id if id.charAt(0).isUpper => id }
// TYPES
lazy val `type`: PackratParser[Ty] = arrowType |||
"Ref" ~> aType ^^ { ty => TyRef(ty) } |||
"Source" ~> aType ^^ { ty => TySource(ty) } |||
"Sink" ~> aType ^^ { ty => TySink(ty) }
lazy val aType: PackratParser[Ty] =
"(" ~> `type` <~ ")" |||
ucid ^^ { tn => TyVar(tn) } |||
"Bool" ^^ { _ => TyBool } |||
"Top" ^^ { _ => TyTop } |||
"Bot" ^^ { _ => TyBot } |||
"<" ~> fieldTypes <~ ">" ^^ { ft => TyVariant(ft) } |||
"String" ^^ { _ => TyString } |||
"Unit" ^^ { _ => TyUnit } |||
"{" ~> fieldTypes <~ "}" ^^ { ft => TyRecord(ft) } |||
"Nat" ^^ { _ => TyNat } |||
"Float" ^^ { _ => TyFloat }
lazy val fieldTypes: PackratParser[List[(String, Ty)]] =
repsep(fieldType, ",")
lazy val fieldType: PackratParser[(String, Ty)] =
lcid ~ (":" ~> `type`) ^^ { case id ~ ty => (id, ty) }
lazy val arrowType: PackratParser[Ty] =
(aType <~ "->") ~ arrowType ^^ { case t1 ~ t2 => TyArr(t1, t2) } |||
aType
// TERMS
lazy val term: PackratParser[Term] =
("if" ~> term) ~ ("then" ~> term) ~ ("else" ~> term) ^^ { case t1 ~ t2 ~ t3 => TmIf(t1, t2, t3) } |||
("case" ~> term) ~ ("of" ~> cases) ^^ { case t ~ cs => TmCase(t, cs) } |||
("\\" ~> lcid) ~ (":" ~> `type`) ~ ("." ~> term) ^^ { case v ~ ty ~ t => TmAbs(v, ty, t) } |||
("let" ~> lcid) ~ ("=" ~> term) ~ ("in" ~> term) ^^ { case id ~ t1 ~ t2 => TmLet(id, t1, t2) } |||
(appTerm <~ ":=") ~ appTerm ^^ { case t1 ~ t2 => TmAssign(t1, t2) } |||
appTerm
lazy val appTerm: PackratParser[Term] =
appTerm ~ pathTerm ^^ { case t1 ~ t2 => TmApp(t1, t2) } |||
"fix" ~> pathTerm ^^ { t => TmFix(t) } |||
"ref" ~> pathTerm ^^ { t => TmRef(t) } |||
"!" ~> pathTerm ^^ { t => TmDeref(t) } |||
"succ" ~> pathTerm ^^ { t => TmSucc(t) } |||
"pred" ~> pathTerm ^^ { t => TmPred(t) } |||
"iszero" ~> pathTerm ^^ { t => TmIsZero(t) } |||
pathTerm
lazy val ascribeTerm: PackratParser[Term] =
aTerm ~ ("as" ~> `type`) ^^ { case t ~ ty => TmAscribe(t, ty) } |||
aTerm
lazy val pathTerm: PackratParser[Term] =
pathTerm ~ ("." ~> lcid) ^^ { case t1 ~ l => TmProj(t1, l) } |||
pathTerm ~ ("." ~> numericLit) ^^ { case t1 ~ l => TmProj(t1, l) } |||
ascribeTerm
lazy val termSeq: PackratParser[Term] =
term ~ (";" ~> termSeq) ^^ { case t ~ ts => TmApp(TmAbs("_", TyUnit, ts), t) } |||
term
lazy val aTerm: PackratParser[Term] =
"(" ~> termSeq <~ ")" |||
("inert" ~ "[") ~> `type` <~ "]" ^^ { ty => TmInert(ty) } |||
"true" ^^ { _ => TmTrue } |||
"false" ^^ { _ => TmFalse } |||
("<" ~> lcid) ~ ("=" ~> term <~ ">") ~ ("as" ~> `type`) ^^ { case l ~ t ~ ty => TmTag(l, t, ty) } |||
lcid ^^ { i => TmVar(i) } |||
stringLit ^^ { l => TmString(l) } |||
"unit" ^^ { _ => TmUnit } |||
"{" ~> fields <~ "}" ^^ { fs => TmRecord(fs) } |||
numericLit ^^ { x => num(x.toInt) }
lazy val cases: PackratParser[List[(String, String, Term)]] =
rep1sep(`case`, "|")
lazy val `case`: PackratParser[(String, String, Term)] =
("<" ~> lcid <~ "=") ~ (lcid <~ ">") ~ ("=>" ~> term) ^^ { case l1 ~ l2 ~ t => (l1, l2, t) }
lazy val fields: PackratParser[List[(String, Term)]] =
repsep(field, ",")
lazy val field: PackratParser[(String, Term)] =
lcid ~ ("=" ~> term) ^^ { case id ~ t => (id, t) }
private def num(x: Int): Term = x match {
case 0 => TmZero
case _ => TmSucc(num(x - 1))
}
def input(s: String) = phrase(term)(new lexical.Scanner(s)) match {
case t if t.successful => t.get
case t => sys.error(t.toString)
}
}
|
hy-zhang/parser
|
Scala/Parser/src/TAPLcomp2/fullref/parser.scala
|
Scala
|
bsd-3-clause
| 6,262 |
package com.example.sample
import java.util.logging.Logger
import javax.ws.rs.core.MediaType
import javax.ws.rs.ext.{ContextResolver, Provider}
import javax.ws.rs._
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.glassfish.jersey.server.ResourceConfig
/**
 * JAX-RS application definition: registers the resources in this package together
 * with the Jackson JSON providers.
 */
@ApplicationPath("/")
class ApplicationResource extends ResourceConfig {
packages(this.getClass.getPackage.getName, "com.fasterxml.jackson.jaxrs")
}
@Path("/do_work")
class Work {
val logger = Logger.getLogger(classOf[Work].getName)
@GET
def get() = "ok"
@POST
@Consumes(Array(MediaType.APPLICATION_JSON))
def post(workParam: TaskBody) = {
logger.info(workParam.toString)
}
}
@Provider
@Consumes(Array(MediaType.APPLICATION_JSON))
@Produces(Array(MediaType.APPLICATION_JSON))
class JsonContextResolver extends ContextResolver[ObjectMapper] {
private val mapper = {
val _mapper = new ObjectMapper()
_mapper.registerModule(DefaultScalaModule)
_mapper
}
override def getContext(`type`: Class[_]): ObjectMapper = {
mapper
}
}
case class TaskBody(a: String, b: Int)
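// Hedged usage sketch (illustrative only, not part of the original file): the worker
// endpoint above accepts
//   POST /do_work  with body {"a": "some-task", "b": 1}
// which Jackson (via DefaultScalaModule) binds to TaskBody.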
|
daneko/appengine-standard-java8-jax-rs-with-scala
|
gae-taskqueue-sample/worker/src/main/scala/com/example/sample/ApplicationResource.scala
|
Scala
|
mit
| 1,153 |
package org.sstudio.bulldozer.dsl
object Result {
private[this] var _code: Int = _
private[this] var _body: String = _
private[this] var _elapsedTime: Long = _
def code: Int = _code
def body: String = _body
def elapsedTime: Long = _elapsedTime
def setCode(code: Int): Unit = {
_code = code
}
def setBody(body: String): Unit = {
_body = body
}
def setElapsedTime(time: Long): Unit = {
_elapsedTime = time
}
}
|
avril23/bulldozer
|
bulldozer/src/main/scala/org/sstudio/bulldozer/dsl/Result.scala
|
Scala
|
bsd-3-clause
| 452 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views
import config.FrontendAppConfig
import org.jsoup.nodes.{Document, Element}
import org.jsoup.select.{Elements, NodeFilter}
import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice.GuiceOneAppPerSuite
import play.api.i18n.{I18nSupport, MessagesApi}
import play.api.mvc.{AnyContentAsEmpty, Call}
import play.api.test.FakeRequest
import uk.gov.hmrc.govukfrontend.views.html.components.FormWithCSRF
import views.html.components.{button, h1, p}
import views.html.layouts.layout
import scala.collection.JavaConverters._
class VatRegViewSpec extends PlaySpec with GuiceOneAppPerSuite with I18nSupport {
implicit val request: FakeRequest[AnyContentAsEmpty.type] = FakeRequest()
implicit val messagesApi: MessagesApi = app.injector.instanceOf[MessagesApi]
implicit val appConfig: FrontendAppConfig = app.injector.instanceOf[FrontendAppConfig]
val testCall: Call = Call("POST", "/test-url")
object Selectors extends BaseSelectors
val layout: layout = app.injector.instanceOf[layout]
val h1: h1 = app.injector.instanceOf[h1]
val p: p = app.injector.instanceOf[p]
val button: button = app.injector.instanceOf[button]
val formWithCSRF: FormWithCSRF = app.injector.instanceOf[FormWithCSRF]
class ViewSetup(implicit val doc: Document) {
case class Link(text: String, href: String)
case class Details(summary: String, body: String)
case class DateField(legend: String, hint: Option[String] = None)
implicit class ElementExtractor(elements: Elements) {
def toList: List[Element] = elements.iterator.asScala.toList
def headOption: Option[Element] = toList.headOption
}
implicit class SelectorDoc(doc: Document) extends BaseSelectors {
private def selectText(selector: String): List[String] =
doc.select(selector).asScala.toList.map(_.text())
def heading: Option[String] = doc.select(h1).headOption.map(_.text)
def headingLevel2(n: Int) = doc.select(h2(n)).headOption.map(_.text)
def hasBackLink: Boolean = doc.select(".govuk-back-link").headOption.isDefined
def errorSummary: Option[Element] = doc.select(".govuk-error-summary").headOption
def errorSummaryLinks: List[Link] =
doc.select(".govuk-error-summary__list a").toList
.map(l => Link(l.text, l.attr("href")))
def hasErrorSummary: Boolean = errorSummary.isDefined
def hintWithMultiple(n: Int): Option[String] = doc.select(multipleHints(n)).headOption.map(_.text)
def paras: List[String] = doc.select("main p").toList.map(_.text)
def para(n: Int): Option[String] = doc.select(p(n)).headOption.map(_.text)
def panelIndentHeading(n: Int): Option[String] = selectText(panelHeading).lift(n)
def panelIndent(n: Int): Option[String] = selectText("main .govuk-inset-text").headOption
def unorderedList(n: Int): List[String] = doc.select(s"main ul:nth-of-type($n)").first.children().eachText().asScala.toList
def link(n: Int): Option[Link] = doc.select(a).toList.map(l => Link(l.text, l.attr("href"))).lift(n - 1)
def submitButton: Option[String] = doc.select(button).headOption.map(_.text)
def hintText: Option[String] = doc.select(hint).headOption.map(_.text)
def details: Option[Details] = {
doc.select(detailsSummary).headOption map { summary =>
Details(summary.text, doc.select(detailsContent).first.text)
}
}
private def input(inputType: String, selector: String, selectorValue: String): Option[String] = {
doc.select(s"input[type=$inputType][$selector=$selectorValue]").headOption.map { elem =>
doc.select(s"label[for=${elem.id}]").first.text
}
}
def dateInput(n: Int): Option[DateField] =
doc.select(s"main .govuk-fieldset").asScala.toList.lift(n - 1).map { elem =>
DateField(
legend = elem.select(".govuk-fieldset__legend").text(),
hint = elem.select(".govuk-hint").asScala.toList.headOption.map(_.text)
)
}
def radio(value: String): Option[String] = input("radio", "value", value)
def checkbox(value: String): Option[String] = input("checkbox", "value", value)
def textBox(id: String): Option[String] = input("text", "id", id)
def textArea(id: String): Option[String] =
doc.select(s"textarea[id=$id]").headOption.map { elem =>
doc.select(s"label[for=${elem.id}]").first.text
}
def warningText(n: Int): Option[String] =
doc.select(s"$warning:nth-of-type($n)").headOption.map(_.text)
}
}
}
|
hmrc/vat-registration-frontend
|
test/views/VatRegViewSpec.scala
|
Scala
|
apache-2.0
| 5,188 |
package com.github.diegopacheco.scalaplayground.typeclasses
object NumbersMainApp extends App {
trait NumberLike[T] {
def plus(x: T, y: T): T
def divide(x: T, y: T): T
def minus(x: T, y: T): T
def multiply(x: T, y: T): T
}
object NumberLike {
implicit object NumberLikeDouble extends NumberLike[Double] {
def plus(x: Double, y: Double): Double = x + y
def divide(x: Double, y: Double): Double = x / y
def minus(x: Double, y: Double): Double = x - y
def multiply(x: Double, y: Double): Double = x * y
}
implicit object NumberLikeInt extends NumberLike[Int] {
def plus(x: Int, y: Int): Int = x + y
def divide(x: Int, y: Int): Int = x / y
def minus(x: Int, y: Int): Int = x - y
def multiply(x: Int, y: Int): Int = x * y
}
implicit def int2NumberLikeInt(x: Int) = NumberLikeInt
implicit def double2NumberLikeDouble(y: Double) = NumberLikeDouble
}
import NumberLike._
val x = 10
println(s"10 + 10 = ${x.plus(10, 10)}")
println(s"10 - 10 = ${x.minus(10, 10)}")
println(s"10 * 10 = ${x.multiply(10, 10)}")
println(s"10 / 10 = ${x.divide(10, 10)}")
val y:Double = 20.5
println(s"20.5 + 20.5 = ${y.plus(20.5, 20.5)}")
println(s"20.5 - 20.5 = ${y.minus(20.5, 20.5)}")
println(s"20.5 * 20.5 = ${y.multiply(20.5, 20.5)}")
println(s"20.5 / 20.5 = ${y.divide(20.5, 20.5)}")
}
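// Hedged addition for illustration (not in the original repo): a generic helper that
// shows the point of the type class -- the same `total` works for Int and Double
// because the compiler picks the matching NumberLike instance from implicit scope.
object NumberLikeUsageSketch {
  import NumbersMainApp.NumberLike

  // e.g. total(List(1, 2, 3), 0) == 6 and total(List(1.5, 2.5), 0.0) == 4.0
  def total[T](xs: List[T], zero: T)(implicit num: NumberLike[T]): T =
    xs.foldLeft(zero)(num.plus)
}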
|
diegopacheco/scala-playground
|
scala-2.12-typeclasses/src/main/scala/com/github/diegopacheco/scalaplayground/typeclasses/NumbersMainApp.scala
|
Scala
|
unlicense
| 1,402 |
package scala.swing
package uitest
/** Example application to verify that table row sorting
* and column reordering work correctly.
*/
object Issue47 extends SimpleSwingApplication {
lazy val top: Frame = {
val data0 = Array[Array[Any]](
Array("Schaeffer" , 1910, 1995),
Array("Sun Ra" , 1914, 1993),
Array("Oram" , 1925, 2003),
Array("Oliveros" , 1932, 2016)
)
val cn = Seq("Name", "Born", "Died")
val t = new Table(data0, cn)
val st = new ScrollPane(t)
t.autoCreateRowSorter = true
val ggAsc = new ToggleButton("Ascending")
ggAsc.selected = true
val ggSort = cn.zipWithIndex.map { case (n, ci) => Button(n)(t.sort(ci, ascending = ggAsc.selected)) }
val pSort = new FlowPanel(new Label("Sort by:") +: ggSort :+ ggAsc: _*)
val ggSelected = new TextArea(3, 40) {
lineWrap = true
editable = false
font = Font(Font.Monospaced, Font.Plain, 12)
}
val pSelected = new FlowPanel(new Label("Selection:"), ggSelected)
t.selection.elementMode = Table.ElementMode.Cell
t.listenTo(t.selection)
def captureSelection() = t.selection.cells.toList.sorted
def toModel(in: List[(Int, Int)]): List[(Int, Int)] = in.map { case (row, col) =>
t.viewToModelRow(row) -> t.viewToModelColumn(col)
}
var lastSel = List.empty[(Int, Int)]
t.reactions += {
case _: event.TableRowsSelected | _: event.TableColumnsSelected =>
val newSel = captureSelection()
if (lastSel != newSel) {
lastSel = newSel
val mSel = toModel(newSel)
val data = newSel.map { case (row, col) => t.apply(row = row, column = col) }
val viewS = newSel.mkString("View : ", " ; ", "\\n")
val modelS = mSel .mkString("Model: ", " ; ", "\\n")
val dataS = data.mkString("Data : ", " ; ", "")
ggSelected.text = s"$viewS$modelS$dataS"
}
}
new MainFrame {
contents = new BoxPanel(Orientation.Vertical) {
contents += st
contents += pSort
contents += pSelected
}
}
}
}
|
scala/scala-swing
|
uitest/src/main/scala/scala/swing/uitest/Issue47.scala
|
Scala
|
apache-2.0
| 2,109 |
package scalaz.stream
import java.nio.BufferOverflowException
import org.scalacheck._
import Prop._
import scalaz.concurrent.Task
import scalaz.stream.Process._
import scalaz.stream.text.{LengthExceeded, lines}
object LinesSpec extends Properties("text") {
val samples = 0 until 5 flatMap { i => List("\\r\\n", "\\n").map { s =>
"Hello&World.&Foo&Bar&".replace("&", s*i)
}
}
// behavior should be identical to that of scala.io.Source
def checkLine(s: String): Boolean = {
val source = scala.io.Source.fromString(s).getLines().toList
emitAll(s.toCharArray.map(_.toString)).pipe(lines()).toList == source &&
emit(s).pipe(lines()).toList == source
}
property("lines()") = secure {
samples.forall(checkLine)
}
property("lines(n) should fail for lines with length greater than n") = secure {
val error = classOf[LengthExceeded]
emit("foo\\nbar").pipe(lines(3)).toList == List("foo", "bar") && // OK input
Process("foo\\n", "bar").pipe(lines(3)).toList == List("foo", "bar") && // OK input
Process("foo", "\\nbar").pipe(lines(3)).toList == List("foo", "bar") && // OK input
throws(error){ emit("foo").pipe(lines(2)).run[Task].run } &&
throws(error){ emit("foo\\nbarr").pipe(lines(3)).run[Task].run } &&
throws(error){ emit("fooo\\nbar").pipe(lines(3)).run[Task].run }
}
property("lines(n) can recover from lines longer than n") = {
import Gen._
val stringWithNewlinesGen: Gen[String] =
listOf(frequency((5, alphaChar), (1, oneOf('\\n', '\\r')))).map(_.mkString)
def rmWhitespace(s: String): String = s.replaceAll("\\\\s", "")
forAll(listOf(stringWithNewlinesGen)) { xs: List[String] =>
val stripped = rmWhitespace(xs.mkString)
val maxLength = Gen.choose(1, stripped.length).sample.getOrElse(1)
val nonFailingLines = lines(maxLength).onFailure {
case LengthExceeded(_, s) => emitAll(s.grouped(maxLength).toList)
}.repeat
val allLines = emitAll(xs).pipe(nonFailingLines).toList
allLines.forall(_.length <= maxLength) &&
rmWhitespace(allLines.mkString) == stripped
}
}
}
|
rossabaker/scalaz-stream
|
src/test/scala/scalaz/stream/LinesSpec.scala
|
Scala
|
mit
| 2,133 |
package com.samstarling.prometheusfinagle.helper
import io.prometheus.client.Collector.MetricFamilySamples.{Sample => PrometheusSample}
import io.prometheus.client.{Collector, CollectorRegistry}
import scala.collection.JavaConverters._
case class CollectorRegistryHelper(registry: CollectorRegistry) {
// TODO: Messy
def samples: Map[String, List[Sample]] = {
def metricFamilies = registry.metricFamilySamples.asScala.toList
def allSamples: List[List[Sample]] =
metricFamilies.map(_.samples.asScala.toList.map(Sample(_)))
def flatSamples: List[Sample] = allSamples.flatten
flatSamples
.map({ s =>
s.name -> s
})
.groupBy(_._1)
.mapValues(_.map(_._2)).toMap
}
}
case class Metric(metric: Collector.MetricFamilySamples) {
def samples: Map[String, Sample] = {
metric.samples.asScala.toList
.map(sample => sample.name -> Sample(sample))
.toMap
}
}
case class Sample(sample: PrometheusSample) {
def name: String = sample.name
def value: Double = sample.value
def dimensions: Map[String, String] = {
sample.labelNames.asScala.zip(sample.labelValues.asScala).toMap
}
}
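// Hedged usage sketch (illustrative only, not part of the original file): summing the
// observed values of a hypothetical counter name in a test.
//
// val helper = CollectorRegistryHelper(CollectorRegistry.defaultRegistry)
// val total: Option[Double] =
//   helper.samples.get("my_counter_total").map(_.map(_.value).sum)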
|
samstarling/finagle-prometheus
|
src/test/scala/com/samstarling/prometheusfinagle/helper/CollectorRegistryHelper.scala
|
Scala
|
mit
| 1,160 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import sbt._
import Keys._
import Process._
import scala.xml.{Node, Elem}
import scala.xml.transform.{RewriteRule, RuleTransformer}
object KafkaBuild extends Build {
val buildNumber = SettingKey[String]("build-number", "Build number defaults to $BUILD_NUMBER environment variable")
val releaseName = SettingKey[String]("release-name", "the full name of this release")
val commonSettings = Seq(
organization := "org.apache.kafka",
pomExtra :=
<parent>
<groupId>org.apache</groupId>
<artifactId>apache</artifactId>
<version>10</version>
</parent>
<licenses>
<license>
<name>Apache 2</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>,
scalacOptions ++= Seq("-deprecation", "-unchecked", "-g:none"),
crossScalaVersions := Seq("2.8.0","2.8.2", "2.9.1", "2.9.2", "2.10.1"),
excludeFilter in unmanagedSources <<= scalaVersion(v => if (v.startsWith("2.8")) "*_2.9+.scala" else "*_2.8.scala"),
scalaVersion := "2.8.0",
version := "0.8.1",
publishTo := Some("Apache Maven Repo" at "https://repository.apache.org/service/local/staging/deploy/maven2"),
credentials += Credentials(Path.userHome / ".m2" / ".credentials"),
buildNumber := System.getProperty("build.number", ""),
version <<= (buildNumber, version) { (build, version) => if (build == "") version else version + "+" + build},
releaseName <<= (name, version, scalaVersion) {(name, version, scalaVersion) => name + "_" + scalaVersion + "-" + version},
javacOptions in compile ++= Seq("-Xlint:unchecked", "-source", "1.5"),
javacOptions in doc ++= Seq("-source", "1.5"),
parallelExecution in Test := false, // Prevent tests from overrunning each other
publishArtifact in Test := true,
libraryDependencies ++= Seq(
"log4j" % "log4j" % "1.2.15" exclude("javax.jms", "jms"),
"net.sf.jopt-simple" % "jopt-simple" % "3.2",
"org.slf4j" % "slf4j-simple" % "1.6.4"
),
    // The issue: going from log4j 1.2.14 to 1.2.15, the developers added features that pull in
    // dependencies on various sun and javax packages, which are excluded below.
ivyXML := <dependencies>
<exclude module="javax"/>
<exclude module="jmxri"/>
<exclude module="jmxtools"/>
<exclude module="mail"/>
<exclude module="jms"/>
<dependency org="org.apache.zookeeper" name="zookeeper" rev="3.3.4">
<exclude org="log4j" module="log4j"/>
<exclude org="jline" module="jline"/>
</dependency>
</dependencies>,
mappings in packageBin in Compile += file("LICENSE") -> "LICENSE",
mappings in packageBin in Compile += file("NOTICE") -> "NOTICE"
)
val hadoopSettings = Seq(
javacOptions in compile ++= Seq("-Xlint:deprecation"),
libraryDependencies ++= Seq(
"org.apache.avro" % "avro" % "1.4.0",
"org.apache.pig" % "pig" % "0.8.0",
"commons-logging" % "commons-logging" % "1.0.4",
"org.codehaus.jackson" % "jackson-core-asl" % "1.5.5",
"org.codehaus.jackson" % "jackson-mapper-asl" % "1.5.5",
"org.apache.hadoop" % "hadoop-core" % "0.20.2"
),
ivyXML :=
<dependencies>
<exclude module="netty"/>
<exclude module="javax"/>
<exclude module="jmxri"/>
<exclude module="jmxtools"/>
<exclude module="mail"/>
<exclude module="jms"/>
<dependency org="org.apache.hadoop" name="hadoop-core" rev="0.20.2">
<exclude org="junit" module="junit"/>
</dependency>
<dependency org="org.apache.pig" name="pig" rev="0.8.0">
<exclude org="junit" module="junit"/>
</dependency>
</dependencies>
)
val runRat = TaskKey[Unit]("run-rat-task", "Runs Apache rat on Kafka")
val runRatTask = runRat := {
"bin/run-rat.sh" !
}
val release = TaskKey[Unit]("release", "Creates a deployable release directory file with dependencies, config, and scripts.")
val releaseTask = release <<= ( packageBin in (core, Compile), dependencyClasspath in (core, Runtime), exportedProducts in Compile,
target, releaseName in core ) map { (packageBin, deps, products, target, releaseName) =>
val jarFiles = deps.files.filter(f => !products.files.contains(f) && f.getName.endsWith(".jar"))
val destination = target / "RELEASE" / releaseName
IO.copyFile(packageBin, destination / packageBin.getName)
IO.copyFile(file("LICENSE"), destination / "LICENSE")
IO.copyFile(file("NOTICE"), destination / "NOTICE")
IO.copy(jarFiles.map { f => (f, destination / "libs" / f.getName) })
IO.copyDirectory(file("config"), destination / "config")
IO.copyDirectory(file("bin"), destination / "bin")
for {file <- (destination / "bin").listFiles} { file.setExecutable(true, true) }
}
val releaseZip = TaskKey[Unit]("release-zip", "Creates a deployable zip file with dependencies, config, and scripts.")
val releaseZipTask = releaseZip <<= (release, target, releaseName in core) map { (release, target, releaseName) =>
val zipPath = target / "RELEASE" / "%s.zip".format(releaseName)
IO.delete(zipPath)
IO.zip((target/"RELEASE" ** releaseName ***) x relativeTo(target/"RELEASE"), zipPath)
}
val releaseTar = TaskKey[Unit]("release-tar", "Creates a deployable tar.gz file with dependencies, config, and scripts.")
val releaseTarTask = releaseTar <<= ( release, target, releaseName in core) map { (release, target, releaseName) =>
Process(Seq("tar", "czf", "%s.tar.gz".format(releaseName), releaseName), target / "RELEASE").! match {
case 0 => ()
case n => sys.error("Failed to run native tar application!")
}
}
lazy val kafka = Project(id = "Kafka", base = file(".")).aggregate(core, examples, contrib, perf).settings((commonSettings ++
runRatTask ++ releaseTask ++ releaseZipTask ++ releaseTarTask): _*)
lazy val core = Project(id = "core", base = file("core")).settings(commonSettings: _*)
lazy val examples = Project(id = "java-examples", base = file("examples")).settings(commonSettings :_*) dependsOn (core)
lazy val perf = Project(id = "perf", base = file("perf")).settings((Seq(name := "kafka-perf") ++ commonSettings):_*) dependsOn (core)
lazy val contrib = Project(id = "contrib", base = file("contrib")).aggregate(hadoopProducer, hadoopConsumer).settings(commonSettings :_*)
lazy val hadoopProducer = Project(id = "hadoop-producer", base = file("contrib/hadoop-producer")).settings(hadoopSettings ++ commonSettings: _*) dependsOn (core)
lazy val hadoopConsumer = Project(id = "hadoop-consumer", base = file("contrib/hadoop-consumer")).settings(hadoopSettings ++ commonSettings: _*) dependsOn (core)
}
|
fintler/kafka
|
project/Build.scala
|
Scala
|
apache-2.0
| 7,688 |
package util.db
import org.joda.time._
import org.joda.time.format._
import anorm._
/**
* Implicit conversions and extractors for Anorm types not supported by default
*/
object AnormExtension {
/*
* Allows JodaTime to be used with Anorm.
* Credit and reference: http://stackoverflow.com/questions/11388301/joda-datetime-field-on-play-framework-2-0s-anorm
*/
val dateFormatGeneration: DateTimeFormatter = DateTimeFormat.forPattern("yyyyMMddHHmmssSS");
implicit def rowToDateTime: Column[DateTime] = Column.nonNull1 { (value, meta) =>
val MetaDataItem(qualified, nullable, clazz) = meta
value match {
case ts: java.sql.Timestamp => Right(new DateTime(ts.getTime))
case d: java.sql.Date => Right(new DateTime(d.getTime))
case str: java.lang.String => Right(dateFormatGeneration.parseDateTime(str))
case _ => Left(TypeDoesNotMatch("Cannot convert " + value + ":" + value.asInstanceOf[AnyRef].getClass + " to DateTime for column " + qualified))
}
}
implicit val dateTimeToStatement = new ToStatement[DateTime] {
def set(s: java.sql.PreparedStatement, index: Int, aValue: DateTime): Unit = {
s.setTimestamp(index, new java.sql.Timestamp(aValue.withMillisOfSecond(0).getMillis()) )
}
}
/*
* Allows scala's BigDecimal to be used as Doubles with Anorm
*/
implicit def rowToDouble: Column[Double] = Column.nonNull1 { (value, meta) =>
val MetaDataItem(qualified, nullable, clazz) = meta
value match {
case bd: BigDecimal => Right(bd.toDouble)
case bd: java.math.BigDecimal => Right(BigDecimal(bd).toDouble)
case int: Int => Right(int.toDouble)
case long: Long => Right(long.toDouble)
case float: Float => Right(float.toDouble)
case double: Double => Right(double)
case _ => Left(TypeDoesNotMatch("Cannot convert " + value + ":" + value.asInstanceOf[AnyRef].getClass + " to Double for column " + qualified))
}
}
implicit val doubleToStatement = new ToStatement[Double] {
def set(s: java.sql.PreparedStatement, index: Int, aValue: Double): Unit = {
s.setDouble(index, aValue)
}
}
}
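// Hedged usage sketch (illustrative only, not part of the original file): with these
// implicits in scope an Anorm parser can read joda DateTime and Double columns
// directly. The table and column names below are hypothetical.
//
// import util.db.AnormExtension._
// val playedOn: DateTime = SQL("SELECT played_on FROM matches LIMIT 1")
//   .as(SqlParser.get[DateTime]("played_on").single)(connection)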
|
PriscH/Foosball
|
app/util/db/AnormExtension.scala
|
Scala
|
gpl-3.0
| 2,262 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.instruments.bonds
/**
* @author Paul Bernard
*/
class FixedRateBond {
}
|
pmularien/spectrum-old
|
financial/src/main/scala/org/quantintel/ql/instruments/bonds/FixedRateBond.scala
|
Scala
|
apache-2.0
| 788 |
package org.finra.datagenerator.scaffolding.context
import org.finra.datagenerator.scaffolding.utils.Logging
import scala.collection.JavaConverters._
/**
* Created by dkopel on 12/14/16.
*/
trait GlobalsProvider extends Logging {
private var globals: Map[String, Object] = Map.empty[String, Object]
def setGlobal(key: String, value: Object) = globals = globals + (key->value)
def setGlobals(values: Map[String, Object]) = globals ++= values
def getGlobals: java.util.Map[String, Object] = globals.asJava
def lookupGlobal(key: String): Object = globals(key)
}
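// Hedged usage sketch (illustrative only, not part of the original file):
//
// object RunContext extends GlobalsProvider
// RunContext.setGlobal("run.id", "42")
// RunContext.lookupGlobal("run.id") // returns "42"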
|
FINRAOS/DataGenerator
|
rubber-scaffolding/rubber-commons/src/main/scala/org/finra/datagenerator/scaffolding/context/GlobalsProvider.scala
|
Scala
|
apache-2.0
| 590 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms.models
import play.api.libs.json.Json
case class Section(
title: String,
fields: List[FieldValue]
)
object Section {
implicit val format = Json.format[Section]
}
case class SectionFormField(
title: String,
fields: List[(FormField, FieldValue)]
)
|
VlachJosef/bforms
|
app/uk/gov/hmrc/bforms/models/Section.scala
|
Scala
|
apache-2.0
| 892 |
package cpup.mc.oldenMagic.content.testing
import net.minecraft.command.{ICommandSender, CommandBase}
import cpup.mc.lib.ModLifecycleHandler
import cpup.mc.oldenMagic.OldenMagicMod
import cpup.mc.oldenMagic.api.oldenLanguage.{EntityMagicData, PassiveSpells}
import cpup.mc.oldenMagic.api.oldenLanguage.runeParsing.Spell
import cpup.mc.oldenMagic.content.runes.{ItRune, MeRune, ProtectRune, DamageRune}
import cpup.mc.oldenMagic.content.targets.{OPCaster, PlayerCaster}
import net.minecraft.util.ChatComponentTranslation
import net.minecraft.entity.Entity
import cpup.mc.lib.util.EntityUtil
object ChangeMagicDataCommand extends CommandBase with ModLifecycleHandler {
def mod = OldenMagicMod
def getCommandName = "change-magic"
def internalName = "change-magic"
def getCommandUsage(sender: ICommandSender) = s"commands.${mod.ref.modID}:$internalName.usage"
def processCommand(sender: ICommandSender, args: Array[String]) {
sender match {
case ent: Entity =>
EntityMagicData.get(ent) match {
case Some(data) =>
if(args.length < 2) {
sender.addChatMessage(new ChatComponentTranslation(getCommandUsage(sender)))
} else {
try {
val name = args(0)
val amt = CommandBase.parseInt(sender, args(1))
data.setData(name, data.datas(name) + amt)
sender.addChatMessage(new ChatComponentTranslation(
s"commands.${mod.ref.modID}:$internalName.success",
sender.getCommandSenderName,
name,
amt: Integer,
data.datas(name): Integer
))
} catch {
case e: Exception =>
sender.addChatMessage(new ChatComponentTranslation(
s"commands.${mod.ref.modID}:$internalName.failure",
e.getMessage
))
}
}
case None =>
sender.addChatMessage(new ChatComponentTranslation(
s"commands.${mod.ref.modID}:$internalName.failure",
"cannot get or create data"
))
}
case _ =>
sender.addChatMessage(new ChatComponentTranslation(
s"commands.${mod.ref.modID}:$internalName.failure",
"not an Entity"
))
}
}
}
|
CoderPuppy/oldenmagic-mc
|
src/main/scala/cpup/mc/oldenMagic/content/testing/ChangeMagicDataCommand.scala
|
Scala
|
mit
| 2,118 |
package com.twitter.finagle.loadbalancer
import com.twitter.app.App
import com.twitter.finagle.client.StringClient
import com.twitter.finagle.{NoBrokersAvailableException, param}
import com.twitter.finagle.stats.{InMemoryStatsReceiver, LoadedStatsReceiver, NullStatsReceiver}
import com.twitter.util.Await
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.concurrent.Eventually.eventually
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class LoadBalancerFactoryTest extends FunSuite with StringClient {
trait Ctx {
val sr = new InMemoryStatsReceiver
val client = stringClient
.configured(param.Stats(sr))
}
trait PerHostFlagCtx extends Ctx with App {
val label = "myclient"
val port = "localhost:8080"
val perHostStatKey = Seq(label, port, "available")
def enablePerHostStats() =
flag.parse(Array("-com.twitter.finagle.loadbalancer.perHostStats=true"))
def disablePerHostStats() =
flag.parse(Array("-com.twitter.finagle.loadbalancer.perHostStats=false"))
//ensure the per-host stats are disabled if previous test didn't call disablePerHostStats()
disablePerHostStats()
}
test("per-host stats flag not set, no configured per-host stats.\\n" +
"No per-host stats should be reported") (new PerHostFlagCtx {
val loadedStatsReceiver = new InMemoryStatsReceiver
LoadedStatsReceiver.self = loadedStatsReceiver
client.configured(param.Label(label))
.newService(port)
assert(loadedStatsReceiver.gauges.contains(perHostStatKey) === false)
disablePerHostStats()
})
test("per-host stats flag not set, configured per-host stats.\\n" +
"Per-host stats should be reported to configured stats receiver") (new PerHostFlagCtx {
val hostStatsReceiver = new InMemoryStatsReceiver
client.configured(param.Label(label))
.configured(LoadBalancerFactory.HostStats(hostStatsReceiver))
.newService(port)
eventually {
assert(hostStatsReceiver.gauges(perHostStatKey).apply === 1.0)
}
disablePerHostStats()
})
test("per-host stats flag set, no configured per-host stats.\\n" +
"Per-host stats should be reported to loadedStatsReceiver") (new PerHostFlagCtx {
enablePerHostStats()
val hostStatsReceiver = new InMemoryStatsReceiver
LoadedStatsReceiver.self = hostStatsReceiver
client.configured(param.Label(label))
.newService(port)
eventually {
assert(hostStatsReceiver.gauges(perHostStatKey).apply === 1.0)
}
disablePerHostStats()
})
test("per-host stats flag set, configured per-host stats.\\n" +
"Per-host stats should be reported to configured stats receiver") (new PerHostFlagCtx {
enablePerHostStats()
val hostStatsReceiver = new InMemoryStatsReceiver
client.configured(param.Label(label))
.configured(LoadBalancerFactory.HostStats(hostStatsReceiver))
.newService(port)
eventually {
assert(hostStatsReceiver.gauges(perHostStatKey).apply === 1.0)
}
disablePerHostStats()
})
test("per-host stats flag set, configured per-host stats is NullStatsReceiver.\\n" +
"Per-host stats should not be reported") (new PerHostFlagCtx {
enablePerHostStats()
val loadedStatsReceiver = new InMemoryStatsReceiver
LoadedStatsReceiver.self = loadedStatsReceiver
client.configured(param.Label(label))
.configured(LoadBalancerFactory.HostStats(NullStatsReceiver))
.newService(port)
assert(loadedStatsReceiver.gauges.contains(perHostStatKey) === false)
disablePerHostStats()
})
test("destination name is passed to NoBrokersAvailableException") {
val name = "nil!"
val exc = intercept[NoBrokersAvailableException] {
Await.result(stringClient.newClient(name)())
}
assert(exc.name === name)
}
}
|
yancl/finagle-6.22.0
|
finagle-core/src/test/scala/com/twitter/finagle/loadbalancer/LoadBalancerFactoryTest.scala
|
Scala
|
apache-2.0
| 3,821 |
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon) and Zenexity
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivemongo.api
import java.util.concurrent.atomic.AtomicLong
import scala.util.{ Try, Failure, Success }
import scala.util.control.{ NonFatal, NoStackTrace }
import scala.concurrent.{ Await, ExecutionContext, Future, Promise }
import scala.concurrent.duration.{ Duration, FiniteDuration, SECONDS }
import com.typesafe.config.Config
import akka.actor.{ Actor, ActorRef, ActorSystem, Props, Terminated }
import akka.pattern._
import akka.util.Timeout
import reactivemongo.core.actors._
import reactivemongo.core.errors.ConnectionException
import reactivemongo.core.nodeset.{ Authenticate, ProtocolMetadata }
import reactivemongo.core.protocol.{
CheckedWriteRequest,
MongoWireVersion,
RequestMaker,
Response
}
import reactivemongo.core.commands.SuccessfulAuthentication
import reactivemongo.api.commands.WriteConcern
import reactivemongo.util.LazyLogger
/**
* A helper that sends the given message to the given actor,
* following a failover strategy.
* This helper holds a future reference that is completed with a response,
* after 1 or more attempts (specified in the given strategy).
 * If all the tryouts configured by the given strategy were unsuccessful,
* the future reference is completed with a Throwable.
*
* Should not be used directly for most use cases.
*
* @tparam T Type of the message to send.
* @param message The message to send to the given actor. This message will be wrapped into an ExpectingResponse message by the `expectingResponseMaker` function.
* @param connection The reference to the MongoConnection the given message will be sent to.
* @param strategy The Failover strategy.
* @param expectingResponseMaker A function that takes a message of type `T` and wraps it into an ExpectingResponse message.
*/
class Failover[T](message: T, connection: MongoConnection, strategy: FailoverStrategy)(expectingResponseMaker: T => ExpectingResponse)(implicit ec: ExecutionContext) {
import Failover2.logger
import reactivemongo.core.errors._
import reactivemongo.core.actors.Exceptions._
private val promise = Promise[Response]()
/** A future that is completed with a response, after 1 or more attempts (specified in the given strategy). */
val future: Future[Response] = promise.future
private def send(n: Int) {
val expectingResponse = expectingResponseMaker(message)
connection.mongosystem ! expectingResponse
expectingResponse.future.onComplete {
case Failure(e) if isRetryable(e) =>
if (n < strategy.retries) {
val `try` = n + 1
val delayFactor = strategy.delayFactor(`try`)
val delay = Duration.unapply(strategy.initialDelay * delayFactor).map(t => FiniteDuration(t._1, t._2)).getOrElse(strategy.initialDelay)
logger.debug(s"Got an error, retrying... (try #${`try`} is scheduled in ${delay.toMillis} ms)", e)
connection.actorSystem.scheduler.scheduleOnce(delay)(send(`try`))
} else {
// generally that means that the primary is not available or the nodeset is unreachable
logger.error("Got an error, no more attempts to do. Completing with a failure...", e)
promise.failure(e)
}
case Failure(e) =>
logger.trace("Got an non retryable error, completing with a failure...", e)
promise.failure(e)
case Success(response) =>
logger.trace("Got a successful result, completing...")
promise.success(response)
}
}
private def isRetryable(throwable: Throwable) = throwable match {
case PrimaryUnavailableException | NodeSetNotReachable => true
case e: DatabaseException if e.isNotAPrimaryError || e.isUnauthorized => true
case _: ConnectionException => true
case _: ConnectionNotInitialized => true
case _ => false
}
send(0)
}
class Failover2[A](producer: () => Future[A], connection: MongoConnection, strategy: FailoverStrategy)(implicit ec: ExecutionContext) {
import Failover2.logger
import reactivemongo.core.errors._
import reactivemongo.core.actors.Exceptions._
private val promise = Promise[A]()
/**
* A future that is completed with a response,
* after 1 or more attempts (specified in the given strategy).
*/
val future: Future[A] = promise.future
private def send(n: Int): Unit =
Future(producer()).flatMap(identity).onComplete {
case Failure(e) if isRetryable(e) => {
if (n < strategy.retries) {
val `try` = n + 1
val delayFactor = strategy.delayFactor(`try`)
val delay = Duration.unapply(strategy.initialDelay * delayFactor).
fold(strategy.initialDelay)(t => FiniteDuration(t._1, t._2))
logger.debug(s"Got an error, retrying... (try #${`try`} is scheduled in ${delay.toMillis} ms)", e)
connection.actorSystem.scheduler.scheduleOnce(delay)(send(`try`))
} else {
// generally that means that the primary is not available
// or the nodeset is unreachable
logger.error("Got an error, no more attempts to do. Completing with a failure...", e)
promise.failure(e)
}
}
case Failure(e) => {
logger.trace(
"Got an non retryable error, completing with a failure...", e)
promise.failure(e)
}
case Success(response) => {
logger.trace("Got a successful result, completing...")
promise.success(response)
}
}
private def isRetryable(throwable: Throwable) = throwable match {
case PrimaryUnavailableException | NodeSetNotReachable => true
case e: DatabaseException if e.isNotAPrimaryError || e.isUnauthorized => true
case _: ConnectionException => true
case _: ConnectionNotInitialized => true
case _ => false
}
send(0)
}
object Failover2 {
private[api] val logger = LazyLogger("reactivemongo.api.Failover2")
def apply[A](connection: MongoConnection, strategy: FailoverStrategy)(producer: () => Future[A])(implicit ec: ExecutionContext): Failover2[A] =
new Failover2(producer, connection, strategy)
}
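/* Illustrative sketch (not part of the original source): retrying an arbitrary
 * asynchronous operation through Failover2. `connection` and `runCommand` are
 * assumptions made for the example; any `() => Future[A]` producer can be used.
 *
 *   implicit val ec: ExecutionContext = connection.actorSystem.dispatcher
 *   val strategy = FailoverStrategy(retries = 3)
 *   val result = Failover2(connection, strategy)(() => runCommand()).future
 */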
@deprecated(message = "Unused", since = "0.11.10")
object Failover {
/**
* Produces a [[reactivemongo.api.Failover]] holding a future reference that is completed with a result, after 1 or more attempts (specified in the given strategy).
*
* @param checkedWriteRequest The checkedWriteRequest to send to the given actor.
* @param connection The reference to the MongoConnection the given message will be sent to.
* @param strategy The Failover strategy.
*/
@deprecated(message = "Unused", since = "0.11.10")
def apply(checkedWriteRequest: CheckedWriteRequest, connection: MongoConnection, strategy: FailoverStrategy)(implicit ec: ExecutionContext): Failover[CheckedWriteRequest] =
new Failover(checkedWriteRequest, connection, strategy)(CheckedWriteRequestExpectingResponse.apply)
/**
* Produces a [[reactivemongo.api.Failover]] holding a future reference that is completed with a result, after 1 or more attempts (specified in the given strategy).
*
* @param requestMaker The requestMaker to send to the given actor.
* @param connection The reference to the MongoConnection actor the given message will be sent to.
* @param strategy The Failover strategy.
*/
@deprecated(message = "Unused", since = "0.11.10")
def apply(requestMaker: RequestMaker, connection: MongoConnection, strategy: FailoverStrategy)(implicit ec: ExecutionContext): Failover[RequestMaker] =
new Failover(requestMaker, connection, strategy)(RequestMakerExpectingResponse(_, false))
}
/**
* A failover strategy for sending requests.
*
* @param initialDelay the initial delay between the first failed attempt and the next one.
 * @param retries the number of retries before giving up.
* @param delayFactor a function that takes the current iteration and returns a factor to be applied to the initialDelay.
*/
case class FailoverStrategy(
initialDelay: FiniteDuration = FiniteDuration(500, "ms"),
retries: Int = 5,
delayFactor: Int => Double = _ => 1) {
  /** The maximum timeout, including all the retries */
lazy val maxTimeout: FiniteDuration =
(1 to retries).foldLeft(initialDelay) { (d, i) =>
d + (initialDelay * delayFactor(i).toLong)
}
}
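/* Illustrative sketch (assumption, not from the original source): a strategy
 * whose delay grows linearly with the attempt number. With an initial delay of
 * 500ms and `delayFactor = i => i`, the scheduled delays are 500ms, 1000ms and
 * 1500ms, and `maxTimeout` sums them together with the initial delay.
 *
 *   val strategy = FailoverStrategy(
 *     initialDelay = FiniteDuration(500, "ms"),
 *     retries = 3,
 *     delayFactor = attempt => attempt.toDouble)
 *   // strategy.maxTimeout == 500.ms + 500.ms + 1000.ms + 1500.ms == 3500.ms
 */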
/**
* A pool of MongoDB connections.
*
* Connection here does not mean that there is one open channel to the server:
 * behind the scenes, many connections (channels) are open on all the available servers in the replica set.
*
* Example:
* {{{
* import reactivemongo.api._
*
 * val driver = new MongoDriver
 * val connection = driver.connection(List("localhost"))
 * val db = connection.database("plugin")
 * val collection = db.map(_.collection("acoll"))
* }}}
*
* @param mongosystem the reference to the internal [[reactivemongo.core.actors.MongoDBSystem]] Actor.
*/
class MongoConnection(
val actorSystem: ActorSystem,
val mongosystem: ActorRef,
val options: MongoConnectionOptions) {
import akka.pattern.ask
import akka.util.Timeout
/**
* Returns a DefaultDB reference using this connection.
*
* @param name the database name
* @param failoverStrategy the failover strategy for sending requests.
*/
def apply(name: String, failoverStrategy: FailoverStrategy = FailoverStrategy())(implicit context: ExecutionContext): DefaultDB = {
metadata.foreach {
case ProtocolMetadata(_, MongoWireVersion.V24AndBefore, _, _, _) =>
throw ConnectionException("unsupported MongoDB version < 2.6")
case meta => ()
}
DefaultDB(name, this, failoverStrategy)
}
/**
* Returns a DefaultDB reference using this connection
* (alias for the `apply` method).
*
* @param name the database name
* @param failoverStrategy the failover strategy for sending requests.
*/
@deprecated(message = "Must use [[apply]]", since = "0.11.8")
def db(name: String, failoverStrategy: FailoverStrategy = FailoverStrategy())(implicit context: ExecutionContext): DefaultDB = apply(name, failoverStrategy)
/**
* Returns a DefaultDB reference using this connection.
* The failover strategy is also used to wait for the node set to be ready,
* before returning an available DB.
*
* @param name the database name
* @param failoverStrategy the failover strategy for sending requests.
*/
def database(name: String, failoverStrategy: FailoverStrategy = FailoverStrategy())(implicit context: ExecutionContext): Future[DefaultDB] =
waitIsAvailable(failoverStrategy).map(_ => apply(name, failoverStrategy))
/** Returns a future that will be successful when node set is available. */
private[api] def waitIsAvailable(failoverStrategy: FailoverStrategy)(implicit ec: ExecutionContext): Future[Unit] = {
@inline def nextTimeout(i: Int): Duration = {
val delayFactor = failoverStrategy.delayFactor(i)
failoverStrategy.initialDelay * delayFactor
}
def wait(iteration: Int, attempt: Int, timeout: Duration): Future[Unit] = {
if (attempt == 0) Future.failed(Exceptions.NodeSetNotReachable)
else {
Future {
val ms = timeout.toMillis
try {
val before = System.currentTimeMillis
val result = Await.result(isAvailable, timeout)
val duration = System.currentTimeMillis - before
if (result) true
else {
Thread.sleep(ms - duration)
false
}
} catch {
case e: Throwable =>
Thread.sleep(ms)
throw e
}
}.flatMap {
case false if (attempt > 0) => Future.failed[Unit](
new scala.RuntimeException("Got an error, no more attempt to do."))
case _ => Future.successful({})
}.recoverWith {
case error =>
val nextIt = iteration + 1
wait(nextIt, attempt - 1, nextTimeout(nextIt))
}
}
}
wait(0, 1 + failoverStrategy.retries, failoverStrategy.initialDelay).
flatMap { _ =>
metadata match {
case Some(ProtocolMetadata(
_, MongoWireVersion.V24AndBefore, _, _, _)) =>
Future.failed[Unit](ConnectionException(
"unsupported MongoDB version < 2.6"))
case Some(_) => Future successful {}
case _ => Future.failed[Unit](ConnectionException(
"protocol metadata not available"))
}
}
}
/**
   * Writes a request and drops the response, if any.
*
* @param message The request maker.
*/
private[api] def send(message: RequestMaker): Unit = mongosystem ! message
private[api] def sendExpectingResponse(checkedWriteRequest: CheckedWriteRequest)(implicit ec: ExecutionContext): Future[Response] = {
val expectingResponse =
CheckedWriteRequestExpectingResponse(checkedWriteRequest)
mongosystem ! expectingResponse
expectingResponse.future
}
private[api] def sendExpectingResponse(requestMaker: RequestMaker, isMongo26WriteOp: Boolean)(implicit ec: ExecutionContext): Future[Response] = {
val expectingResponse =
RequestMakerExpectingResponse(requestMaker, isMongo26WriteOp)
mongosystem ! expectingResponse
expectingResponse.future
}
/** Authenticates the connection on the given database. */
def authenticate(db: String, user: String, password: String): Future[SuccessfulAuthentication] = {
val req = AuthRequest(Authenticate(db, user, password))
mongosystem ! req
req.future
}
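  /* Illustrative sketch (not from the original source): authenticating after a
   * connection has been obtained. The driver, credentials and execution context
   * are assumptions made for the example.
   *
   *   import scala.concurrent.ExecutionContext.Implicits.global
   *   val conn = driver.connection(List("localhost:27017"))
   *   conn.authenticate("plugin", "someUser", "somePass").onComplete {
   *     case scala.util.Success(_)     => println("authenticated")
   *     case scala.util.Failure(cause) => println(s"authentication failed: $cause")
   *   }
   */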
/**
* Closes this MongoConnection (closes all the channels and ends the actors).
*/
def askClose()(implicit timeout: FiniteDuration): Future[_] =
ask(monitor, Close)(Timeout(timeout))
/**
* Closes this MongoConnection
* (closes all the channels and ends the actors)
*/
def close(): Unit = monitor ! Close
private case class IsKilled(result: Promise[Boolean])
private[api] def killed: Future[Boolean] = {
val p = Promise[Boolean]()
monitor ! IsKilled(p)
p.future
}
private case class IsAvailable(result: Promise[Boolean]) {
override val toString = "IsAvailable?"
}
private case class IsPrimaryAvailable(result: Promise[Boolean]) {
override val toString = "IsPrimaryAvailable?"
}
private def isAvailable: Future[Boolean] = {
val p = Promise[Boolean]()
val check = {
if (options.readPreference.slaveOk) IsAvailable(p)
else IsPrimaryAvailable(p)
}
monitor ! check
p.future
}
private[api] val monitor = actorSystem.actorOf(
Props(new MonitorActor), "Monitor-" + MongoDriver.nextCounter)
@volatile private[api] var metadata: Option[ProtocolMetadata] = None
private class MonitorActor extends Actor {
import MonitorActor._
import scala.collection.mutable.Queue
mongosystem ! RegisterMonitor
private val waitingForPrimary = Queue[ActorRef]()
private var primaryAvailable = false
private val waitingForClose = Queue[ActorRef]()
private var killed = false
private var setAvailable = false
override val receive: Receive = {
case pa @ PrimaryAvailable(metadata) => {
logger.debug("set: a primary is available")
primaryAvailable = true
waitingForPrimary.dequeueAll(_ => true).foreach(_ ! pa)
}
case PrimaryUnavailable =>
logger.debug("set: no primary available")
primaryAvailable = false
case SetAvailable(meta) => {
logger.debug(s"set: a node is available: $meta")
setAvailable = true
metadata = Some(meta)
}
case SetUnavailable =>
setAvailable = false
logger.debug("set: no node seems to be available")
/* TODO: Remove
case WaitForPrimary => {
if (killed) {
sender ! Failure(ConnectionException(
"MongoDBSystem actor shutting down or no longer active"))
} else if (primaryAvailable && metadata.isDefined) {
logger.debug(s"$sender is waiting for a primary... available right now, go!")
sender ! PrimaryAvailable(metadata.get)
} else {
logger.debug(s"$sender is waiting for a primary... not available, warning as soon a primary is available.")
waitingForPrimary += sender
}
}
*/
case Close => {
logger.debug("Monitor received Close")
killed = true
mongosystem ! Close
waitingForClose += sender
waitingForPrimary.dequeueAll(_ => true).foreach(
_ ! Failure(new RuntimeException(
"MongoDBSystem actor shutting down or no longer active")))
}
case Closed => {
logger.debug(s"Monitor $self closed, stopping...")
waitingForClose.dequeueAll(_ => true).foreach(_ ! Closed)
context.stop(self)
}
case IsKilled(result) => result success killed
case IsAvailable(result) => result success setAvailable
case IsPrimaryAvailable(result) => result success primaryAvailable
}
override def postStop = logger.debug(s"Monitor $self stopped.")
}
object MonitorActor {
private val logger = LazyLogger("reactivemongo.core.actors.MonitorActor")
}
}
object MongoConnection {
val DefaultHost = "localhost"
val DefaultPort = 27017
final class URIParsingException(message: String)
extends Exception with NoStackTrace {
override def getMessage() = message
}
/**
* @param hosts the hosts of the servers of the MongoDB replica set
 * @param options the connection options
 * @param ignoredOptions the options parsed from the URI that are not supported and were ignored
 * @param db the name of the database
 * @param authenticate the authentication information (see [[MongoConnectionOptions.authMode]])
*/
final case class ParsedURI(
hosts: List[(String, Int)],
options: MongoConnectionOptions,
ignoredOptions: List[String],
db: Option[String],
authenticate: Option[Authenticate])
/**
* Parses a MongoURI.
*
* @param uri the connection URI (see [[http://docs.mongodb.org/manual/reference/connection-string/ the MongoDB URI documentation]] for more information)
*/
def parseURI(uri: String): Try[ParsedURI] = {
val prefix = "mongodb://"
Try {
val useful = uri.replace(prefix, "")
def opts = makeOptions(parseOptions(useful))
if (useful.indexOf("@") == -1) {
val (db, hosts) = parseHostsAndDbName(useful)
val (unsupportedKeys, options) = opts
ParsedURI(hosts, options, unsupportedKeys, db, None)
} else {
val WithAuth = """([^:]+):([^@]*)@(.+)""".r
useful match {
case WithAuth(user, pass, hostsPortsAndDbName) => {
val (db, hosts) = parseHostsAndDbName(hostsPortsAndDbName)
db.fold[ParsedURI](throw new URIParsingException(s"Could not parse URI '$uri': authentication information found but no database name in URI")) { database =>
val (unsupportedKeys, options) = opts
ParsedURI(hosts, options, unsupportedKeys, Some(database), Some(Authenticate.apply(options.authSource.getOrElse(database), user, pass)))
}
}
case _ => throw new URIParsingException(s"Could not parse URI '$uri'")
}
}
}
}
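  /* Illustrative sketch (not from the original source): parsing a connection URI
   * with credentials and an option. The host, database and credential values are
   * assumptions made for the example.
   *
   *   import scala.util.{ Success, Failure }
   *   MongoConnection.parseURI(
   *     "mongodb://user:pass@host1:27017,host2:27017/mydb?authMode=scram-sha1") match {
   *     case Success(parsed) => // parsed.hosts, parsed.db, parsed.authenticate, parsed.options
   *     case Failure(cause)  => // the URI could not be parsed
   *   }
   */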
private def parseHosts(hosts: String) = hosts.split(",").toList.map { host =>
host.split(':').toList match {
case host :: port :: Nil => host -> {
try {
val p = port.toInt
if (p > 0 && p < 65536) p
else throw new URIParsingException(s"Could not parse hosts '$hosts' from URI: invalid port '$port'")
} catch {
case _: NumberFormatException => throw new URIParsingException(s"Could not parse hosts '$hosts' from URI: invalid port '$port'")
case NonFatal(e) => throw e
}
}
case host :: Nil => host -> DefaultPort
case _ => throw new URIParsingException(s"Could not parse hosts from URI: invalid definition '$hosts'")
}
}
private def parseHostsAndDbName(hostsPortAndDbName: String): (Option[String], List[(String, Int)]) = hostsPortAndDbName.split("/").toList match {
case hosts :: Nil => None -> parseHosts(hosts.takeWhile(_ != '?'))
case hosts :: dbName :: Nil => Some(dbName.takeWhile(_ != '?')) -> parseHosts(hosts)
case _ =>
throw new URIParsingException(s"Could not parse hosts and database from URI: '$hostsPortAndDbName'")
}
private def parseOptions(uriAndOptions: String): Map[String, String] =
uriAndOptions.split('?').toList match {
case uri :: options :: Nil => options.split("&").map { option =>
option.split("=").toList match {
case key :: value :: Nil => (key -> value)
case _ => throw new URIParsingException(s"Could not parse URI '$uri': invalid options '$options'")
}
}.toMap
case _ => Map.empty
}
val IntRe = "^([0-9]+)$".r
private def makeOptions(opts: Map[String, String]): (List[String], MongoConnectionOptions) = {
val (remOpts, step1) = opts.iterator.foldLeft(
Map.empty[String, String] -> MongoConnectionOptions()) {
case ((unsupported, result), kv) => kv match {
case ("authSource", v) => unsupported -> result.copy(authSource = Some(v))
case ("authMode", "scram-sha1") => unsupported -> result.copy(authMode = ScramSha1Authentication)
case ("authMode", _) => unsupported -> result.copy(authMode = CrAuthentication)
case ("connectTimeoutMS", v) => unsupported -> result.copy(connectTimeoutMS = v.toInt)
case ("socketTimeoutMS", v) => unsupported -> result.copy(socketTimeoutMS = v.toInt)
case ("sslEnabled", v) => unsupported -> result.copy(sslEnabled = v.toBoolean)
case ("sslAllowsInvalidCert", v) => unsupported -> result.copy(sslAllowsInvalidCert = v.toBoolean)
case ("rm.tcpNoDelay", v) => unsupported -> result.copy(tcpNoDelay = v.toBoolean)
case ("rm.keepAlive", v) => unsupported -> result.copy(keepAlive = v.toBoolean)
case ("rm.nbChannelsPerNode", v) => unsupported -> result.copy(nbChannelsPerNode = v.toInt)
case ("writeConcern", "unacknowledged") => unsupported -> result.
copy(writeConcern = WriteConcern.Unacknowledged)
case ("writeConcern", "acknowledged") => unsupported -> result.
copy(writeConcern = WriteConcern.Acknowledged)
case ("writeConcern", "journaled") => unsupported -> result.
copy(writeConcern = WriteConcern.Journaled)
case ("writeConcern", "default") => unsupported -> result.
copy(writeConcern = WriteConcern.Default)
case ("readPreference", "primary") => unsupported -> result.
copy(readPreference = ReadPreference.primary)
case ("readPreference", "primaryPreferred") =>
unsupported -> result.copy(
readPreference = ReadPreference.primaryPreferred)
case ("readPreference", "secondary") => unsupported -> result.copy(
readPreference = ReadPreference.secondary)
case ("readPreference", "secondaryPreferred") =>
unsupported -> result.copy(
readPreference = ReadPreference.secondaryPreferred)
case ("readPreference", "nearest") => unsupported -> result.copy(
readPreference = ReadPreference.nearest)
case kv => (unsupported + kv) -> result
}
}
// Overriding options
remOpts.iterator.foldLeft(List.empty[String] -> step1) {
case ((unsupported, result), kv) => kv match {
case ("writeConcernW", "majority") => unsupported -> result.
copy(writeConcern = result.writeConcern.
copy(w = WriteConcern.Majority))
case ("writeConcernW", IntRe(str)) => unsupported -> result.
copy(writeConcern = result.writeConcern.
copy(w = WriteConcern.WaitForAknowledgments(str.toInt)))
case ("writeConcernW", tag) => unsupported -> result.
copy(writeConcern = result.writeConcern.
copy(w = WriteConcern.TagSet(tag)))
case ("writeConcernJ", journaled) => unsupported -> result.
copy(writeConcern = result.writeConcern.
copy(j = journaled.toBoolean))
case ("writeConcernTimeout", t @ IntRe(ms)) => unsupported -> result.
copy(writeConcern = result.writeConcern.
copy(wtimeout = Some(ms.toInt)))
case (k, _) => (k :: unsupported) -> result
}
}
}
}
class MongoDriver(config: Option[Config] = None) {
import scala.collection.mutable.{ Map => MutableMap }
import MongoDriver.logger
/* MongoDriver always uses its own ActorSystem so it can have complete control separate from other
* Actor Systems in the application
*/
val system = {
import com.typesafe.config.ConfigFactory
val reference = config getOrElse ConfigFactory.load()
val cfg = if (!reference.hasPath("mongo-async-driver")) {
logger.warn("No mongo-async-driver configuration found")
ConfigFactory.empty()
} else reference.getConfig("mongo-async-driver")
ActorSystem("reactivemongo", cfg)
}
private val supervisorActor = system.actorOf(Props(new SupervisorActor(this)), s"Supervisor-${MongoDriver.nextCounter}")
private val connectionMonitors = MutableMap.empty[ActorRef, MongoConnection]
/** Keep a list of all connections so that we can terminate the actors */
def connections: Iterable[MongoConnection] = connectionMonitors.values
def numConnections: Int = connectionMonitors.size
def close(timeout: FiniteDuration = FiniteDuration(1, SECONDS)) = {
// Terminate actors used by MongoConnections
connections.foreach(_.monitor ! Close)
// Tell the supervisor to close.
// It will shut down all the connections and monitors
// and then shut down the ActorSystem as it is exiting.
supervisorActor ! Close
// When the actorSystem is shutdown,
// it means that supervisorActor has exited (run its postStop).
// So, wait for that event.
system.awaitTermination(timeout)
}
/**
* Creates a new MongoConnection.
*
* See [[http://docs.mongodb.org/manual/reference/connection-string/ the MongoDB URI documentation]] for more information.
*
* @param nodes A list of node names, like ''node1.foo.com:27017''. Port is optional, it is 27017 by default.
* @param authentications A list of Authenticates.
* @param nbChannelsPerNode Number of channels to open per node. Defaults to 10.
* @param name The name of the newly created [[reactivemongo.core.actors.MongoDBSystem]] actor, if needed.
* @param options Options for the new connection pool.
*/
@deprecated(message = "Must use `connection` with `nbChannelsPerNode` set in the `options`.", since = "0.11.3")
def connection(nodes: Seq[String], options: MongoConnectionOptions, authentications: Seq[Authenticate], nbChannelsPerNode: Int, name: Option[String]): MongoConnection = connection(nodes, options, authentications, name)
/**
* Creates a new MongoConnection.
*
* See [[http://docs.mongodb.org/manual/reference/connection-string/ the MongoDB URI documentation]] for more information.
*
* @param nodes A list of node names, like ''node1.foo.com:27017''. Port is optional, it is 27017 by default.
* @param authentications A list of Authenticates.
* @param name The name of the newly created [[reactivemongo.core.actors.MongoDBSystem]] actor, if needed.
* @param options Options for the new connection pool.
*/
def connection(nodes: Seq[String], options: MongoConnectionOptions = MongoConnectionOptions(), authentications: Seq[Authenticate] = Seq.empty, name: Option[String] = None): MongoConnection = {
def dbsystem: MongoDBSystem = options.authMode match {
case ScramSha1Authentication =>
new StandardDBSystem(nodes, authentications, options)()
case _ =>
new LegacyDBSystem(nodes, authentications, options)()
}
val props = Props(dbsystem)
val mongosystem = name match {
case Some(nm) => system.actorOf(props, nm);
case None =>
system.actorOf(props, s"Connection-${+MongoDriver.nextCounter}")
}
val connection = (supervisorActor ? AddConnection(options, mongosystem))(Timeout(10, SECONDS))
Await.result(connection.mapTo[MongoConnection], Duration.Inf)
}
/**
* Creates a new MongoConnection from URI.
*
* See [[http://docs.mongodb.org/manual/reference/connection-string/ the MongoDB URI documentation]] for more information.
*
* @param parsedURI The URI parsed by [[reactivemongo.api.MongoConnection.parseURI]]
* @param nbChannelsPerNode Number of channels to open per node.
* @param name The name of the newly created [[reactivemongo.core.actors.MongoDBSystem]] actor, if needed.
*/
@deprecated(message = "Must you reactivemongo.api.MongoDriver.connection(reactivemongo.api.MongoConnection.ParsedURI,Option[String]):reactivemongo.api.MongoConnection connection(..)]] with `nbChannelsPerNode` set in the `parsedURI`.", since = "0.11.3")
def connection(parsedURI: MongoConnection.ParsedURI, nbChannelsPerNode: Int, name: Option[String]): MongoConnection = connection(parsedURI, name)
/**
* Creates a new MongoConnection from URI.
*
* See [[http://docs.mongodb.org/manual/reference/connection-string/ the MongoDB URI documentation]] for more information.
*
* @param parsedURI The URI parsed by [[reactivemongo.api.MongoConnection.parseURI]]
* @param name The name of the newly created [[reactivemongo.core.actors.MongoDBSystem]] actor, if needed.
*/
def connection(parsedURI: MongoConnection.ParsedURI, name: Option[String]): MongoConnection = {
if (!parsedURI.ignoredOptions.isEmpty)
logger.warn(s"Some options were ignored because they are not supported (yet): ${parsedURI.ignoredOptions.mkString(", ")}")
connection(parsedURI.hosts.map(h => h._1 + ':' + h._2), parsedURI.options, parsedURI.authenticate.toSeq, name)
}
/**
* Creates a new MongoConnection from URI.
*
* See [[http://docs.mongodb.org/manual/reference/connection-string/ the MongoDB URI documentation]] for more information.
*
* @param parsedURI The URI parsed by [[reactivemongo.api.MongoConnection.parseURI]]
* @param nbChannelsPerNode Number of channels to open per node.
*/
@deprecated(message = "Must you `connection` with `nbChannelsPerNode` set in the options of the `parsedURI`.", since = "0.11.3")
def connection(parsedURI: MongoConnection.ParsedURI, nbChannelsPerNode: Int): MongoConnection = connection(parsedURI)
/**
* Creates a new MongoConnection from URI.
*
* See [[http://docs.mongodb.org/manual/reference/connection-string/ the MongoDB URI documentation]] for more information.
*
* @param parsedURI The URI parsed by [[reactivemongo.api.MongoConnection.parseURI]]
*/
def connection(parsedURI: MongoConnection.ParsedURI): MongoConnection =
connection(parsedURI, None)
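  /* Illustrative sketch (not from the original source): building a connection
   * from a URI. The URI itself is an assumption made for the example.
   *
   *   val driver = MongoDriver()
   *   val connection: scala.util.Try[MongoConnection] =
   *     MongoConnection.parseURI("mongodb://localhost:27017/mydb").
   *       map(parsed => driver.connection(parsed))
   */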
private case class AddConnection(options: MongoConnectionOptions, mongosystem: ActorRef)
private case class CloseWithTimeout(timeout: FiniteDuration)
private case class SupervisorActor(driver: MongoDriver) extends Actor {
def isEmpty = driver.connectionMonitors.isEmpty
override def receive = {
case AddConnection(opts, sys) =>
val connection = new MongoConnection(driver.system, sys, opts)
driver.connectionMonitors.put(connection.monitor, connection)
context.watch(connection.monitor)
sender ! connection
case Terminated(actor) => driver.connectionMonitors.remove(actor)
case CloseWithTimeout(timeout) =>
if (isEmpty) context.stop(self)
else context.become(closing(timeout))
case Close =>
if (isEmpty) context.stop(self)
else context.become(closing(Duration.Zero))
}
def closing(shutdownTimeout: FiniteDuration): Receive = {
case ac: AddConnection =>
logger.warn("Refusing to add connection while MongoDriver is closing.")
case Terminated(actor) =>
driver.connectionMonitors.remove(actor)
if (isEmpty) {
context.stop(self)
}
case CloseWithTimeout(timeout) =>
logger.warn("CloseWithTimeout ignored, already closing.")
case Close => logger.warn("Close ignored, already closing.")
}
override def postStop: Unit = driver.system.shutdown()
}
}
object MongoDriver {
private val logger = LazyLogger("reactivemongo.api.MongoDriver")
/** Creates a new [[MongoDriver]] with a new ActorSystem. */
def apply(): MongoDriver = new MongoDriver
/** Creates a new [[MongoDriver]] with the given `config`. */
def apply(config: Config): MongoDriver = new MongoDriver(Some(config))
private[api] val _counter = new AtomicLong(0)
private[api] def nextCounter: Long = _counter.incrementAndGet()
}
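/* Illustrative sketch (assumption, not from the original source): supplying a
 * custom `mongo-async-driver` section for the driver's internal ActorSystem.
 * The logging setting shown is only an example value.
 *
 *   import com.typesafe.config.ConfigFactory
 *   val driver = MongoDriver(ConfigFactory.parseString(
 *     """mongo-async-driver { akka.loglevel = "WARNING" }"""))
 */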
|
charleskubicek/ReactiveMongo
|
driver/src/main/scala/api/api.scala
|
Scala
|
apache-2.0
| 33,517 |
/*
* Copyright 2015 LG CNS.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.db.counter
import java.io.IOException
import java.io.RandomAccessFile
import java.util.Hashtable
import scouter.lang.CounterKey
import scouter.lang.TimeTypeEnum
import scouter.lang.value.Value
import scouter.lang.value.ValueEnum
import scouter.io.DataInputX
import scouter.io.DataOutputX
import scouter.util.FileUtil
import scouter.util.IClose
object DailyCounterData {
val table = new Hashtable[String, DailyCounterData]();
def open( file:String) :DailyCounterData={
table.synchronized {
var reader = table.get(file);
if (reader != null) {
reader.refrence+=1;
} else {
reader = new DailyCounterData(file);
table.put(file, reader);
}
return reader;
}
}
}
class DailyCounterData(file:String) extends IClose {
var refrence = 0;
var dataFile = new RandomAccessFile(file + ".data", "rw");
override def close() {
DailyCounterData.table.synchronized {
if (this.refrence == 0) {
DailyCounterData.table.remove(this.file);
try {
dataFile = FileUtil.close(dataFile);
} catch {
case e:Throwable=>
e.printStackTrace();
}
} else {
this.refrence-=1;
}
}
}
def read( pos:Long):Array[Byte]= {
this.synchronized{
try {
dataFile.seek(pos);
val valueType = dataFile.readByte();
val timetype = dataFile.readByte()
val valueLen = DailyCounterUtils.getLength(valueType);
val bucketCount = DailyCounterUtils.getBucketCount(timetype);
dataFile.seek(pos + 2);
val buffer = new Array[Byte](valueLen * bucketCount);
dataFile.read(buffer);
return buffer;
} catch{
case e:IOException=>
throw new RuntimeException(e);
}
}
}
def getValues( location:Long):Array[Value]= {
this.synchronized{
try {
dataFile.seek(location);
val valueType = dataFile.readByte();
val timetype = dataFile.readByte()
val valueLen = DailyCounterUtils.getLength(valueType);
val bucketCount = DailyCounterUtils.getBucketCount(timetype);
val buffer = new Array[Byte](valueLen * bucketCount)
dataFile.read(buffer);
val values = new Array[Value](bucketCount)
for ( i <- 0 to values.length-1) {
values(i) = new DataInputX(buffer, i*valueLen).readValue();
}
return values;
} catch {
case e:IOException=>
throw new RuntimeException(e);
}
}}
def getValue( location:Long, hhmm:Int):Value= {
this.synchronized{
try {
dataFile.seek(location);
val valueType = dataFile.readByte();
val intervalType = dataFile.readByte()
val valueLen = DailyCounterUtils.getLength(valueType);
val bucketCount = DailyCounterUtils.getBucketCount(intervalType);
val bucketPos = DailyCounterUtils.getBucketPos(intervalType, hhmm);
if (bucketPos < bucketCount) {
dataFile.seek(location + 2 + valueLen * bucketPos);
val buffer = new Array[Byte](valueLen);
dataFile.read(buffer);
return new DataInputX(buffer).readValue();
}
return null;
} catch {
case e:IOException=>
throw new RuntimeException(e);
}
}
}
def write( location:Long, key:CounterKey, hhmm:Int, value:Value) {
dataFile.seek(location);
val valueType = dataFile.readByte();
if (valueType != value.getValueType() && value.getValueType() != ValueEnum.NULL)
return;
val timetype = dataFile.readByte()
if (timetype != key.timetype)
return;
val valueLen = DailyCounterUtils.getLength(valueType);
val bucketPos = DailyCounterUtils.getBucketPos(timetype, hhmm);
dataFile.seek(location + 2 + bucketPos * valueLen);
dataFile.write(new DataOutputX().writeValue(value).toByteArray());
}
def write( key:CounterKey, hhmm:Int, value:Value):Long= {
    // Record not yet present in the file: append a new bucket row for this counter key.
val valueType = value.getValueType();
val valueLen = DailyCounterUtils.getLength(valueType);
if (valueLen <= 0)
return 0;
val bucketCount = DailyCounterUtils.getBucketCount(key.timetype);
if (bucketCount <= 0)
return 0;
val bucketPos = DailyCounterUtils.getBucketPos(key.timetype, hhmm);
val location = dataFile.length();
dataFile.seek(location);
dataFile.writeByte(value.getValueType());
dataFile.writeByte(key.timetype);
    dataFile.write(new Array[Byte](valueLen * bucketCount)); // First write the whole day's data as empty (zeroed) bytes.
dataFile.seek(location + 2 + bucketPos * valueLen);
dataFile.write(new DataOutputX().writeValue(value).toByteArray());
return location;
}
}
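/* Illustrative sketch (not from the original source): a typical read/write cycle
 * on a daily counter file. The file path is an assumption; `key` stands for a
 * CounterKey with a 5-minute time type and `v` for a numeric Value (their
 * construction is not shown and is also an assumption).
 *
 *   val data = DailyCounterData.open("/db/counter/20150101.cnt")
 *   try {
 *     val location = data.write(key, 1205, v)    // hhmm = 1205; the first write creates the bucket row
 *     data.write(location, key, 1210, v)         // later writes reuse the returned location
 *     val readBack = data.getValue(location, 1205)
 *   } finally data.close()
 */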
|
jhshin9/scouter
|
scouter.server/src/scouter/server/db/counter/DailyCounterData.scala
|
Scala
|
apache-2.0
| 5,197 |
import java.io.File
object TreeRecursiveTraverse {
def main(args: Array[String]): Unit = {
traverse(".")
}
def traverse(path: String) = {
def output(level: Int, file: File): Unit = {
println("\\t" * level + file.getName)
}
def traverse0(level: Int, file: File): Unit = {
output(level, file)
if (file.isDirectory) {
val children = file.listFiles()
for (child <- children) {
traverse0(level + 1, child)
}
}
}
traverse0(0, new File(path))
}
}
|
rockie-yang/explore-spark
|
src/main/scala/TreeRecursiveTraverse.scala
|
Scala
|
mit
| 536 |
import sbt._
import Keys._
object FPInScalaBuild extends Build {
val opts = Project.defaultSettings ++ Seq(
scalaVersion := "2.11.6",
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/",
libraryDependencies += "org.scalatest" %% "scalatest" % "2.2.4" % "test",
libraryDependencies += "junit" % "junit" % "4.12" % "test",
libraryDependencies += "org.scalacheck" %% "scalacheck" % "1.11.5" % "test"
)
lazy val root =
Project(id = "fpinscala",
base = file("."),
settings = opts ++ Seq(
onLoadMessage ~= (_ + nio2check())
)) aggregate (chapterCode, exercises, answers)
lazy val chapterCode =
Project(id = "chapter-code",
base = file("chaptercode"),
settings = opts)
lazy val exercises =
Project(id = "exercises",
base = file("exercises"),
settings = opts)
lazy val answers =
Project(id = "answers",
base = file("answers"),
settings = opts)
def nio2check(): String = {
val cls = "java.nio.channels.AsynchronousFileChannel"
try {Class.forName(cls); ""}
catch {case _: ClassNotFoundException =>
("\\nWARNING: JSR-203 \\"NIO.2\\" (" + cls + ") not found.\\n" +
"You are probably running Java < 1.7; answers will not compile.\\n" +
"You seem to be running " + System.getProperty("java.version") + ".\\n" +
"Try `project exercises' before compile, or upgrading your JDK.")
}
}
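  // Illustrative usage note (not part of the original build definition): from the
  // sbt prompt, switch to a sub-project before compiling, for example:
  //   > project exercises
  //   > compile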
}
|
jstjohn/fpinscala
|
project/Build.scala
|
Scala
|
mit
| 1,515 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import collection._
import collection.JavaConversions._
import java.util.concurrent.atomic.AtomicBoolean
import kafka.common.{TopicAndPartition, StateChangeFailedException}
import kafka.utils.{ZkUtils, Logging}
import org.I0Itec.zkclient.IZkChildListener
import org.apache.log4j.Logger
/**
 * This class represents the state machine for replicas. It defines the states that a replica can be in, and
 * transitions to move the replica to another legal state. The different states that a replica can be in are -
 * 1. NewReplica : The controller can create new replicas during partition reassignment. In this state, a
 * replica can only get the become-follower state change request. The valid previous
 * state is NonExistentReplica.
 * 2. OnlineReplica : Once a replica is started and part of the assigned replicas for its partition, it is in this
 * state. In this state, it can get either become-leader or become-follower state change requests.
 * The valid previous states are NewReplica, OnlineReplica or OfflineReplica.
 * 3. OfflineReplica : If a replica dies, it moves to this state. This happens when the broker hosting the replica
 * is down. The valid previous states are NewReplica and OnlineReplica.
 * 4. NonExistentReplica: If a replica is deleted, it is moved to this state. The valid previous state is OfflineReplica.
 */
class ReplicaStateMachine(controller: KafkaController) extends Logging {
private val controllerContext = controller.controllerContext
private val controllerId = controller.config.brokerId
private val zkClient = controllerContext.zkClient
var replicaState: mutable.Map[(String, Int, Int), ReplicaState] = mutable.Map.empty
val brokerRequestBatch = new ControllerBrokerRequestBatch(controller.controllerContext, controller.sendRequest,
controllerId, controller.clientId)
private val hasStarted = new AtomicBoolean(false)
this.logIdent = "[Replica state machine on controller " + controller.config.brokerId + "]: "
private val stateChangeLogger = Logger.getLogger(KafkaController.stateChangeLogger)
/**
* Invoked on successful controller election. First registers a broker change listener since that triggers all
* state transitions for replicas. Initializes the state of replicas for all partitions by reading from zookeeper.
* Then triggers the OnlineReplica state change for all replicas.
*/
def startup() {
// initialize replica state
initializeReplicaState()
hasStarted.set(true)
    // move all replicas on live brokers to the Online state
handleStateChanges(getAllReplicasOnBroker(controllerContext.allTopics.toSeq,
controllerContext.liveBrokerIds.toSeq), OnlineReplica)
info("Started replica state machine with initial state -> " + replicaState.toString())
}
// register broker change listener
def registerListeners() {
registerBrokerChangeListener()
}
/**
* Invoked on controller shutdown.
*/
def shutdown() {
hasStarted.set(false)
replicaState.clear()
}
/**
* This API is invoked by the broker change controller callbacks and the startup API of the state machine
* @param replicas The list of replicas (brokers) that need to be transitioned to the target state
* @param targetState The state that the replicas should be moved to
* The controller's allLeaders cache should have been updated before this
*/
def handleStateChanges(replicas: Set[PartitionAndReplica], targetState: ReplicaState) {
info("Invoking state change to %s for replicas %s".format(targetState, replicas.mkString(",")))
try {
brokerRequestBatch.newBatch()
replicas.foreach(r => handleStateChange(r.topic, r.partition, r.replica, targetState))
brokerRequestBatch.sendRequestsToBrokers(controller.epoch, controllerContext.correlationId.getAndIncrement, controllerContext.liveBrokers)
}catch {
case e => error("Error while moving some replicas to %s state".format(targetState), e)
}
}
/**
* This API exercises the replica's state machine. It ensures that every state transition happens from a legal
* previous state to the target state.
* @param topic The topic of the replica for which the state transition is invoked
* @param partition The partition of the replica for which the state transition is invoked
* @param replicaId The replica for which the state transition is invoked
* @param targetState The end state that the replica should be moved to
*/
def handleStateChange(topic: String, partition: Int, replicaId: Int, targetState: ReplicaState) {
val topicAndPartition = TopicAndPartition(topic, partition)
if (!hasStarted.get)
throw new StateChangeFailedException(("Controller %d epoch %d initiated state change of replica %d for partition %s " +
"to %s failed because replica state machine has not started")
.format(controllerId, controller.epoch, replicaId, topicAndPartition, targetState))
try {
replicaState.getOrElseUpdate((topic, partition, replicaId), NonExistentReplica)
val replicaAssignment = controllerContext.partitionReplicaAssignment(topicAndPartition)
targetState match {
case NewReplica =>
assertValidPreviousStates(topic, partition, replicaId, List(NonExistentReplica), targetState)
// start replica as a follower to the current leader for its partition
val leaderIsrAndControllerEpochOpt = ZkUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition)
leaderIsrAndControllerEpochOpt match {
case Some(leaderIsrAndControllerEpoch) =>
if(leaderIsrAndControllerEpoch.leaderAndIsr.leader == replicaId)
throw new StateChangeFailedException("Replica %d for partition %s cannot be moved to NewReplica"
.format(replicaId, topicAndPartition) + "state as it is being requested to become leader")
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(List(replicaId),
topic, partition, leaderIsrAndControllerEpoch,
replicaAssignment)
case None => // new leader request will be sent to this replica when one gets elected
}
replicaState.put((topic, partition, replicaId), NewReplica)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s to NewReplica"
.format(controllerId, controller.epoch, replicaId, topicAndPartition))
case NonExistentReplica =>
assertValidPreviousStates(topic, partition, replicaId, List(OfflineReplica), targetState)
// send stop replica command
brokerRequestBatch.addStopReplicaRequestForBrokers(List(replicaId), topic, partition, deletePartition = true)
// remove this replica from the assigned replicas list for its partition
val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition)
controllerContext.partitionReplicaAssignment.put(topicAndPartition, currentAssignedReplicas.filterNot(_ == replicaId))
replicaState.remove((topic, partition, replicaId))
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s to NonExistentReplica"
.format(controllerId, controller.epoch, replicaId, topicAndPartition))
case OnlineReplica =>
assertValidPreviousStates(topic, partition, replicaId, List(NewReplica, OnlineReplica, OfflineReplica), targetState)
replicaState((topic, partition, replicaId)) match {
case NewReplica =>
// add this replica to the assigned replicas list for its partition
val currentAssignedReplicas = controllerContext.partitionReplicaAssignment(topicAndPartition)
if(!currentAssignedReplicas.contains(replicaId))
controllerContext.partitionReplicaAssignment.put(topicAndPartition, currentAssignedReplicas :+ replicaId)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s to OnlineReplica"
.format(controllerId, controller.epoch, replicaId, topicAndPartition))
case _ =>
// check if the leader for this partition ever existed
controllerContext.partitionLeadershipInfo.get(topicAndPartition) match {
case Some(leaderIsrAndControllerEpoch) =>
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(List(replicaId), topic, partition, leaderIsrAndControllerEpoch,
replicaAssignment)
replicaState.put((topic, partition, replicaId), OnlineReplica)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s to OnlineReplica"
.format(controllerId, controller.epoch, replicaId, topicAndPartition))
case None => // that means the partition was never in OnlinePartition state, this means the broker never
// started a log for that partition and does not have a high watermark value for this partition
}
}
replicaState.put((topic, partition, replicaId), OnlineReplica)
case OfflineReplica =>
assertValidPreviousStates(topic, partition, replicaId, List(NewReplica, OnlineReplica), targetState)
// As an optimization, the controller removes dead replicas from the ISR
val leaderAndIsrIsEmpty: Boolean =
controllerContext.partitionLeadershipInfo.get(topicAndPartition) match {
case Some(currLeaderIsrAndControllerEpoch) =>
if (currLeaderIsrAndControllerEpoch.leaderAndIsr.isr.contains(replicaId))
controller.removeReplicaFromIsr(topic, partition, replicaId) match {
case Some(updatedLeaderIsrAndControllerEpoch) =>
// send the shrunk ISR state change request only to the leader
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(List(updatedLeaderIsrAndControllerEpoch.leaderAndIsr.leader),
topic, partition, updatedLeaderIsrAndControllerEpoch, replicaAssignment)
replicaState.put((topic, partition, replicaId), OfflineReplica)
stateChangeLogger.trace("Controller %d epoch %d changed state of replica %d for partition %s to OfflineReplica"
.format(controllerId, controller.epoch, replicaId, topicAndPartition))
false
case None =>
true
}
else false
case None =>
true
}
if (leaderAndIsrIsEmpty)
throw new StateChangeFailedException(
"Failed to change state of replica %d for partition %s since the leader and isr path in zookeeper is empty"
.format(replicaId, topicAndPartition))
}
}
catch {
case t: Throwable =>
stateChangeLogger.error("Controller %d epoch %d initiated state change of replica %d for partition [%s,%d] to %s failed"
.format(controllerId, controller.epoch, replicaId, topic, partition, targetState), t)
}
}
private def assertValidPreviousStates(topic: String, partition: Int, replicaId: Int, fromStates: Seq[ReplicaState],
targetState: ReplicaState) {
assert(fromStates.contains(replicaState((topic, partition, replicaId))),
"Replica %s for partition [%s,%d] should be in the %s states before moving to %s state"
.format(replicaId, topic, partition, fromStates.mkString(","), targetState) +
". Instead it is in %s state".format(replicaState((topic, partition, replicaId))))
}
private def registerBrokerChangeListener() = {
zkClient.subscribeChildChanges(ZkUtils.BrokerIdsPath, new BrokerChangeListener())
}
/**
* Invoked on startup of the replica's state machine to set the initial state for replicas of all existing partitions
* in zookeeper
*/
private def initializeReplicaState() {
for((topicPartition, assignedReplicas) <- controllerContext.partitionReplicaAssignment) {
val topic = topicPartition.topic
val partition = topicPartition.partition
assignedReplicas.foreach { replicaId =>
controllerContext.liveBrokerIds.contains(replicaId) match {
case true => replicaState.put((topic, partition, replicaId), OnlineReplica)
case false => replicaState.put((topic, partition, replicaId), OfflineReplica)
}
}
}
}
private def getAllReplicasOnBroker(topics: Seq[String], brokerIds: Seq[Int]): Set[PartitionAndReplica] = {
brokerIds.map { brokerId =>
val partitionsAssignedToThisBroker =
controllerContext.partitionReplicaAssignment.filter(p => topics.contains(p._1.topic) && p._2.contains(brokerId))
if(partitionsAssignedToThisBroker.size == 0)
info("No state transitions triggered since no partitions are assigned to brokers %s".format(brokerIds.mkString(",")))
partitionsAssignedToThisBroker.map(p => new PartitionAndReplica(p._1.topic, p._1.partition, brokerId))
}.flatten.toSet
}
def getPartitionsAssignedToBroker(topics: Seq[String], brokerId: Int):Seq[TopicAndPartition] = {
controllerContext.partitionReplicaAssignment.filter(_._2.contains(brokerId)).keySet.toSeq
}
/**
* This is the zookeeper listener that triggers all the state transitions for a replica
*/
class BrokerChangeListener() extends IZkChildListener with Logging {
this.logIdent = "[BrokerChangeListener on Controller " + controller.config.brokerId + "]: "
def handleChildChange(parentPath : String, currentBrokerList : java.util.List[String]) {
//ControllerStats.leaderElectionTimer.time {
info("Broker change listener fired for path %s with children %s".format(parentPath, currentBrokerList.mkString(",")))
controllerContext.controllerLock synchronized {
if (hasStarted.get) {
try {
val curBrokerIds = currentBrokerList.map(_.toInt).toSet
val newBrokerIds = curBrokerIds -- controllerContext.liveOrShuttingDownBrokerIds
val newBrokerInfo = newBrokerIds.map(ZkUtils.getBrokerInfo(zkClient, _))
val newBrokers = newBrokerInfo.filter(_.isDefined).map(_.get)
val deadBrokerIds = controllerContext.liveOrShuttingDownBrokerIds -- curBrokerIds
controllerContext.liveBrokers = curBrokerIds.map(ZkUtils.getBrokerInfo(zkClient, _)).filter(_.isDefined).map(_.get)
info("Newly added brokers: %s, deleted brokers: %s, all live brokers: %s"
.format(newBrokerIds.mkString(","), deadBrokerIds.mkString(","), controllerContext.liveBrokerIds.mkString(",")))
newBrokers.foreach(controllerContext.controllerChannelManager.addBroker(_))
deadBrokerIds.foreach(controllerContext.controllerChannelManager.removeBroker(_))
if(newBrokerIds.size > 0)
controller.onBrokerStartup(newBrokerIds.toSeq)
if(deadBrokerIds.size > 0)
controller.onBrokerFailure(deadBrokerIds.toSeq)
} catch {
case e => error("Error while handling broker changes", e)
}
}
}
//}
}
}
}
sealed trait ReplicaState { def state: Byte }
case object NewReplica extends ReplicaState { val state: Byte = 1 }
case object OnlineReplica extends ReplicaState { val state: Byte = 2 }
case object OfflineReplica extends ReplicaState { val state: Byte = 3 }
case object NonExistentReplica extends ReplicaState { val state: Byte = 4 }
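/* Illustrative summary (not part of the original source) of the transitions the
 * machine accepts, as enforced by assertValidPreviousStates:
 *
 *   NonExistentReplica                          -> NewReplica
 *   NewReplica | OnlineReplica | OfflineReplica -> OnlineReplica
 *   NewReplica | OnlineReplica                  -> OfflineReplica
 *   OfflineReplica                              -> NonExistentReplica
 *
 * For example, moving a replica online (the topic, partition and broker id are
 * assumptions made for the example):
 *
 *   replicaStateMachine.handleStateChanges(
 *     Set(new PartitionAndReplica("my-topic", 0, brokerId)), OnlineReplica)
 */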
|
kavink92/kafka-0.8.0-beta1-src
|
core/src/main/scala/kafka/controller/ReplicaStateMachine.scala
|
Scala
|
apache-2.0
| 16,940 |
package com.github.mdr.mash.evaluator
class PrivateMethodsTest extends AbstractEvaluatorTest {
"class A { @private def a = 42 }; A.new.a".shouldThrowAnException
"class A { @private def a = 42 }; A.new['a']".shouldThrowAnException
"class A { @private def a = 42 }; 'a' A.new".shouldThrowAnException
"class A { @private def a = 42 }; [A.new].a".shouldThrowAnException
"class A { @private def a = 42; def b = a }; A.new.b" ==> 42
"class A { @private def a = 42; def b = this.a }; A.new.b" ==> 42
"class A { @private def a = 42; def b = this['a'].invoke }; A.new.b" ==> 42
}
|
mdr/mash
|
src/test/scala/com/github/mdr/mash/evaluator/PrivateMethodsTest.scala
|
Scala
|
mit
| 592 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.containerpool.docker
import java.io.FileNotFoundException
import java.nio.file.Files
import java.nio.file.Paths
import java.util.concurrent.Semaphore
import akka.actor.ActorSystem
import scala.collection.concurrent.TrieMap
import scala.concurrent.blocking
import scala.concurrent.ExecutionContext
import scala.concurrent.{Await, Future}
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import akka.event.Logging.{ErrorLevel, InfoLevel}
import pureconfig.loadConfigOrThrow
import org.apache.openwhisk.common.{Logging, LoggingMarkers, MetricEmitter, TransactionId}
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.containerpool.ContainerId
import org.apache.openwhisk.core.containerpool.ContainerAddress
import scala.concurrent.duration.Duration
object DockerContainerId {
val containerIdRegex = """^([0-9a-f]{64})$""".r
def parse(id: String): Try[ContainerId] = {
id match {
case containerIdRegex(_) => Success(ContainerId(id))
case _ => Failure(new IllegalArgumentException(s"Does not comply with Docker container ID format: ${id}"))
}
}
}
/**
* Configuration for docker client command timeouts.
*/
case class DockerClientTimeoutConfig(run: Duration,
rm: Duration,
pull: Duration,
ps: Duration,
pause: Duration,
unpause: Duration,
version: Duration,
inspect: Duration)
/**
* Configuration for docker client
*/
case class DockerClientConfig(parallelRuns: Int, timeouts: DockerClientTimeoutConfig)
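/*
 * Illustrative sketch (assumption, not from the original source) of the HOCON
 * shape that pureconfig maps onto these case classes. The exact path is whatever
 * `ConfigKeys.dockerClient` resolves to, and the key names follow the case class
 * field names under pureconfig's default naming; both are assumptions here.
 *
 *   whisk.docker.client {
 *     parallel-runs = 10
 *     timeouts {
 *       run = 1 minute
 *       rm = 1 minute
 *       pull = 10 minutes
 *       ps = 1 minute
 *       pause = 10 seconds
 *       unpause = 10 seconds
 *       version = 10 seconds
 *       inspect = 90 seconds
 *     }
 *   }
 */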
/**
* Serves as interface to the docker CLI tool.
*
* Be cautious with the ExecutionContext passed to this, as the
* calls to the CLI are blocking.
*
* You only need one instance (and you shouldn't get more).
*/
class DockerClient(dockerHost: Option[String] = None,
config: DockerClientConfig = loadConfigOrThrow[DockerClientConfig](ConfigKeys.dockerClient))(
executionContext: ExecutionContext)(implicit log: Logging, as: ActorSystem)
extends DockerApi
with ProcessRunner {
implicit private val ec = executionContext
// Determines how to run docker. Failure to find a Docker binary implies
// a failure to initialize this instance of DockerClient.
protected val dockerCmd: Seq[String] = {
val alternatives = List("/usr/bin/docker", "/usr/local/bin/docker") ++ executableAlternatives
val dockerBin = Try {
alternatives.find(a => Files.isExecutable(Paths.get(a))).get
} getOrElse {
throw new FileNotFoundException(s"Couldn't locate docker binary (tried: ${alternatives.mkString(", ")}).")
}
val host = dockerHost.map(host => Seq("--host", s"tcp://$host")).getOrElse(Seq.empty[String])
Seq(dockerBin) ++ host
}
protected def executableAlternatives: List[String] = List.empty
// Invoke docker CLI to determine client version.
// If the docker client version cannot be determined, an exception will be thrown and instance initialization will fail.
// Rationale: if we cannot invoke `docker version` successfully, it is unlikely subsequent `docker` invocations will succeed.
protected def getClientVersion(): String = {
val vf = executeProcess(dockerCmd ++ Seq("version", "--format", "{{.Client.Version}}"), config.timeouts.version)
.andThen {
case Success(version) => log.info(this, s"Detected docker client version $version")
case Failure(e) =>
log.error(this, s"Failed to determine docker client version: ${e.getClass} - ${e.getMessage}")
}
Await.result(vf, 2 * config.timeouts.version)
}
val clientVersion: String = getClientVersion()
protected val maxParallelRuns = config.parallelRuns
protected val runSemaphore =
new Semaphore( /* permits= */ if (maxParallelRuns > 0) maxParallelRuns else Int.MaxValue, /* fair= */ true)
// Docker < 1.13.1 has a known problem: if more than 10 containers are created (docker run)
// concurrently, there is a good chance that some of them will fail.
// See https://github.com/moby/moby/issues/29369
// Use a semaphore to make sure that at most 10 `docker run` commands are active
  // at the same time.
def run(image: String, args: Seq[String] = Seq.empty[String])(
implicit transid: TransactionId): Future[ContainerId] = {
Future {
blocking {
// Acquires a permit from this semaphore, blocking until one is available, or the thread is interrupted.
// Throws InterruptedException if the current thread is interrupted
runSemaphore.acquire()
}
}.flatMap { _ =>
// Iff the semaphore was acquired successfully
runCmd(Seq("run", "-d") ++ args ++ Seq(image), config.timeouts.run)
.andThen {
// Release the semaphore as quick as possible regardless of the runCmd() result
case _ => runSemaphore.release()
}
.map(ContainerId.apply)
.recoverWith {
// https://docs.docker.com/v1.12/engine/reference/run/#/exit-status
// Exit status 125 means an error reported by the Docker daemon.
// Examples:
// - Unrecognized option specified
// - Not enough disk space
case pre: ProcessUnsuccessfulException if pre.exitStatus == ExitStatus(125) =>
Future.failed(
DockerContainerId
.parse(pre.stdout)
.map(BrokenDockerContainer(_, s"Broken container: ${pre.getMessage}"))
.getOrElse(pre))
}
}
}
def inspectIPAddress(id: ContainerId, network: String)(implicit transid: TransactionId): Future[ContainerAddress] =
runCmd(
Seq("inspect", "--format", s"{{.NetworkSettings.Networks.${network}.IPAddress}}", id.asString),
config.timeouts.inspect).flatMap {
case "<no value>" => Future.failed(new NoSuchElementException)
case stdout => Future.successful(ContainerAddress(stdout))
}
def pause(id: ContainerId)(implicit transid: TransactionId): Future[Unit] =
runCmd(Seq("pause", id.asString), config.timeouts.pause).map(_ => ())
def unpause(id: ContainerId)(implicit transid: TransactionId): Future[Unit] =
runCmd(Seq("unpause", id.asString), config.timeouts.unpause).map(_ => ())
def rm(id: ContainerId)(implicit transid: TransactionId): Future[Unit] =
runCmd(Seq("rm", "-f", id.asString), config.timeouts.rm).map(_ => ())
def ps(filters: Seq[(String, String)] = Seq.empty, all: Boolean = false)(
implicit transid: TransactionId): Future[Seq[ContainerId]] = {
val filterArgs = filters.flatMap { case (attr, value) => Seq("--filter", s"$attr=$value") }
val allArg = if (all) Seq("--all") else Seq.empty[String]
val cmd = Seq("ps", "--quiet", "--no-trunc") ++ allArg ++ filterArgs
runCmd(cmd, config.timeouts.ps).map(_.linesIterator.toSeq.map(ContainerId.apply))
}
/**
* Stores pulls that are currently being executed and collapses multiple
* pulls into just one. After a pull is finished, the cached future is removed
* to enable constant updates of an image without changing its tag.
*/
private val pullsInFlight = TrieMap[String, Future[Unit]]()
def pull(image: String)(implicit transid: TransactionId): Future[Unit] =
pullsInFlight.getOrElseUpdate(image, {
runCmd(Seq("pull", image), config.timeouts.pull).map(_ => ()).andThen { case _ => pullsInFlight.remove(image) }
})
def isOomKilled(id: ContainerId)(implicit transid: TransactionId): Future[Boolean] =
runCmd(Seq("inspect", id.asString, "--format", "{{.State.OOMKilled}}"), config.timeouts.inspect).map(_.toBoolean)
protected def runCmd(args: Seq[String], timeout: Duration)(implicit transid: TransactionId): Future[String] = {
val cmd = dockerCmd ++ args
val start = transid.started(
this,
LoggingMarkers.INVOKER_DOCKER_CMD(args.head),
s"running ${cmd.mkString(" ")} (timeout: $timeout)",
logLevel = InfoLevel)
executeProcess(cmd, timeout).andThen {
case Success(_) => transid.finished(this, start)
case Failure(pte: ProcessTimeoutException) =>
transid.failed(this, start, pte.getMessage, ErrorLevel)
MetricEmitter.emitCounterMetric(LoggingMarkers.INVOKER_DOCKER_CMD_TIMEOUT(args.head))
case Failure(t) => transid.failed(this, start, t.getMessage, ErrorLevel)
}
}
}
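/*
 * Illustrative sketch (not from the original source): a minimal container
 * lifecycle using this client. The image name, arguments, network and the
 * implicit Logging/ActorSystem/TransactionId values in scope are assumptions
 * made for the example.
 *
 *   val docker = new DockerClient()(actorSystem.dispatcher)
 *   for {
 *     id <- docker.run("example/image:latest", Seq("--memory", "256m"))
 *     ip <- docker.inspectIPAddress(id, "bridge")
 *     _  <- docker.rm(id)
 *   } yield ip
 */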
trait DockerApi {
/**
* The version number of the docker client cli
*
* @return The version of the docker client cli being used by the invoker
*/
def clientVersion: String
/**
* Spawns a container in detached mode.
*
* @param image the image to start the container with
* @param args arguments for the docker run command
* @return id of the started container
*/
def run(image: String, args: Seq[String] = Seq.empty[String])(implicit transid: TransactionId): Future[ContainerId]
/**
* Gets the IP address of a given container.
*
   * A container may have more than one network. The container has an
   * IP address in each of these networks, so the network name is needed
   * to select the address of interest.
*
* @param id the id of the container to get the IP address from
* @param network name of the network to get the IP address from
* @return ip of the container
*/
def inspectIPAddress(id: ContainerId, network: String)(implicit transid: TransactionId): Future[ContainerAddress]
/**
* Pauses the container with the given id.
*
* @param id the id of the container to pause
* @return a Future completing according to the command's exit-code
*/
def pause(id: ContainerId)(implicit transid: TransactionId): Future[Unit]
/**
* Unpauses the container with the given id.
*
* @param id the id of the container to unpause
* @return a Future completing according to the command's exit-code
*/
def unpause(id: ContainerId)(implicit transid: TransactionId): Future[Unit]
/**
* Removes the container with the given id.
*
* @param id the id of the container to remove
* @return a Future completing according to the command's exit-code
*/
def rm(id: ContainerId)(implicit transid: TransactionId): Future[Unit]
/**
* Returns a list of ContainerIds in the system.
*
* @param filters Filters to apply to the 'ps' command
* @param all Whether or not to return stopped containers as well
* @return A list of ContainerIds
*/
def ps(filters: Seq[(String, String)] = Seq.empty, all: Boolean = false)(
implicit transid: TransactionId): Future[Seq[ContainerId]]
/**
* Pulls the given image.
*
* @param image the image to pull
* @return a Future completing once the pull is complete
*/
def pull(image: String)(implicit transid: TransactionId): Future[Unit]
/**
* Determines whether the given container was killed due to
* memory constraints.
*
* @param id the id of the container to check
* @return a Future containing whether the container was killed or not
*/
def isOomKilled(id: ContainerId)(implicit transid: TransactionId): Future[Boolean]
}
/** Indicates any error while starting a container that leaves a broken container behind that needs to be removed */
case class BrokenDockerContainer(id: ContainerId, msg: String) extends Exception(msg)
|
markusthoemmes/openwhisk
|
core/invoker/src/main/scala/org/apache/openwhisk/core/containerpool/docker/DockerClient.scala
|
Scala
|
apache-2.0
| 12,262 |
/*
* Copyright 2013-, BjΓΆrn Westlin (bwestlin at gmail dot com) - github: bwestlin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views
import model._
import org.jsoup.Jsoup
import org.specs2.mutable._
import org.specs2.runner._
import org.junit.runner._
import collection.JavaConversions._
import scala.util.{Failure, Success, Try}
@RunWith(classOf[JUnitRunner])
class TodaysLunchesSpec extends Specification {
"TodaysLunches view" should {
"Render list of meals correctly" in {
val restaurant = Restaurant(1, "Restaurant", "http://restaurant/", None, "Restaurant")
val lunches: Seq[(Restaurant, Try[Seq[model.Meal]])] = Seq(
(restaurant, Success(Seq("meal1", "meal2").map(Meal)))
)
val html = views.html.lunchInfo.todaysLunches(lunches).toString()
val doc = Jsoup.parse(html)
doc.select("h4").text() mustEqual restaurant.name
doc.select("h4 > a").attr("href") mustEqual restaurant.url
doc.select("ul.meals").size() mustEqual 1
doc.select("ul.meals > li").size() mustEqual 2
doc.select("ul.meals > li").iterator().toSeq.map(_.text) mustEqual Seq("meal1", "meal2")
}
"Render empty list of meals correctly" in {
val restaurant = Restaurant(1, "Restaurant", "http://restaurant/", None, "Restaurant")
val lunches: Seq[(Restaurant, Try[Seq[model.Meal]])] = Seq(
(restaurant, Success(Nil))
)
val html = views.html.lunchInfo.todaysLunches(lunches).toString()
val doc = Jsoup.parse(html)
doc.select("h4").text() mustEqual restaurant.name
doc.select("h4 > a").attr("href") mustEqual restaurant.url
doc.select("div:containsOwn(Inga luncher funna.)").size() mustEqual 1
}
"Render failed list of meals correctly" in {
val restaurant = Restaurant(1, "Restaurant", "http://restaurant/", None, "Restaurant")
val lunches: Seq[(Restaurant, Try[Seq[model.Meal]])] = Seq(
(restaurant, Failure(new Exception("Something went wrong")))
)
val html = views.html.lunchInfo.todaysLunches(lunches).toString()
val doc = Jsoup.parse(html)
doc.select("h4").text() mustEqual restaurant.name
doc.select("h4 > a").attr("href") mustEqual restaurant.url
doc.select("div.alert.alert-error").size mustEqual 1
doc.select("div.alert.alert-error").text must contain("Something went wrong")
}
}
}
|
bwestlin/su-lunch
|
test/views/TodaysLunchesSpec.scala
|
Scala
|
apache-2.0
| 2,903 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status
import java.util.Date
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters._
import scala.collection.immutable.{HashSet, TreeSet}
import scala.collection.mutable.HashMap
import com.google.common.collect.Interners
import org.apache.spark.JobExecutionStatus
import org.apache.spark.executor.{ExecutorMetrics, TaskMetrics}
import org.apache.spark.resource.{ExecutorResourceRequest, ResourceInformation, ResourceProfile, TaskResourceRequest}
import org.apache.spark.scheduler.{AccumulableInfo, StageInfo, TaskInfo}
import org.apache.spark.status.api.v1
import org.apache.spark.storage.{RDDInfo, StorageLevel}
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{AccumulatorContext, Utils}
import org.apache.spark.util.collection.OpenHashSet
/**
* A mutable representation of a live entity in Spark (jobs, stages, tasks, et al). Every live
* entity uses one of these instances to keep track of their evolving state, and periodically
* flush an immutable view of the entity to the app state store.
*/
private[spark] abstract class LiveEntity {
var lastWriteTime = -1L
def write(store: ElementTrackingStore, now: Long, checkTriggers: Boolean = false): Unit = {
// Always check triggers on the first write, since adding an element to the store may
// cause the maximum count for the element type to be exceeded.
store.write(doUpdate(), checkTriggers || lastWriteTime == -1L)
lastWriteTime = now
}
/**
* Returns an updated view of entity data, to be stored in the status store, reflecting the
* latest information collected by the listener.
*/
protected def doUpdate(): Any
}
private class LiveJob(
val jobId: Int,
name: String,
description: Option[String],
val submissionTime: Option[Date],
val stageIds: Seq[Int],
jobGroup: Option[String],
numTasks: Int,
sqlExecutionId: Option[Long]) extends LiveEntity {
var activeTasks = 0
var completedTasks = 0
var failedTasks = 0
// Holds both the stage ID and the task index, packed into a single long value.
val completedIndices = new OpenHashSet[Long]()
var killedTasks = 0
var killedSummary: Map[String, Int] = Map()
var skippedTasks = 0
var skippedStages = Set[Int]()
var status = JobExecutionStatus.RUNNING
var completionTime: Option[Date] = None
var completedStages: Set[Int] = Set()
var activeStages = 0
var failedStages = 0
override protected def doUpdate(): Any = {
val info = new v1.JobData(
jobId,
name,
description,
submissionTime,
completionTime,
stageIds,
jobGroup,
status,
numTasks,
activeTasks,
completedTasks,
skippedTasks,
failedTasks,
killedTasks,
completedIndices.size,
activeStages,
completedStages.size,
skippedStages.size,
failedStages,
killedSummary)
new JobDataWrapper(info, skippedStages, sqlExecutionId)
}
}
private class LiveTask(
var info: TaskInfo,
stageId: Int,
stageAttemptId: Int,
lastUpdateTime: Option[Long]) extends LiveEntity {
import LiveEntityHelpers._
// The task metrics use a special value when no metrics have been reported. The special value is
// checked when calculating indexed values when writing to the store (see [[TaskDataWrapper]]).
private var metrics: v1.TaskMetrics = createMetrics(default = -1L)
var errorMessage: Option[String] = None
/**
* Update the metrics for the task and return the difference between the previous and new
* values.
*/
def updateMetrics(metrics: TaskMetrics): v1.TaskMetrics = {
if (metrics != null) {
val old = this.metrics
val newMetrics = createMetrics(
metrics.executorDeserializeTime,
metrics.executorDeserializeCpuTime,
metrics.executorRunTime,
metrics.executorCpuTime,
metrics.resultSize,
metrics.jvmGCTime,
metrics.resultSerializationTime,
metrics.memoryBytesSpilled,
metrics.diskBytesSpilled,
metrics.peakExecutionMemory,
metrics.inputMetrics.bytesRead,
metrics.inputMetrics.recordsRead,
metrics.outputMetrics.bytesWritten,
metrics.outputMetrics.recordsWritten,
metrics.shuffleReadMetrics.remoteBlocksFetched,
metrics.shuffleReadMetrics.localBlocksFetched,
metrics.shuffleReadMetrics.fetchWaitTime,
metrics.shuffleReadMetrics.remoteBytesRead,
metrics.shuffleReadMetrics.remoteBytesReadToDisk,
metrics.shuffleReadMetrics.localBytesRead,
metrics.shuffleReadMetrics.recordsRead,
metrics.shuffleWriteMetrics.bytesWritten,
metrics.shuffleWriteMetrics.writeTime,
metrics.shuffleWriteMetrics.recordsWritten)
this.metrics = newMetrics
// Only calculate the delta if the old metrics contain valid information, otherwise
// the new metrics are the delta.
if (old.executorDeserializeTime >= 0L) {
subtractMetrics(newMetrics, old)
} else {
newMetrics
}
} else {
null
}
}
override protected def doUpdate(): Any = {
val duration = if (info.finished) {
info.duration
} else {
info.timeRunning(lastUpdateTime.getOrElse(System.currentTimeMillis()))
}
val hasMetrics = metrics.executorDeserializeTime >= 0
/**
* SPARK-26260: For non successful tasks, store the metrics as negative to avoid
* the calculation in the task summary. `toApi` method in the `TaskDataWrapper` will make
* it actual value.
*/
val taskMetrics: v1.TaskMetrics = if (hasMetrics && !info.successful) {
makeNegative(metrics)
} else {
metrics
}
new TaskDataWrapper(
info.taskId,
info.index,
info.attemptNumber,
info.launchTime,
if (info.gettingResult) info.gettingResultTime else -1L,
duration,
weakIntern(info.executorId),
weakIntern(info.host),
weakIntern(info.status),
weakIntern(info.taskLocality.toString()),
info.speculative,
newAccumulatorInfos(info.accumulables),
errorMessage,
hasMetrics,
taskMetrics.executorDeserializeTime,
taskMetrics.executorDeserializeCpuTime,
taskMetrics.executorRunTime,
taskMetrics.executorCpuTime,
taskMetrics.resultSize,
taskMetrics.jvmGcTime,
taskMetrics.resultSerializationTime,
taskMetrics.memoryBytesSpilled,
taskMetrics.diskBytesSpilled,
taskMetrics.peakExecutionMemory,
taskMetrics.inputMetrics.bytesRead,
taskMetrics.inputMetrics.recordsRead,
taskMetrics.outputMetrics.bytesWritten,
taskMetrics.outputMetrics.recordsWritten,
taskMetrics.shuffleReadMetrics.remoteBlocksFetched,
taskMetrics.shuffleReadMetrics.localBlocksFetched,
taskMetrics.shuffleReadMetrics.fetchWaitTime,
taskMetrics.shuffleReadMetrics.remoteBytesRead,
taskMetrics.shuffleReadMetrics.remoteBytesReadToDisk,
taskMetrics.shuffleReadMetrics.localBytesRead,
taskMetrics.shuffleReadMetrics.recordsRead,
taskMetrics.shuffleWriteMetrics.bytesWritten,
taskMetrics.shuffleWriteMetrics.writeTime,
taskMetrics.shuffleWriteMetrics.recordsWritten,
stageId,
stageAttemptId)
}
}
private class LiveResourceProfile(
val resourceProfileId: Int,
val executorResources: Map[String, ExecutorResourceRequest],
val taskResources: Map[String, TaskResourceRequest],
val maxTasksPerExecutor: Option[Int]) extends LiveEntity {
def toApi(): v1.ResourceProfileInfo = {
new v1.ResourceProfileInfo(resourceProfileId, executorResources, taskResources)
}
override protected def doUpdate(): Any = {
new ResourceProfileWrapper(toApi())
}
}
private[spark] class LiveExecutor(val executorId: String, _addTime: Long) extends LiveEntity {
var hostPort: String = null
var host: String = null
var isActive = true
var totalCores = 0
val addTime = new Date(_addTime)
var removeTime: Date = null
var removeReason: String = null
var rddBlocks = 0
var memoryUsed = 0L
var diskUsed = 0L
var maxTasks = 0
var maxMemory = 0L
var totalTasks = 0
var activeTasks = 0
var completedTasks = 0
var failedTasks = 0
var totalDuration = 0L
var totalGcTime = 0L
var totalInputBytes = 0L
var totalShuffleRead = 0L
var totalShuffleWrite = 0L
var isBlacklisted = false
var blacklistedInStages: Set[Int] = TreeSet()
var executorLogs = Map[String, String]()
var attributes = Map[String, String]()
var resources = Map[String, ResourceInformation]()
// Memory metrics. They may not be recorded (e.g. old event logs) so if totalOnHeap is not
// initialized, the store will not contain this information.
var totalOnHeap = -1L
var totalOffHeap = 0L
var usedOnHeap = 0L
var usedOffHeap = 0L
var resourceProfileId = ResourceProfile.DEFAULT_RESOURCE_PROFILE_ID
def hasMemoryInfo: Boolean = totalOnHeap >= 0L
// peak values for executor level metrics
val peakExecutorMetrics = new ExecutorMetrics()
def hostname: String = if (host != null) host else Utils.parseHostPort(hostPort)._1
override protected def doUpdate(): Any = {
val memoryMetrics = if (totalOnHeap >= 0) {
Some(new v1.MemoryMetrics(usedOnHeap, usedOffHeap, totalOnHeap, totalOffHeap))
} else {
None
}
val info = new v1.ExecutorSummary(
executorId,
if (hostPort != null) hostPort else host,
isActive,
rddBlocks,
memoryUsed,
diskUsed,
totalCores,
maxTasks,
activeTasks,
failedTasks,
completedTasks,
totalTasks,
totalDuration,
totalGcTime,
totalInputBytes,
totalShuffleRead,
totalShuffleWrite,
isBlacklisted,
maxMemory,
addTime,
Option(removeTime),
Option(removeReason),
executorLogs,
memoryMetrics,
blacklistedInStages,
Some(peakExecutorMetrics).filter(_.isSet),
attributes,
resources,
resourceProfileId)
new ExecutorSummaryWrapper(info)
}
}
private class LiveExecutorStageSummary(
stageId: Int,
attemptId: Int,
executorId: String) extends LiveEntity {
import LiveEntityHelpers._
var taskTime = 0L
var succeededTasks = 0
var failedTasks = 0
var killedTasks = 0
var isBlacklisted = false
var metrics = createMetrics(default = 0L)
override protected def doUpdate(): Any = {
val info = new v1.ExecutorStageSummary(
taskTime,
failedTasks,
succeededTasks,
killedTasks,
metrics.inputMetrics.bytesRead,
metrics.inputMetrics.recordsRead,
metrics.outputMetrics.bytesWritten,
metrics.outputMetrics.recordsWritten,
metrics.shuffleReadMetrics.remoteBytesRead + metrics.shuffleReadMetrics.localBytesRead,
metrics.shuffleReadMetrics.recordsRead,
metrics.shuffleWriteMetrics.bytesWritten,
metrics.shuffleWriteMetrics.recordsWritten,
metrics.memoryBytesSpilled,
metrics.diskBytesSpilled,
isBlacklisted)
new ExecutorStageSummaryWrapper(stageId, attemptId, executorId, info)
}
}
private class LiveStage extends LiveEntity {
import LiveEntityHelpers._
var jobs = Seq[LiveJob]()
var jobIds = Set[Int]()
var info: StageInfo = null
var status = v1.StageStatus.PENDING
var description: Option[String] = None
var schedulingPool: String = SparkUI.DEFAULT_POOL_NAME
var activeTasks = 0
var completedTasks = 0
var failedTasks = 0
val completedIndices = new OpenHashSet[Int]()
var killedTasks = 0
var killedSummary: Map[String, Int] = Map()
var firstLaunchTime = Long.MaxValue
var localitySummary: Map[String, Long] = Map()
var metrics = createMetrics(default = 0L)
val executorSummaries = new HashMap[String, LiveExecutorStageSummary]()
val activeTasksPerExecutor = new HashMap[String, Int]().withDefaultValue(0)
var blackListedExecutors = new HashSet[String]()
// Used for cleanup of tasks after they reach the configured limit. Not written to the store.
@volatile var cleaning = false
var savedTasks = new AtomicInteger(0)
def executorSummary(executorId: String): LiveExecutorStageSummary = {
executorSummaries.getOrElseUpdate(executorId,
new LiveExecutorStageSummary(info.stageId, info.attemptNumber, executorId))
}
def toApi(): v1.StageData = {
new v1.StageData(
status = status,
stageId = info.stageId,
attemptId = info.attemptNumber,
numTasks = info.numTasks,
numActiveTasks = activeTasks,
numCompleteTasks = completedTasks,
numFailedTasks = failedTasks,
numKilledTasks = killedTasks,
numCompletedIndices = completedIndices.size,
submissionTime = info.submissionTime.map(new Date(_)),
firstTaskLaunchedTime =
if (firstLaunchTime < Long.MaxValue) Some(new Date(firstLaunchTime)) else None,
completionTime = info.completionTime.map(new Date(_)),
failureReason = info.failureReason,
executorDeserializeTime = metrics.executorDeserializeTime,
executorDeserializeCpuTime = metrics.executorDeserializeCpuTime,
executorRunTime = metrics.executorRunTime,
executorCpuTime = metrics.executorCpuTime,
resultSize = metrics.resultSize,
jvmGcTime = metrics.jvmGcTime,
resultSerializationTime = metrics.resultSerializationTime,
memoryBytesSpilled = metrics.memoryBytesSpilled,
diskBytesSpilled = metrics.diskBytesSpilled,
peakExecutionMemory = metrics.peakExecutionMemory,
inputBytes = metrics.inputMetrics.bytesRead,
inputRecords = metrics.inputMetrics.recordsRead,
outputBytes = metrics.outputMetrics.bytesWritten,
outputRecords = metrics.outputMetrics.recordsWritten,
shuffleRemoteBlocksFetched = metrics.shuffleReadMetrics.remoteBlocksFetched,
shuffleLocalBlocksFetched = metrics.shuffleReadMetrics.localBlocksFetched,
shuffleFetchWaitTime = metrics.shuffleReadMetrics.fetchWaitTime,
shuffleRemoteBytesRead = metrics.shuffleReadMetrics.remoteBytesRead,
shuffleRemoteBytesReadToDisk = metrics.shuffleReadMetrics.remoteBytesReadToDisk,
shuffleLocalBytesRead = metrics.shuffleReadMetrics.localBytesRead,
shuffleReadBytes =
metrics.shuffleReadMetrics.localBytesRead + metrics.shuffleReadMetrics.remoteBytesRead,
shuffleReadRecords = metrics.shuffleReadMetrics.recordsRead,
shuffleWriteBytes = metrics.shuffleWriteMetrics.bytesWritten,
shuffleWriteTime = metrics.shuffleWriteMetrics.writeTime,
shuffleWriteRecords = metrics.shuffleWriteMetrics.recordsWritten,
name = info.name,
description = description,
details = info.details,
schedulingPool = schedulingPool,
rddIds = info.rddInfos.map(_.id),
accumulatorUpdates = newAccumulatorInfos(info.accumulables.values),
tasks = None,
executorSummary = None,
killedTasksSummary = killedSummary,
resourceProfileId = info.resourceProfileId)
}
override protected def doUpdate(): Any = {
new StageDataWrapper(toApi(), jobIds, localitySummary)
}
}
/**
* Data about a single partition of a cached RDD. The RDD storage level is used to compute the
* effective storage level of the partition, which takes into account the storage actually being
* used by the partition in the executors, and thus may differ from the storage level requested
* by the application.
*/
private class LiveRDDPartition(val blockName: String, rddLevel: StorageLevel) {
import LiveEntityHelpers._
// Pointers used by RDDPartitionSeq.
@volatile var prev: LiveRDDPartition = null
@volatile var next: LiveRDDPartition = null
var value: v1.RDDPartitionInfo = null
def executors: Seq[String] = value.executors
def memoryUsed: Long = value.memoryUsed
def diskUsed: Long = value.diskUsed
def update(
executors: Seq[String],
memoryUsed: Long,
diskUsed: Long): Unit = {
val level = StorageLevel(diskUsed > 0, memoryUsed > 0, rddLevel.useOffHeap,
if (memoryUsed > 0) rddLevel.deserialized else false, executors.size)
value = new v1.RDDPartitionInfo(
blockName,
weakIntern(level.description),
memoryUsed,
diskUsed,
executors)
}
}
private class LiveRDDDistribution(exec: LiveExecutor) {
import LiveEntityHelpers._
val executorId = exec.executorId
var memoryUsed = 0L
var diskUsed = 0L
var onHeapUsed = 0L
var offHeapUsed = 0L
// Keep the last update handy. This avoids recomputing the API view when not needed.
var lastUpdate: v1.RDDDataDistribution = null
def toApi(): v1.RDDDataDistribution = {
if (lastUpdate == null) {
lastUpdate = new v1.RDDDataDistribution(
weakIntern(exec.hostPort),
memoryUsed,
exec.maxMemory - exec.memoryUsed,
diskUsed,
if (exec.hasMemoryInfo) Some(onHeapUsed) else None,
if (exec.hasMemoryInfo) Some(offHeapUsed) else None,
if (exec.hasMemoryInfo) Some(exec.totalOnHeap - exec.usedOnHeap) else None,
if (exec.hasMemoryInfo) Some(exec.totalOffHeap - exec.usedOffHeap) else None)
}
lastUpdate
}
}
/**
* Tracker for data related to a persisted RDD.
*
* The RDD storage level is immutable, following the current behavior of `RDD.persist()`, even
* though it is mutable in the `RDDInfo` structure. Since the listener does not track unpersisted
 * RDDs, this covers the case where an early stage is run on the unpersisted RDD, and a later stage
 * is started after the RDD is marked for caching.
*/
private class LiveRDD(val info: RDDInfo, storageLevel: StorageLevel) extends LiveEntity {
import LiveEntityHelpers._
var memoryUsed = 0L
var diskUsed = 0L
private val levelDescription = weakIntern(storageLevel.description)
private val partitions = new HashMap[String, LiveRDDPartition]()
private val partitionSeq = new RDDPartitionSeq()
private val distributions = new HashMap[String, LiveRDDDistribution]()
def partition(blockName: String): LiveRDDPartition = {
partitions.getOrElseUpdate(blockName, {
val part = new LiveRDDPartition(blockName, storageLevel)
part.update(Nil, 0L, 0L)
partitionSeq.addPartition(part)
part
})
}
def removePartition(blockName: String): Unit = {
partitions.remove(blockName).foreach(partitionSeq.removePartition)
}
def distribution(exec: LiveExecutor): LiveRDDDistribution = {
distributions.getOrElseUpdate(exec.executorId, new LiveRDDDistribution(exec))
}
def removeDistribution(exec: LiveExecutor): Boolean = {
distributions.remove(exec.executorId).isDefined
}
def distributionOpt(exec: LiveExecutor): Option[LiveRDDDistribution] = {
distributions.get(exec.executorId)
}
def getPartitions(): scala.collection.Map[String, LiveRDDPartition] = partitions
def getDistributions(): scala.collection.Map[String, LiveRDDDistribution] = distributions
override protected def doUpdate(): Any = {
val dists = if (distributions.nonEmpty) {
Some(distributions.values.map(_.toApi()).toSeq)
} else {
None
}
val rdd = new v1.RDDStorageInfo(
info.id,
info.name,
info.numPartitions,
partitions.size,
levelDescription,
memoryUsed,
diskUsed,
dists,
Some(partitionSeq))
new RDDStorageInfoWrapper(rdd)
}
}
private class SchedulerPool(name: String) extends LiveEntity {
var stageIds = Set[Int]()
override protected def doUpdate(): Any = {
new PoolData(name, stageIds)
}
}
private[spark] object LiveEntityHelpers {
private val stringInterner = Interners.newWeakInterner[String]()
private def accuValuetoString(value: Any): String = value match {
case list: java.util.List[_] =>
      // SPARK-30379: For collection accumulator, the string representation might
      // take much more memory (e.g. long => string of it) and cause OOM.
      // So we only show the first few elements.
if (list.size() > 5) {
list.asScala.take(5).mkString("[", ",", "," + "... " + (list.size() - 5) + " more items]")
} else {
list.toString
}
case _ => value.toString
}
def newAccumulatorInfos(accums: Iterable[AccumulableInfo]): Seq[v1.AccumulableInfo] = {
accums
.filter { acc =>
// We don't need to store internal or SQL accumulables as their values will be shown in
// other places, so drop them to reduce the memory usage.
!acc.internal && acc.metadata != Some(AccumulatorContext.SQL_ACCUM_IDENTIFIER)
}
.map { acc =>
new v1.AccumulableInfo(
acc.id,
acc.name.map(weakIntern).orNull,
acc.update.map(accuValuetoString),
acc.value.map(accuValuetoString).orNull)
}
.toSeq
}
/** String interning to reduce the memory usage. */
def weakIntern(s: String): String = {
stringInterner.intern(s)
}
// scalastyle:off argcount
def createMetrics(
executorDeserializeTime: Long,
executorDeserializeCpuTime: Long,
executorRunTime: Long,
executorCpuTime: Long,
resultSize: Long,
jvmGcTime: Long,
resultSerializationTime: Long,
memoryBytesSpilled: Long,
diskBytesSpilled: Long,
peakExecutionMemory: Long,
inputBytesRead: Long,
inputRecordsRead: Long,
outputBytesWritten: Long,
outputRecordsWritten: Long,
shuffleRemoteBlocksFetched: Long,
shuffleLocalBlocksFetched: Long,
shuffleFetchWaitTime: Long,
shuffleRemoteBytesRead: Long,
shuffleRemoteBytesReadToDisk: Long,
shuffleLocalBytesRead: Long,
shuffleRecordsRead: Long,
shuffleBytesWritten: Long,
shuffleWriteTime: Long,
shuffleRecordsWritten: Long): v1.TaskMetrics = {
new v1.TaskMetrics(
executorDeserializeTime,
executorDeserializeCpuTime,
executorRunTime,
executorCpuTime,
resultSize,
jvmGcTime,
resultSerializationTime,
memoryBytesSpilled,
diskBytesSpilled,
peakExecutionMemory,
new v1.InputMetrics(
inputBytesRead,
inputRecordsRead),
new v1.OutputMetrics(
outputBytesWritten,
outputRecordsWritten),
new v1.ShuffleReadMetrics(
shuffleRemoteBlocksFetched,
shuffleLocalBlocksFetched,
shuffleFetchWaitTime,
shuffleRemoteBytesRead,
shuffleRemoteBytesReadToDisk,
shuffleLocalBytesRead,
shuffleRecordsRead),
new v1.ShuffleWriteMetrics(
shuffleBytesWritten,
shuffleWriteTime,
shuffleRecordsWritten))
}
// scalastyle:on argcount
def createMetrics(default: Long): v1.TaskMetrics = {
createMetrics(default, default, default, default, default, default, default, default,
default, default, default, default, default, default, default, default,
default, default, default, default, default, default, default, default)
}
/** Add m2 values to m1. */
def addMetrics(m1: v1.TaskMetrics, m2: v1.TaskMetrics): v1.TaskMetrics = addMetrics(m1, m2, 1)
/** Subtract m2 values from m1. */
def subtractMetrics(m1: v1.TaskMetrics, m2: v1.TaskMetrics): v1.TaskMetrics = {
addMetrics(m1, m2, -1)
}
/**
* Convert all the metric values to negative as well as handle zero values.
   * This method assumes that all the metric values are greater than or equal to zero.
*/
def makeNegative(m: v1.TaskMetrics): v1.TaskMetrics = {
// To handle 0 metric value, add 1 and make the metric negative.
// To recover actual value do `math.abs(metric + 1)`
// Eg: if the metric values are (5, 3, 0, 1) => Updated metric values will be (-6, -4, -1, -2)
// To get actual metric value, do math.abs(metric + 1) => (5, 3, 0, 1)
def updateMetricValue(metric: Long): Long = {
metric * -1L - 1L
}
createMetrics(
updateMetricValue(m.executorDeserializeTime),
updateMetricValue(m.executorDeserializeCpuTime),
updateMetricValue(m.executorRunTime),
updateMetricValue(m.executorCpuTime),
updateMetricValue(m.resultSize),
updateMetricValue(m.jvmGcTime),
updateMetricValue(m.resultSerializationTime),
updateMetricValue(m.memoryBytesSpilled),
updateMetricValue(m.diskBytesSpilled),
updateMetricValue(m.peakExecutionMemory),
updateMetricValue(m.inputMetrics.bytesRead),
updateMetricValue(m.inputMetrics.recordsRead),
updateMetricValue(m.outputMetrics.bytesWritten),
updateMetricValue(m.outputMetrics.recordsWritten),
updateMetricValue(m.shuffleReadMetrics.remoteBlocksFetched),
updateMetricValue(m.shuffleReadMetrics.localBlocksFetched),
updateMetricValue(m.shuffleReadMetrics.fetchWaitTime),
updateMetricValue(m.shuffleReadMetrics.remoteBytesRead),
updateMetricValue(m.shuffleReadMetrics.remoteBytesReadToDisk),
updateMetricValue(m.shuffleReadMetrics.localBytesRead),
updateMetricValue(m.shuffleReadMetrics.recordsRead),
updateMetricValue(m.shuffleWriteMetrics.bytesWritten),
updateMetricValue(m.shuffleWriteMetrics.writeTime),
updateMetricValue(m.shuffleWriteMetrics.recordsWritten))
}
private def addMetrics(m1: v1.TaskMetrics, m2: v1.TaskMetrics, mult: Int): v1.TaskMetrics = {
createMetrics(
m1.executorDeserializeTime + m2.executorDeserializeTime * mult,
m1.executorDeserializeCpuTime + m2.executorDeserializeCpuTime * mult,
m1.executorRunTime + m2.executorRunTime * mult,
m1.executorCpuTime + m2.executorCpuTime * mult,
m1.resultSize + m2.resultSize * mult,
m1.jvmGcTime + m2.jvmGcTime * mult,
m1.resultSerializationTime + m2.resultSerializationTime * mult,
m1.memoryBytesSpilled + m2.memoryBytesSpilled * mult,
m1.diskBytesSpilled + m2.diskBytesSpilled * mult,
m1.peakExecutionMemory + m2.peakExecutionMemory * mult,
m1.inputMetrics.bytesRead + m2.inputMetrics.bytesRead * mult,
m1.inputMetrics.recordsRead + m2.inputMetrics.recordsRead * mult,
m1.outputMetrics.bytesWritten + m2.outputMetrics.bytesWritten * mult,
m1.outputMetrics.recordsWritten + m2.outputMetrics.recordsWritten * mult,
m1.shuffleReadMetrics.remoteBlocksFetched + m2.shuffleReadMetrics.remoteBlocksFetched * mult,
m1.shuffleReadMetrics.localBlocksFetched + m2.shuffleReadMetrics.localBlocksFetched * mult,
m1.shuffleReadMetrics.fetchWaitTime + m2.shuffleReadMetrics.fetchWaitTime * mult,
m1.shuffleReadMetrics.remoteBytesRead + m2.shuffleReadMetrics.remoteBytesRead * mult,
m1.shuffleReadMetrics.remoteBytesReadToDisk +
m2.shuffleReadMetrics.remoteBytesReadToDisk * mult,
m1.shuffleReadMetrics.localBytesRead + m2.shuffleReadMetrics.localBytesRead * mult,
m1.shuffleReadMetrics.recordsRead + m2.shuffleReadMetrics.recordsRead * mult,
m1.shuffleWriteMetrics.bytesWritten + m2.shuffleWriteMetrics.bytesWritten * mult,
m1.shuffleWriteMetrics.writeTime + m2.shuffleWriteMetrics.writeTime * mult,
m1.shuffleWriteMetrics.recordsWritten + m2.shuffleWriteMetrics.recordsWritten * mult)
}
}
/**
* A custom sequence of partitions based on a mutable linked list.
*
* The external interface is an immutable Seq, which is thread-safe for traversal. There are no
* guarantees about consistency though - iteration might return elements that have been removed
* or miss added elements.
*
* Internally, the sequence is mutable, and elements can modify the data they expose. Additions and
* removals are O(1). It is not safe to do multiple writes concurrently.
*/
private class RDDPartitionSeq extends Seq[v1.RDDPartitionInfo] {
@volatile private var _head: LiveRDDPartition = null
@volatile private var _tail: LiveRDDPartition = null
@volatile var count = 0
override def apply(idx: Int): v1.RDDPartitionInfo = {
var curr = 0
var e = _head
while (curr < idx && e != null) {
curr += 1
e = e.next
}
if (e != null) e.value else throw new IndexOutOfBoundsException(idx.toString)
}
override def iterator: Iterator[v1.RDDPartitionInfo] = {
new Iterator[v1.RDDPartitionInfo] {
var current = _head
override def hasNext: Boolean = current != null
override def next(): v1.RDDPartitionInfo = {
if (current != null) {
val tmp = current
current = tmp.next
tmp.value
} else {
throw new NoSuchElementException()
}
}
}
}
override def length: Int = count
def addPartition(part: LiveRDDPartition): Unit = {
part.prev = _tail
if (_tail != null) {
_tail.next = part
}
if (_head == null) {
_head = part
}
_tail = part
count += 1
}
def removePartition(part: LiveRDDPartition): Unit = {
count -= 1
// Remove the partition from the list, but leave the pointers unchanged. That ensures a best
// effort at returning existing elements when iterations still reference the removed partition.
if (part.prev != null) {
part.prev.next = part.next
}
if (part eq _head) {
_head = part.next
}
if (part.next != null) {
part.next.prev = part.prev
}
if (part eq _tail) {
_tail = part.prev
}
}
}
|
dbtsai/spark
|
core/src/main/scala/org/apache/spark/status/LiveEntity.scala
|
Scala
|
apache-2.0
| 30,092 |
package st.emily.swayze.data
case class FailedParseException(message: String) extends Exception(message)
|
ErinCall/swayze
|
src/main/scala/data/Exceptions.scala
|
Scala
|
mit
| 107 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.linalg
import org.apache.spark.ml.SparkMLFunSuite
import org.apache.spark.ml.linalg.BLAS._
import org.apache.spark.ml.util.TestingUtils._
class BLASSuite extends SparkMLFunSuite {
test("nativeL1Threshold") {
assert(getBLAS(128) == BLAS.f2jBLAS)
assert(getBLAS(256) == BLAS.nativeBLAS)
assert(getBLAS(512) == BLAS.nativeBLAS)
}
test("copy") {
val sx = Vectors.sparse(4, Array(0, 2), Array(1.0, -2.0))
val dx = Vectors.dense(1.0, 0.0, -2.0, 0.0)
val sy = Vectors.sparse(4, Array(0, 1, 3), Array(2.0, 1.0, 1.0))
val dy = Array(2.0, 1.0, 0.0, 1.0)
val dy1 = Vectors.dense(dy.clone())
copy(sx, dy1)
assert(dy1 ~== dx absTol 1e-15)
val dy2 = Vectors.dense(dy.clone())
copy(dx, dy2)
assert(dy2 ~== dx absTol 1e-15)
intercept[IllegalArgumentException] {
copy(sx, sy)
}
intercept[IllegalArgumentException] {
copy(dx, sy)
}
withClue("vector sizes must match") {
intercept[Exception] {
copy(sx, Vectors.dense(0.0, 1.0, 2.0))
}
}
}
test("scal") {
val a = 0.1
val sx = Vectors.sparse(3, Array(0, 2), Array(1.0, -2.0))
val dx = Vectors.dense(1.0, 0.0, -2.0)
scal(a, sx)
assert(sx ~== Vectors.sparse(3, Array(0, 2), Array(0.1, -0.2)) absTol 1e-15)
scal(a, dx)
assert(dx ~== Vectors.dense(0.1, 0.0, -0.2) absTol 1e-15)
}
test("axpy") {
val alpha = 0.1
val sx = Vectors.sparse(3, Array(0, 2), Array(1.0, -2.0))
val dx = Vectors.dense(1.0, 0.0, -2.0)
val dy = Array(2.0, 1.0, 0.0)
val expected = Vectors.dense(2.1, 1.0, -0.2)
val dy1 = Vectors.dense(dy.clone())
axpy(alpha, sx, dy1)
assert(dy1 ~== expected absTol 1e-15)
val dy2 = Vectors.dense(dy.clone())
axpy(alpha, dx, dy2)
assert(dy2 ~== expected absTol 1e-15)
val sy = Vectors.sparse(4, Array(0, 1), Array(2.0, 1.0))
intercept[IllegalArgumentException] {
axpy(alpha, sx, sy)
}
intercept[IllegalArgumentException] {
axpy(alpha, dx, sy)
}
withClue("vector sizes must match") {
intercept[Exception] {
axpy(alpha, sx, Vectors.dense(1.0, 2.0))
}
}
}
test("dot") {
val sx = Vectors.sparse(3, Array(0, 2), Array(1.0, -2.0))
val dx = Vectors.dense(1.0, 0.0, -2.0)
val sy = Vectors.sparse(3, Array(0, 1), Array(2.0, 1.0))
val dy = Vectors.dense(2.0, 1.0, 0.0)
assert(dot(sx, sy) ~== 2.0 absTol 1e-15)
assert(dot(sy, sx) ~== 2.0 absTol 1e-15)
assert(dot(sx, dy) ~== 2.0 absTol 1e-15)
assert(dot(dy, sx) ~== 2.0 absTol 1e-15)
assert(dot(dx, dy) ~== 2.0 absTol 1e-15)
assert(dot(dy, dx) ~== 2.0 absTol 1e-15)
assert(dot(sx, sx) ~== 5.0 absTol 1e-15)
assert(dot(dx, dx) ~== 5.0 absTol 1e-15)
assert(dot(sx, dx) ~== 5.0 absTol 1e-15)
assert(dot(dx, sx) ~== 5.0 absTol 1e-15)
val sx1 = Vectors.sparse(10, Array(0, 3, 5, 7, 8), Array(1.0, 2.0, 3.0, 4.0, 5.0))
val sx2 = Vectors.sparse(10, Array(1, 3, 6, 7, 9), Array(1.0, 2.0, 3.0, 4.0, 5.0))
assert(dot(sx1, sx2) ~== 20.0 absTol 1e-15)
assert(dot(sx2, sx1) ~== 20.0 absTol 1e-15)
withClue("vector sizes must match") {
intercept[Exception] {
dot(sx, Vectors.dense(2.0, 1.0))
}
}
}
test("spr") {
// test dense vector
val alpha = 0.1
val x = new DenseVector(Array(1.0, 2, 2.1, 4))
val U = new DenseVector(Array(1.0, 2, 2, 3, 3, 3, 4, 4, 4, 4))
val expected = new DenseVector(Array(1.1, 2.2, 2.4, 3.21, 3.42, 3.441, 4.4, 4.8, 4.84, 5.6))
spr(alpha, x, U)
assert(U ~== expected absTol 1e-9)
val matrix33 = new DenseVector(Array(1.0, 2, 3, 4, 5))
withClue("Size of vector must match the rank of matrix") {
intercept[Exception] {
spr(alpha, x, matrix33)
}
}
// test sparse vector
val sv = new SparseVector(4, Array(0, 3), Array(1.0, 2))
val U2 = new DenseVector(Array(1.0, 2, 2, 3, 3, 3, 4, 4, 4, 4))
spr(0.1, sv, U2)
val expectedSparse = new DenseVector(Array(1.1, 2.0, 2.0, 3.0, 3.0, 3.0, 4.2, 4.0, 4.0, 4.4))
assert(U2 ~== expectedSparse absTol 1e-15)
}
test("syr") {
val dA = new DenseMatrix(4, 4,
Array(0.0, 1.2, 2.2, 3.1, 1.2, 3.2, 5.3, 4.6, 2.2, 5.3, 1.8, 3.0, 3.1, 4.6, 3.0, 0.8))
val x = new DenseVector(Array(0.0, 2.7, 3.5, 2.1))
val alpha = 0.15
val expected = new DenseMatrix(4, 4,
Array(0.0, 1.2, 2.2, 3.1, 1.2, 4.2935, 6.7175, 5.4505, 2.2, 6.7175, 3.6375, 4.1025, 3.1,
5.4505, 4.1025, 1.4615))
syr(alpha, x, dA)
assert(dA ~== expected absTol 1e-15)
val dB =
new DenseMatrix(3, 4, Array(0.0, 1.2, 2.2, 3.1, 1.2, 3.2, 5.3, 4.6, 2.2, 5.3, 1.8, 3.0))
withClue("Matrix A must be a symmetric Matrix") {
intercept[Exception] {
syr(alpha, x, dB)
}
}
val dC =
new DenseMatrix(3, 3, Array(0.0, 1.2, 2.2, 1.2, 3.2, 5.3, 2.2, 5.3, 1.8))
withClue("Size of vector must match the rank of matrix") {
intercept[Exception] {
syr(alpha, x, dC)
}
}
val y = new DenseVector(Array(0.0, 2.7, 3.5, 2.1, 1.5))
withClue("Size of vector must match the rank of matrix") {
intercept[Exception] {
syr(alpha, y, dA)
}
}
val xSparse = new SparseVector(4, Array(0, 2, 3), Array(1.0, 3.0, 4.0))
val dD = new DenseMatrix(4, 4,
Array(0.0, 1.2, 2.2, 3.1, 1.2, 3.2, 5.3, 4.6, 2.2, 5.3, 1.8, 3.0, 3.1, 4.6, 3.0, 0.8))
syr(0.1, xSparse, dD)
val expectedSparse = new DenseMatrix(4, 4,
Array(0.1, 1.2, 2.5, 3.5, 1.2, 3.2, 5.3, 4.6, 2.5, 5.3, 2.7, 4.2, 3.5, 4.6, 4.2, 2.4))
assert(dD ~== expectedSparse absTol 1e-15)
}
test("gemm") {
val dA =
new DenseMatrix(4, 3, Array(0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 3.0))
val sA = new SparseMatrix(4, 3, Array(0, 1, 3, 4), Array(1, 0, 2, 3), Array(1.0, 2.0, 1.0, 3.0))
val B = new DenseMatrix(3, 2, Array(1.0, 0.0, 0.0, 0.0, 2.0, 1.0))
val expected = new DenseMatrix(4, 2, Array(0.0, 1.0, 0.0, 0.0, 4.0, 0.0, 2.0, 3.0))
val BTman = new DenseMatrix(2, 3, Array(1.0, 0.0, 0.0, 2.0, 0.0, 1.0))
val BT = B.transpose
assert(dA.multiply(B) ~== expected absTol 1e-15)
assert(sA.multiply(B) ~== expected absTol 1e-15)
val C1 = new DenseMatrix(4, 2, Array(1.0, 0.0, 2.0, 1.0, 0.0, 0.0, 1.0, 0.0))
val C2 = C1.copy
val C3 = C1.copy
val C4 = C1.copy
val C5 = C1.copy
val C6 = C1.copy
val C7 = C1.copy
val C8 = C1.copy
val C9 = C1.copy
val C10 = C1.copy
val C11 = C1.copy
val C12 = C1.copy
val C13 = C1.copy
val C14 = C1.copy
val C15 = C1.copy
val C16 = C1.copy
val C17 = C1.copy
val expected2 = new DenseMatrix(4, 2, Array(2.0, 1.0, 4.0, 2.0, 4.0, 0.0, 4.0, 3.0))
val expected3 = new DenseMatrix(4, 2, Array(2.0, 2.0, 4.0, 2.0, 8.0, 0.0, 6.0, 6.0))
val expected4 = new DenseMatrix(4, 2, Array(5.0, 0.0, 10.0, 5.0, 0.0, 0.0, 5.0, 0.0))
val expected5 = C1.copy
gemm(1.0, dA, B, 2.0, C1)
gemm(1.0, sA, B, 2.0, C2)
gemm(2.0, dA, B, 2.0, C3)
gemm(2.0, sA, B, 2.0, C4)
assert(C1 ~== expected2 absTol 1e-15)
assert(C2 ~== expected2 absTol 1e-15)
assert(C3 ~== expected3 absTol 1e-15)
assert(C4 ~== expected3 absTol 1e-15)
gemm(1.0, dA, B, 0.0, C17)
assert(C17 ~== expected absTol 1e-15)
gemm(1.0, sA, B, 0.0, C17)
assert(C17 ~== expected absTol 1e-15)
withClue("columns of A don't match the rows of B") {
intercept[Exception] {
gemm(1.0, dA.transpose, B, 2.0, C1)
}
}
val dATman =
new DenseMatrix(3, 4, Array(0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 3.0))
val sATman =
new SparseMatrix(3, 4, Array(0, 1, 2, 3, 4), Array(1, 0, 1, 2), Array(2.0, 1.0, 1.0, 3.0))
val dATT = dATman.transpose
val sATT = sATman.transpose
val BTT = BTman.transpose.asInstanceOf[DenseMatrix]
assert(dATT.multiply(B) ~== expected absTol 1e-15)
assert(sATT.multiply(B) ~== expected absTol 1e-15)
assert(dATT.multiply(BTT) ~== expected absTol 1e-15)
assert(sATT.multiply(BTT) ~== expected absTol 1e-15)
gemm(1.0, dATT, BTT, 2.0, C5)
gemm(1.0, sATT, BTT, 2.0, C6)
gemm(2.0, dATT, BTT, 2.0, C7)
gemm(2.0, sATT, BTT, 2.0, C8)
gemm(1.0, dA, BTT, 2.0, C9)
gemm(1.0, sA, BTT, 2.0, C10)
gemm(2.0, dA, BTT, 2.0, C11)
gemm(2.0, sA, BTT, 2.0, C12)
assert(C5 ~== expected2 absTol 1e-15)
assert(C6 ~== expected2 absTol 1e-15)
assert(C7 ~== expected3 absTol 1e-15)
assert(C8 ~== expected3 absTol 1e-15)
assert(C9 ~== expected2 absTol 1e-15)
assert(C10 ~== expected2 absTol 1e-15)
assert(C11 ~== expected3 absTol 1e-15)
assert(C12 ~== expected3 absTol 1e-15)
gemm(0, dA, B, 5, C13)
gemm(0, sA, B, 5, C14)
gemm(0, dA, B, 1, C15)
gemm(0, sA, B, 1, C16)
assert(C13 ~== expected4 absTol 1e-15)
assert(C14 ~== expected4 absTol 1e-15)
assert(C15 ~== expected5 absTol 1e-15)
assert(C16 ~== expected5 absTol 1e-15)
}
test("gemv") {
val dA =
new DenseMatrix(4, 3, Array(0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 3.0))
val sA = new SparseMatrix(4, 3, Array(0, 1, 3, 4), Array(1, 0, 2, 3), Array(1.0, 2.0, 1.0, 3.0))
val dA2 =
new DenseMatrix(4, 3, Array(0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 3.0), true)
val sA2 =
new SparseMatrix(4, 3, Array(0, 1, 2, 3, 4), Array(1, 0, 1, 2), Array(2.0, 1.0, 1.0, 3.0),
true)
val dx = new DenseVector(Array(1.0, 2.0, 3.0))
val sx = dx.toSparse
val expected = new DenseVector(Array(4.0, 1.0, 2.0, 9.0))
assert(dA.multiply(dx) ~== expected absTol 1e-15)
assert(sA.multiply(dx) ~== expected absTol 1e-15)
assert(dA.multiply(sx) ~== expected absTol 1e-15)
assert(sA.multiply(sx) ~== expected absTol 1e-15)
val y1 = new DenseVector(Array(1.0, 3.0, 1.0, 0.0))
val y2 = y1.copy
val y3 = y1.copy
val y4 = y1.copy
val y5 = y1.copy
val y6 = y1.copy
val y7 = y1.copy
val y8 = y1.copy
val y9 = y1.copy
val y10 = y1.copy
val y11 = y1.copy
val y12 = y1.copy
val y13 = y1.copy
val y14 = y1.copy
val y15 = y1.copy
val y16 = y1.copy
val expected2 = new DenseVector(Array(6.0, 7.0, 4.0, 9.0))
val expected3 = new DenseVector(Array(10.0, 8.0, 6.0, 18.0))
gemv(1.0, dA, dx, 2.0, y1)
gemv(1.0, sA, dx, 2.0, y2)
gemv(1.0, dA, sx, 2.0, y3)
gemv(1.0, sA, sx, 2.0, y4)
gemv(1.0, dA2, dx, 2.0, y5)
gemv(1.0, sA2, dx, 2.0, y6)
gemv(1.0, dA2, sx, 2.0, y7)
gemv(1.0, sA2, sx, 2.0, y8)
gemv(2.0, dA, dx, 2.0, y9)
gemv(2.0, sA, dx, 2.0, y10)
gemv(2.0, dA, sx, 2.0, y11)
gemv(2.0, sA, sx, 2.0, y12)
gemv(2.0, dA2, dx, 2.0, y13)
gemv(2.0, sA2, dx, 2.0, y14)
gemv(2.0, dA2, sx, 2.0, y15)
gemv(2.0, sA2, sx, 2.0, y16)
assert(y1 ~== expected2 absTol 1e-15)
assert(y2 ~== expected2 absTol 1e-15)
assert(y3 ~== expected2 absTol 1e-15)
assert(y4 ~== expected2 absTol 1e-15)
assert(y5 ~== expected2 absTol 1e-15)
assert(y6 ~== expected2 absTol 1e-15)
assert(y7 ~== expected2 absTol 1e-15)
assert(y8 ~== expected2 absTol 1e-15)
assert(y9 ~== expected3 absTol 1e-15)
assert(y10 ~== expected3 absTol 1e-15)
assert(y11 ~== expected3 absTol 1e-15)
assert(y12 ~== expected3 absTol 1e-15)
assert(y13 ~== expected3 absTol 1e-15)
assert(y14 ~== expected3 absTol 1e-15)
assert(y15 ~== expected3 absTol 1e-15)
assert(y16 ~== expected3 absTol 1e-15)
withClue("columns of A don't match the rows of B") {
intercept[Exception] {
gemv(1.0, dA.transpose, dx, 2.0, y1)
}
intercept[Exception] {
gemv(1.0, sA.transpose, dx, 2.0, y1)
}
intercept[Exception] {
gemv(1.0, dA.transpose, sx, 2.0, y1)
}
intercept[Exception] {
gemv(1.0, sA.transpose, sx, 2.0, y1)
}
}
val y17 = new DenseVector(Array(0.0, 0.0))
val y18 = y17.copy
val sA3 = new SparseMatrix(3, 2, Array(0, 2, 4), Array(1, 2, 0, 1), Array(2.0, 1.0, 1.0, 2.0))
.transpose
val sA4 =
new SparseMatrix(2, 3, Array(0, 1, 3, 4), Array(1, 0, 1, 0), Array(1.0, 2.0, 2.0, 1.0))
val sx3 = new SparseVector(3, Array(1, 2), Array(2.0, 1.0))
val expected4 = new DenseVector(Array(5.0, 4.0))
gemv(1.0, sA3, sx3, 0.0, y17)
gemv(1.0, sA4, sx3, 0.0, y18)
assert(y17 ~== expected4 absTol 1e-15)
assert(y18 ~== expected4 absTol 1e-15)
val dAT =
new DenseMatrix(3, 4, Array(0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 3.0))
val sAT =
new SparseMatrix(3, 4, Array(0, 1, 2, 3, 4), Array(1, 0, 1, 2), Array(2.0, 1.0, 1.0, 3.0))
val dATT = dAT.transpose
val sATT = sAT.transpose
assert(dATT.multiply(dx) ~== expected absTol 1e-15)
assert(sATT.multiply(dx) ~== expected absTol 1e-15)
assert(dATT.multiply(sx) ~== expected absTol 1e-15)
assert(sATT.multiply(sx) ~== expected absTol 1e-15)
}
test("spmv") {
/*
A = [[3.0, -2.0, 2.0, -4.0],
[-2.0, -8.0, 4.0, 7.0],
[2.0, 4.0, -3.0, -3.0],
[-4.0, 7.0, -3.0, 0.0]]
x = [5.0, 2.0, -1.0, -9.0]
Ax = [ 45., -93., 48., -3.]
*/
val A = new DenseVector(Array(3.0, -2.0, -8.0, 2.0, 4.0, -3.0, -4.0, 7.0, -3.0, 0.0))
val x = new DenseVector(Array(5.0, 2.0, -1.0, -9.0))
val n = 4
val y1 = new DenseVector(Array(-3.0, 6.0, -8.0, -3.0))
val y2 = y1.copy
val y3 = y1.copy
val y4 = y1.copy
val y5 = y1.copy
val y6 = y1.copy
val y7 = y1.copy
val expected1 = new DenseVector(Array(42.0, -87.0, 40.0, -6.0))
val expected2 = new DenseVector(Array(19.5, -40.5, 16.0, -4.5))
val expected3 = new DenseVector(Array(-25.5, 52.5, -32.0, -1.5))
val expected4 = new DenseVector(Array(-3.0, 6.0, -8.0, -3.0))
val expected5 = new DenseVector(Array(43.5, -90.0, 44.0, -4.5))
val expected6 = new DenseVector(Array(46.5, -96.0, 52.0, -1.5))
val expected7 = new DenseVector(Array(45.0, -93.0, 48.0, -3.0))
dspmv(n, 1.0, A, x, 1.0, y1)
dspmv(n, 0.5, A, x, 1.0, y2)
dspmv(n, -0.5, A, x, 1.0, y3)
dspmv(n, 0.0, A, x, 1.0, y4)
dspmv(n, 1.0, A, x, 0.5, y5)
dspmv(n, 1.0, A, x, -0.5, y6)
dspmv(n, 1.0, A, x, 0.0, y7)
assert(y1 ~== expected1 absTol 1e-8)
assert(y2 ~== expected2 absTol 1e-8)
assert(y3 ~== expected3 absTol 1e-8)
assert(y4 ~== expected4 absTol 1e-8)
assert(y5 ~== expected5 absTol 1e-8)
assert(y6 ~== expected6 absTol 1e-8)
assert(y7 ~== expected7 absTol 1e-8)
}
}
|
matthewfranglen/spark
|
mllib-local/src/test/scala/org/apache/spark/ml/linalg/BLASSuite.scala
|
Scala
|
mit
| 15,419 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Matthew Saltz, John Miller, Ayushi Jain
* @version 1.3
* @date Thu Jul 25 11:28:31 EDT 2013
* @see LICENSE (MIT style license file).
*/
package scalation.graphalytics
import scala.collection.immutable.{Set => SET}
import scalation.util.time
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualIso` class provides an implementation for Subgraph Isomorphism
* that uses Dual Graph Simulation for pruning.
* @param g the data graph G(V, E, l)
* @param q the query graph Q(U, D, k)
*/
class DualIso (g: Graph, q: Graph)
extends GraphMatcher (g, q)
{
private val duals = new DualSim2 (g, q) // object for Dual Simulation algorithm
private var t0 = 0.0 // start time for timer
private var matches = SET [Array [SET [Int]]] () // initialize matches to empty
private var noBijections = true // no results yet
private var limit = 1000000 // limit on number of matches
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Set an upper bound on the number of matches to allow before quitting.
* @param _limit the number of matches before quitting
*/
def setLimit (_limit: Int) { limit = _limit }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Apply the Dual Subgraph Isomorphism algorithm to find subgraphs of data
* graph 'g' that isomorphically match query graph 'q'. These are represented
* by a set of single-valued bijections {'psi'} where each 'psi' function
     * maps each query graph vertex 'u' to a data graph vertex 'v'.
*/
override def bijections (): SET [Array [Int]] =
{
matches = SET [Array [SET [Int]]] () // initialize matches to empty
val phi = duals.feasibleMates () // initial mappings from label match
saltzDualIso (duals.saltzDualSim (phi), 0) // recursively find all bijections
        val psi = simplify (matches)                    // pull bijections out of matches
noBijections = false // results now available
psi // return the set of bijections
} // bijections
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Apply the Dual Subgraph Isomorphism pattern matching algorithm to find
* the mappings from the query graph 'q' to the data graph 'g'. These are
* represented by a multi-valued function 'phi' that maps each query graph
* vertex 'u' to a set of data graph vertices '{v}'.
*/
def mappings (): Array [SET [Int]] =
{
var psi: SET [Array [Int]] = null // mappings from Dual Simulation
if (noBijections) psi = bijections () // if no results, create them
merge (psi) // merge bijections to create mappings
} // mappings
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the count of the number of matches.
*/
def numMatches (): Int = matches.size
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Refine the mappings 'phi' using the Dual Subgraph Isomorphism algorithm.
* Enumerate bijections by using an Ullmann-like recursion that uses Dual
* Graph Simulation for pruning.
* @param phi array of mappings from a query vertex u_q to { graph vertices v_g }
* @param depth the depth of recursion
*/
private def saltzDualIso (phi: Array [SET [Int]], depth: Int)
{
if (depth == q.size) {
if (! phi.isEmpty) {
matches += phi
if (matches.size % CHECK == 0) println ("dualIso: matches so far = " + matches.size)
} // if
} else if (! phi.isEmpty) {
for (i <- phi (depth) if (! contains (phi, depth, i))) {
val phiCopy = phi.map (x => x) // make a copy of phi
phiCopy (depth) = SET [Int] (i) // isolate vertex i
if (matches.size >= limit) return // quit if at LIMIT
saltzDualIso (duals.saltzDualSim (phiCopy), depth + 1) // solve recursively for the next depth
} // for
} // if
} // saltzDualIso
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Determine whether vertex 'j' is contained in any 'phi(i)' for the previous depths.
* @param phi array of mappings from a query vertex u_q to { graph vertices v_g }
* @param depth the current depth of recursion
* @param j the vertex j to check
*/
private def contains (phi: Array [SET [Int]], depth: Int, j: Int): Boolean =
{
for (i <- 0 until depth if phi(i) contains j) return true
false
} // contains
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Create an array to hold matches for each vertex 'u' in the query graph
* 'q' and initialize it to contain all empty sets. Then for each bijection,
* add each element of the bijection to its corresponding match set.
* @param psi the set of bijections
*/
private def merge (psi: SET [Array [Int]]): Array [SET [Int]] =
{
val matches = Array.ofDim [SET [Int]] (q.size).map (_ => SET [Int] ())
        for (b <- psi; i <- b.indices) matches(i) += b(i)
matches
} // merge
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Pull the bijections out of the complete match set.
* @param matches the complete match set embedding all bijections
*/
private def simplify (matches: SET [Array [SET [Int]]]): SET [Array [Int]] =
{
matches.map (m => m.map (set => set.iterator.next))
} // simplify
} // DualIso class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualIsoTest` object is used to test the `DualIso` class.
* > run-main scalation.graphalytics.DualIsoTest
*/
object DualIsoTest extends App
{
val g = Graph.g1
val q = Graph.q1
println (s"g.checkEdges = ${g.checkEdges}")
g.printG ()
println (s"q.checkEdges = ${q.checkEdges}")
q.printG ()
val matcher = new DualIso (g, q) // Dual Subgraph Isomorphism Pattern Matcher
val psi = time { matcher.bijections () } // time the matcher
println ("Number of Matches: " + matcher.numMatches)
for (p <- psi) println (s"psi = ${p.deep}")
} // DualIsoTest
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualIsoTest2` object is used to test the `DualIso` class.
* > run-main scalation.graphalytics.DualIsoTest2
*/
object DualIsoTest2 extends App
{
val g = Graph.g2
val q = Graph.q2
println (s"g.checkEdges = ${g.checkEdges}")
g.printG ()
println (s"q.checkEdges = ${q.checkEdges}")
q.printG ()
val matcher = new DualIso (g, q) // Dual Subgraph Isomorphism Pattern Matcher
val psi = time { matcher.bijections () } // time the matcher
println ("Number of Matches: " + matcher.numMatches)
for (p <- psi) println (s"psi = ${p.deep}")
} // DualIsoTest2
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualIsoTest3` object is used to test the `DualIso` class.
* > run-main scalation.graphalytics.DualIsoTest3
*/
object DualIsoTest3 extends App
{
val gSize = 1000 // size of the data graph
val qSize = 10 // size of the query graph
val nLabels = 100 // number of distinct labels
val gAvDegree = 5 // average vertex out degree for data graph
val qAvDegree = 2 // average vertex out degree for query graph
val g = GraphGen.genRandomGraph (gSize, nLabels, gAvDegree, false, "g")
val q = GraphGen.genBFSQuery (qSize, qAvDegree, g, false, "q")
val matcher = new DualIso (g, q) // Dual Subgraph Isomorphism Pattern Matcher
val psi = time { matcher.bijections () } // time the matcher
println ("Number of Matches: " + matcher.numMatches)
for (p <- psi) println (s"psi = ${p.deep}")
} // DualIsoTest3
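//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualIsoTest4` object is an illustrative sketch (not part of the original
 *  file) showing how `setLimit` bounds the number of matches collected; the
 *  limit of 10 and the reuse of `Graph.g1`/`Graph.q1` are assumptions for the example.
 *  > run-main scalation.graphalytics.DualIsoTest4
 */
object DualIsoTest4 extends App
{
    val g = Graph.g1
    val q = Graph.q1
    val matcher = new DualIso (g, q)                    // Dual Subgraph Isomorphism Pattern Matcher
    matcher.setLimit (10)                               // stop the search after at most 10 matches
    val psi = time { matcher.bijections () }            // time the matcher
    println ("Number of Matches (limit 10): " + matcher.numMatches)
    for (p <- psi) println (s"psi = ${p.deep}")
} // DualIsoTest4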
|
NBKlepp/fda
|
scalation_1.3/scalation_modeling/src/main/scala/scalation/graphalytics/DualIso.scala
|
Scala
|
mit
| 8,603 |
package message
/**
* Created by dylan on 2/14/16.
*/
abstract class Msg
case class Send(msg: String) extends Msg
case class NewMsg(from: String, msg: String) extends Msg
case class Info(msg: String) extends Msg
case class Connect(username: String) extends Msg
case class Broadcast(msg: String) extends Msg
case object Disconnect extends Msg
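// Illustrative sketch (not part of the original file): dispatching on the message
// hierarchy with a pattern match. The object, function and argument names are placeholders.
object MsgExample extends App {
  def describe(m: Msg): String = m match {
    case Send(msg)         => s"send: $msg"
    case NewMsg(from, msg) => s"new message from $from: $msg"
    case Info(msg)         => s"info: $msg"
    case Connect(username) => s"$username connected"
    case Broadcast(msg)    => s"broadcast: $msg"
    case Disconnect        => "disconnected"
  }
  println(describe(Connect("dylan")))
}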
|
astray1988/reactive-farm
|
simple-akka-chat/src/main/scala/message/Msg.scala
|
Scala
|
mit
| 354 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.rest
import java.io.{DataOutputStream, FileNotFoundException}
import java.net.{ConnectException, HttpURLConnection, SocketException, URL}
import java.nio.charset.StandardCharsets
import java.util.concurrent.TimeoutException
import javax.servlet.http.HttpServletResponse
import scala.collection.mutable
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.io.Source
import scala.util.control.NonFatal
import com.fasterxml.jackson.core.JsonProcessingException
import org.apache.spark.{SPARK_VERSION => sparkVersion, SparkConf, SparkException}
import org.apache.spark.deploy.SparkApplication
import org.apache.spark.internal.Logging
import org.apache.spark.util.Utils
/**
* A client that submits applications to a [[RestSubmissionServer]].
*
* In protocol version v1, the REST URL takes the form http://[host:port]/v1/submissions/[action],
* where [action] can be one of create, kill, or status. Each type of request is represented in
* an HTTP message sent to the following prefixes:
* (1) submit - POST to /submissions/create
 * (2) kill - POST to /submissions/kill/[submissionId]
* (3) status - GET /submissions/status/[submissionId]
*
* In the case of (1), parameters are posted in the HTTP body in the form of JSON fields.
* Otherwise, the URL fully specifies the intended action of the client.
*
 * Since the protocol is expected to be stable across Spark versions, existing fields cannot be
 * removed, though new optional fields can be added. In the rare event that forward or
* backward compatibility is broken, Spark must introduce a new protocol version (e.g. v2).
*
* The client and the server must communicate using the same version of the protocol. If there
* is a mismatch, the server will respond with the highest protocol version it supports. A future
* implementation of this client can use that information to retry using the version specified
* by the server.
*/
private[spark] class RestSubmissionClient(master: String) extends Logging {
import RestSubmissionClient._
private val masters: Array[String] = if (master.startsWith("spark://")) {
Utils.parseStandaloneMasterUrls(master)
} else {
Array(master)
}
// Set of masters that lost contact with us, used to keep track of
// whether there are masters still alive for us to communicate with
private val lostMasters = new mutable.HashSet[String]
/**
* Submit an application specified by the parameters in the provided request.
*
* If the submission was successful, poll the status of the submission and report
* it to the user. Otherwise, report the error message provided by the server.
*/
def createSubmission(request: CreateSubmissionRequest): SubmitRestProtocolResponse = {
logInfo(s"Submitting a request to launch an application in $master.")
var handled: Boolean = false
var response: SubmitRestProtocolResponse = null
for (m <- masters if !handled) {
validateMaster(m)
val url = getSubmitUrl(m)
try {
response = postJson(url, request.toJson)
response match {
case s: CreateSubmissionResponse =>
if (s.success) {
reportSubmissionStatus(s)
handleRestResponse(s)
handled = true
}
case unexpected =>
handleUnexpectedRestResponse(unexpected)
}
} catch {
case e: SubmitRestConnectionException =>
if (handleConnectionException(m)) {
throw new SubmitRestConnectionException("Unable to connect to server", e)
}
}
}
response
}
/** Request that the server kill the specified submission. */
def killSubmission(submissionId: String): SubmitRestProtocolResponse = {
logInfo(s"Submitting a request to kill submission $submissionId in $master.")
var handled: Boolean = false
var response: SubmitRestProtocolResponse = null
for (m <- masters if !handled) {
validateMaster(m)
val url = getKillUrl(m, submissionId)
try {
response = post(url)
response match {
case k: KillSubmissionResponse =>
if (!Utils.responseFromBackup(k.message)) {
handleRestResponse(k)
handled = true
}
case unexpected =>
handleUnexpectedRestResponse(unexpected)
}
} catch {
case e: SubmitRestConnectionException =>
if (handleConnectionException(m)) {
throw new SubmitRestConnectionException("Unable to connect to server", e)
}
}
}
response
}
/** Request the status of a submission from the server. */
def requestSubmissionStatus(
submissionId: String,
quiet: Boolean = false): SubmitRestProtocolResponse = {
logInfo(s"Submitting a request for the status of submission $submissionId in $master.")
var handled: Boolean = false
var response: SubmitRestProtocolResponse = null
for (m <- masters if !handled) {
validateMaster(m)
val url = getStatusUrl(m, submissionId)
try {
response = get(url)
response match {
case s: SubmissionStatusResponse if s.success =>
if (!quiet) {
handleRestResponse(s)
}
handled = true
case unexpected =>
handleUnexpectedRestResponse(unexpected)
}
} catch {
case e: SubmitRestConnectionException =>
if (handleConnectionException(m)) {
throw new SubmitRestConnectionException("Unable to connect to server", e)
}
}
}
response
}
/** Construct a message that captures the specified parameters for submitting an application. */
def constructSubmitRequest(
appResource: String,
mainClass: String,
appArgs: Array[String],
sparkProperties: Map[String, String],
environmentVariables: Map[String, String]): CreateSubmissionRequest = {
val message = new CreateSubmissionRequest
message.clientSparkVersion = sparkVersion
message.appResource = appResource
message.mainClass = mainClass
message.appArgs = appArgs
message.sparkProperties = sparkProperties
message.environmentVariables = environmentVariables
message.validate()
message
}
/** Send a GET request to the specified URL. */
private def get(url: URL): SubmitRestProtocolResponse = {
logDebug(s"Sending GET request to server at $url.")
val conn = url.openConnection().asInstanceOf[HttpURLConnection]
conn.setRequestMethod("GET")
readResponse(conn)
}
/** Send a POST request to the specified URL. */
private def post(url: URL): SubmitRestProtocolResponse = {
logDebug(s"Sending POST request to server at $url.")
val conn = url.openConnection().asInstanceOf[HttpURLConnection]
conn.setRequestMethod("POST")
readResponse(conn)
}
/** Send a POST request with the given JSON as the body to the specified URL. */
private def postJson(url: URL, json: String): SubmitRestProtocolResponse = {
logDebug(s"Sending POST request to server at $url:\n$json")
val conn = url.openConnection().asInstanceOf[HttpURLConnection]
conn.setRequestMethod("POST")
conn.setRequestProperty("Content-Type", "application/json")
conn.setRequestProperty("charset", "utf-8")
conn.setDoOutput(true)
try {
val out = new DataOutputStream(conn.getOutputStream)
Utils.tryWithSafeFinally {
out.write(json.getBytes(StandardCharsets.UTF_8))
} {
out.close()
}
} catch {
case e: ConnectException =>
throw new SubmitRestConnectionException("Connect Exception when connect to server", e)
}
readResponse(conn)
}
/**
* Read the response from the server and return it as a validated [[SubmitRestProtocolResponse]].
* If the response represents an error, report the embedded message to the user.
* Exposed for testing.
*/
private[rest] def readResponse(connection: HttpURLConnection): SubmitRestProtocolResponse = {
// scalastyle:off executioncontextglobal
import scala.concurrent.ExecutionContext.Implicits.global
// scalastyle:on executioncontextglobal
val responseFuture = Future {
val responseCode = connection.getResponseCode
if (responseCode != HttpServletResponse.SC_OK) {
val errString = Some(Source.fromInputStream(connection.getErrorStream())
.getLines().mkString("\n"))
if (responseCode == HttpServletResponse.SC_INTERNAL_SERVER_ERROR &&
!connection.getContentType().contains("application/json")) {
throw new SubmitRestProtocolException(s"Server responded with exception:\n${errString}")
}
logError(s"Server responded with error:\n${errString}")
val error = new ErrorResponse
if (responseCode == RestSubmissionServer.SC_UNKNOWN_PROTOCOL_VERSION) {
error.highestProtocolVersion = RestSubmissionServer.PROTOCOL_VERSION
}
error.message = errString.get
error
} else {
val dataStream = connection.getInputStream
// If the server threw an exception while writing a response, it will not have a body
if (dataStream == null) {
throw new SubmitRestProtocolException("Server returned empty body")
}
val responseJson = Source.fromInputStream(dataStream).mkString
logDebug(s"Response from the server:\n$responseJson")
val response = SubmitRestProtocolMessage.fromJson(responseJson)
response.validate()
response match {
// If the response is an error, log the message
case error: ErrorResponse =>
logError(s"Server responded with error:\n${error.message}")
error
// Otherwise, simply return the response
case response: SubmitRestProtocolResponse => response
case unexpected =>
throw new SubmitRestProtocolException(
s"Message received from server was not a response:\n${unexpected.toJson}")
}
}
}
// scalastyle:off awaitresult
try { Await.result(responseFuture, 10.seconds) } catch {
// scalastyle:on awaitresult
case unreachable @ (_: FileNotFoundException | _: SocketException) =>
throw new SubmitRestConnectionException("Unable to connect to server", unreachable)
case malformed @ (_: JsonProcessingException | _: SubmitRestProtocolException) =>
throw new SubmitRestProtocolException("Malformed response received from server", malformed)
case timeout: TimeoutException =>
throw new SubmitRestConnectionException("No response from server", timeout)
case NonFatal(t) =>
throw new SparkException("Exception while waiting for response", t)
}
}
/** Return the REST URL for creating a new submission. */
private def getSubmitUrl(master: String): URL = {
val baseUrl = getBaseUrl(master)
new URL(s"$baseUrl/create")
}
/** Return the REST URL for killing an existing submission. */
private def getKillUrl(master: String, submissionId: String): URL = {
val baseUrl = getBaseUrl(master)
new URL(s"$baseUrl/kill/$submissionId")
}
/** Return the REST URL for requesting the status of an existing submission. */
private def getStatusUrl(master: String, submissionId: String): URL = {
val baseUrl = getBaseUrl(master)
new URL(s"$baseUrl/status/$submissionId")
}
/** Return the base URL for communicating with the server, including the protocol version. */
private def getBaseUrl(master: String): String = {
var masterUrl = master
supportedMasterPrefixes.foreach { prefix =>
if (master.startsWith(prefix)) {
masterUrl = master.stripPrefix(prefix)
}
}
masterUrl = masterUrl.stripSuffix("/")
s"http://$masterUrl/$PROTOCOL_VERSION/submissions"
}
/** Throw an exception if this is not standalone mode. */
private def validateMaster(master: String): Unit = {
val valid = supportedMasterPrefixes.exists { prefix => master.startsWith(prefix) }
if (!valid) {
throw new IllegalArgumentException(
"This REST client only supports master URLs that start with " +
"one of the following: " + supportedMasterPrefixes.mkString(","))
}
}
/** Report the status of a newly created submission. */
private def reportSubmissionStatus(
submitResponse: CreateSubmissionResponse): Unit = {
if (submitResponse.success) {
val submissionId = submitResponse.submissionId
if (submissionId != null) {
logInfo(s"Submission successfully created as $submissionId. Polling submission state...")
pollSubmissionStatus(submissionId)
} else {
// should never happen
logError("Application successfully submitted, but submission ID was not provided!")
}
} else {
val failMessage = Option(submitResponse.message).map { ": " + _ }.getOrElse("")
logError(s"Application submission failed$failMessage")
}
}
/**
* Poll the status of the specified submission and log it.
* This retries up to a fixed number of times before giving up.
*/
private def pollSubmissionStatus(submissionId: String): Unit = {
(1 to REPORT_DRIVER_STATUS_MAX_TRIES).foreach { _ =>
val response = requestSubmissionStatus(submissionId, quiet = true)
val statusResponse = response match {
case s: SubmissionStatusResponse => s
case _ => return // unexpected type, let upstream caller handle it
}
if (statusResponse.success) {
val driverState = Option(statusResponse.driverState)
val workerId = Option(statusResponse.workerId)
val workerHostPort = Option(statusResponse.workerHostPort)
val exception = Option(statusResponse.message)
// Log driver state, if present
driverState match {
case Some(state) => logInfo(s"State of driver $submissionId is now $state.")
case _ => logError(s"State of driver $submissionId was not found!")
}
// Log worker node, if present
(workerId, workerHostPort) match {
case (Some(id), Some(hp)) => logInfo(s"Driver is running on worker $id at $hp.")
case _ =>
}
// Log exception stack trace, if present
exception.foreach { e => logError(e) }
return
}
Thread.sleep(REPORT_DRIVER_STATUS_INTERVAL)
}
logError(s"Error: Master did not recognize driver $submissionId.")
}
/** Log the response sent by the server in the REST application submission protocol. */
private def handleRestResponse(response: SubmitRestProtocolResponse): Unit = {
logInfo(s"Server responded with ${response.messageType}:\n${response.toJson}")
}
/** Log an appropriate error if the response sent by the server is not of the expected type. */
private def handleUnexpectedRestResponse(unexpected: SubmitRestProtocolResponse): Unit = {
logError(s"Error: Server responded with message of unexpected type ${unexpected.messageType}.")
}
/**
* When a connection exception is caught, return true if all masters are lost.
* Note that the heuristic used here does not take into account that masters
* can recover during the lifetime of this client. This assumption should be
* harmless because this client currently does not support retrying submission
* on failure yet (SPARK-6443).
*/
private def handleConnectionException(masterUrl: String): Boolean = {
if (!lostMasters.contains(masterUrl)) {
logWarning(s"Unable to connect to server ${masterUrl}.")
lostMasters += masterUrl
}
lostMasters.size >= masters.length
}
}
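// Illustrative usage sketch (not part of the upstream file; the master host/port and
// application details below are hypothetical). It ties the protocol description in the
// class doc to the API: constructSubmitRequest builds the request body and
// createSubmission POSTs it to /v1/submissions/create on the given master.
//
//   val client = new RestSubmissionClient("spark://master-host:6066")
//   val request = client.constructSubmitRequest(
//     appResource = "hdfs:///apps/example.jar",
//     mainClass = "com.example.Main",
//     appArgs = Array("--input", "data.txt"),
//     sparkProperties = Map("spark.master" -> "spark://master-host:6066"),
//     environmentVariables = Map.empty)
//   val response = client.createSubmission(request)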
private[spark] object RestSubmissionClient {
val supportedMasterPrefixes = Seq("spark://", "mesos://")
// SPARK_HOME and SPARK_CONF_DIR are filtered out because they are usually wrong
// on the remote machine (SPARK-12345) (SPARK-25934)
private val EXCLUDED_SPARK_ENV_VARS = Set("SPARK_ENV_LOADED", "SPARK_HOME", "SPARK_CONF_DIR")
private val REPORT_DRIVER_STATUS_INTERVAL = 1000
private val REPORT_DRIVER_STATUS_MAX_TRIES = 10
val PROTOCOL_VERSION = "v1"
/**
* Filter non-spark environment variables from any environment.
*/
private[rest] def filterSystemEnvironment(env: Map[String, String]): Map[String, String] = {
env.filterKeys { k =>
(k.startsWith("SPARK_") && !EXCLUDED_SPARK_ENV_VARS.contains(k)) || k.startsWith("MESOS_")
}.toMap
}
private[spark] def supportsRestClient(master: String): Boolean = {
supportedMasterPrefixes.exists(master.startsWith)
}
}
private[spark] class RestSubmissionClientApp extends SparkApplication {
/** Submits a request to run the application and return the response. Visible for testing. */
def run(
appResource: String,
mainClass: String,
appArgs: Array[String],
conf: SparkConf,
env: Map[String, String] = Map()): SubmitRestProtocolResponse = {
val master = conf.getOption("spark.master").getOrElse {
throw new IllegalArgumentException("'spark.master' must be set.")
}
val sparkProperties = conf.getAll.toMap
val client = new RestSubmissionClient(master)
val submitRequest = client.constructSubmitRequest(
appResource, mainClass, appArgs, sparkProperties, env)
client.createSubmission(submitRequest)
}
override def start(args: Array[String], conf: SparkConf): Unit = {
if (args.length < 2) {
sys.error("Usage: RestSubmissionClient [app resource] [main class] [app args*]")
sys.exit(1)
}
val appResource = args(0)
val mainClass = args(1)
val appArgs = args.slice(2, args.length)
val env = RestSubmissionClient.filterSystemEnvironment(sys.env)
run(appResource, mainClass, appArgs, conf, env)
}
}
|
mahak/spark
|
core/src/main/scala/org/apache/spark/deploy/rest/RestSubmissionClient.scala
|
Scala
|
apache-2.0
| 18,649 |
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* @ @ *
* # # # # (c) 2016 CAB *
* # # # # # # *
* # # # # # # # # # # # # *
* # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # *
* # # # # # # # # # # # # # # # # # # *
* @ @ *
\* * http://github.com/alexcab * * * * * * * * * * * * * * * * * * * * * * * * * */
package mathact.parts.control.infrastructure
import akka.actor.{Actor, ActorRef, Props}
import akka.pattern.ask
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import mathact.parts.bricks.WorkbenchContext
import mathact.parts.dummies.{TestSketchWithError, TestSketchWithBigTimeout, TestSketchWithSmallTimeout}
import mathact.parts.model.enums.SketchUIElement._
import mathact.parts.model.enums.SketchUiElemState._
import mathact.parts.{WorkbenchLike, ActorTestSpec}
import mathact.parts.model.config._
import mathact.parts.model.data.sketch.SketchData
import mathact.parts.model.messages.M
import org.scalatest.Suite
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._
/** Workbench controller test
* Created by CAB on 02.09.2016.
*/
class WorkbenchControllerTest extends ActorTestSpec {
//Test model
trait TestCase extends Suite{
//Test config
val testMainConfig = new MainConfigLike{
val config = ConfigFactory.load()
val sketchBuildingTimeout = 5.second
val pumping = new PumpingConfigLike{
val pump = new PumpConfigLike{
val askTimeout = Timeout(1.second) }
val drive = new DriveConfigLike{
val pushTimeoutCoefficient = 0
val startFunctionTimeout = 1.second
val messageProcessingTimeout = 1.second
val stopFunctionTimeout = 1.second
val impellerMaxQueueSize = 0
val uiOperationTimeout = 1.second}}
val sketchUI = null
val userLogging = null
val visualization = null}
//Test SketchData
def newTestSketchData(
clazz: Class[_] = classOf[TestSketchWithSmallTimeout],
autorun: Boolean,
showUserLogUi: Boolean,
showVisualisationUi: Boolean)
:SketchData = SketchData(
clazz,
className = clazz.getName,
sketchName = Some("TestSketch1"),
sketchDescription = Some("Testing sketch 1"),
autorun,
showUserLogUi,
showVisualisationUi)
//Helpers actors
def testAskMainController(workbenchController: ActorRef) = system.actorOf(Props(
new Actor{
def receive = {
        case M.NewWorkbenchContext(workbench) ⇒
println(
s"[WorkbenchControllerTest.testAskMainController] Send GetWorkbenchContext, " +
s"sender: $sender, workbench: $workbench")
workbenchController ! M.GetWorkbenchContext(sender)
        case m ⇒
println(s"[WorkbenchControllerTest.testAskMainController] Unknown msg: $m")}}),
"TestAskMainController_" + randomString())
lazy val testActor = TestProbe("testActor_" + randomString())
lazy val testMainController = TestProbe("TestMainController_" + randomString())
lazy val testSketchUi = TestProbe("TestSketchUi_" + randomString())
lazy val testUserLogging = TestProbe("TestUserLogging_" + randomString())
lazy val testVisualization = TestProbe("Visualization_" + randomString())
lazy val testPumping = TestProbe("TestPumping_" + randomString())
//WorkbenchController
def newWorkbenchController(sketch: SketchData): ActorRef = system.actorOf(Props(
new WorkbenchController(testMainConfig, sketch, testMainController.ref){
val sketchUi = testSketchUi.ref
val userLogging = testUserLogging.ref
val visualization = testVisualization.ref
val pumping = testPumping.ref}),
"WorkbenchController_" + randomString())
def newBuiltWorkbenchController(): ActorRef = {
val controller = newWorkbenchController( newTestSketchData(
autorun = false,
showUserLogUi = false,
showVisualisationUi = false))
testMainController.send(controller, M.StartWorkbenchController)
testSketchUi.expectMsgType[M.SetSketchUIStatusString]
testSketchUi.expectMsg(M.ShowSketchUI)
testSketchUi.expectMsgType[M.UpdateSketchUIState]
testSketchUi.send(controller, M.SketchUIChanged(isShow = true))
testActor.send(controller, M.GetWorkbenchContext(testActor.ref))
testActor.expectMsgType[Either[Exception, WorkbenchContext]]
testSketchUi.expectMsgType[M.SetSketchUIStatusString]
testSketchUi.expectMsgType[M.UpdateSketchUIState]
testUserLogging.expectMsgType[M.LogInfo]
testMainController.expectMsgType[M.SketchBuilt]
controller}
def newStartedWorkbenchController(): ActorRef = {
val controller = newBuiltWorkbenchController()
testSketchUi.send(controller, M.SketchUIActionTriggered(RunBtn, Unit))
testSketchUi.expectMsgType[M.SetSketchUIStatusString]
testSketchUi.expectMsgType[M.UpdateSketchUIState]
testPumping.expectMsg(M.StartPumping)
testPumping.send(controller, M.PumpingStarted)
testSketchUi.expectMsgType[M.SetSketchUIStatusString]
testSketchUi.expectMsgType[M.UpdateSketchUIState]
testUserLogging.expectMsgType[M.LogInfo]
controller}}
//Testing
"WorkbenchController on start" should{
"by WorkbenchControllerStart, create sketch instance show UI, start pumping with autorun on" in new TestCase {
//Preparing
val controller = newWorkbenchController( newTestSketchData(
clazz = classOf[TestSketchWithSmallTimeout],
autorun = true,
showUserLogUi = true,
showVisualisationUi = true))
//Send start
testMainController.send(controller, M.StartWorkbenchController)
//Show sketch UI
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsg(M.ShowSketchUI)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemShow,
        VisualisationBtn → ElemShow)
testSketchUi.send(controller, M.SketchUIChanged(isShow = true))
//Show user logging UI
testUserLogging.expectMsg(M.ShowUserLoggingUI)
testUserLogging.send(controller, M.UserLoggingUIChanged(isShow = true))
      testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(LogBtn → ElemShow)
//Show visualization UI
testVisualization.expectMsg(M.ShowVisualizationUI)
testVisualization.send(controller, M.VisualizationUIChanged(isShow = true))
      testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(VisualisationBtn → ElemShow)
//Get context
testActor.send(controller, M.GetWorkbenchContext(testActor.ref))
testActor.expectMsgType[Either[Exception, WorkbenchContext]]
//Update user UI
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
      testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(RunBtn → ElemDisabled)
//Run plumbing
testPumping.expectMsg(M.StartPumping)
testPumping.send(controller, M.PumpingStarted)
//Update user UI
val statusStr3 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr3 " + statusStr3)
val statusStr4 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr4 " + statusStr4)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemEnabled,
        HideAllToolsUiBtn → ElemEnabled,
        SkipAllTimeoutTaskBtn → ElemEnabled,
        StopSketchBtn → ElemEnabled)
//Log info
val info1 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info1: " + info1) //LogInfo(None,Workbench,Sketch 'TestSketch1' successfully built.)
val info2 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info2: " + info2) //LogInfo(None,Workbench,Pumping started.)
//Sketch built
testMainController.expectMsgType[M.SketchBuilt].workbench.asInstanceOf[TestSketchWithSmallTimeout]
val statusStr5 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr5 " + statusStr5)
//Run plumbing
testMainController.expectNoMsg(1.second)
testSketchUi.expectNoMsg(1.second)
testUserLogging.expectNoMsg(1.second)
testVisualization.expectNoMsg(1.second)
testPumping.expectNoMsg(1.second)}
"by WorkbenchControllerStart, create sketch instance show UI, with autorun off" in new TestCase {
//Preparing
val controller = newWorkbenchController( newTestSketchData(
clazz = classOf[TestSketchWithSmallTimeout],
autorun = false,
showUserLogUi = false,
showVisualisationUi = false))
//Send start
testMainController.send(controller, M.StartWorkbenchController)
//Show sketch UI
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsg(M.ShowSketchUI)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemHide,
        VisualisationBtn → ElemHide)
testSketchUi.send(controller, M.SketchUIChanged(isShow = true))
//Get context
testActor.send(controller, M.GetWorkbenchContext(testActor.ref))
testActor.expectMsgType[Either[Exception, WorkbenchContext]]
//Update user UI
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
      testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(RunBtn → ElemEnabled)
//Log info
val info1 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info1: " + info1)
//Sketch built
testMainController.expectMsgType[M.SketchBuilt].workbench.asInstanceOf[TestSketchWithSmallTimeout]
//Update status str
val statusStr3 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr3 " + statusStr3)
//Run plumbing
testMainController.expectNoMsg(1.second)
testSketchUi.expectNoMsg(1.second)
testUserLogging.expectNoMsg(1.second)
testVisualization.expectNoMsg(1.second)
testPumping.expectNoMsg(1.second)}
"by WorkbenchControllerStart, terminate sketch if not build in time" in new TestCase {
//Preparing
val controller = newWorkbenchController( newTestSketchData(
clazz = classOf[TestSketchWithBigTimeout],
autorun = false,
showUserLogUi = false,
showVisualisationUi = false))
//Send start
testMainController.send(controller, M.StartWorkbenchController)
//Show sketch UI
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsg(M.ShowSketchUI)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemHide,
        VisualisationBtn → ElemHide)
testSketchUi.send(controller, M.SketchUIChanged(isShow = true))
//Wait for time out
sleep(5.second)
//Error log
val error1 = testUserLogging.expectMsgType[M.LogError]
println("[WorkbenchController] error1: " + error1)
//Sketch UI update
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled)
//Sketch error
testMainController.expectMsgType[M.SketchError]}
"by WorkbenchControllerStart, terminate sketch if error on build" in new TestCase {
//Preparing
val controller = newWorkbenchController( newTestSketchData(
clazz = classOf[TestSketchWithError],
autorun = false,
showUserLogUi = false,
showVisualisationUi = false))
//Send start
testMainController.send(controller, M.StartWorkbenchController)
//Show sketch UI
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsg(M.ShowSketchUI)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemHide,
        VisualisationBtn → ElemHide)
testSketchUi.send(controller, M.SketchUIChanged(isShow = true))
//Error log
val error1 = testUserLogging.expectMsgType[M.LogError]
println("[WorkbenchController] error1: " + error1)
//Sketch UI update
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled)
//Sketch error
testMainController.expectMsgType[M.SketchError]}
"by GetWorkbenchContext, create and return WorkbenchContext" in new TestCase {
//Preparing
val controller = newWorkbenchController( newTestSketchData(
clazz = classOf[TestSketchWithSmallTimeout],
autorun = false,
showUserLogUi = false,
showVisualisationUi = false))
val askMainController = testAskMainController(controller)
val askTimeout = 1.second
//Start
testMainController.send(controller, M.StartWorkbenchController)
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsg(M.ShowSketchUI)
testSketchUi.expectMsgType[M.UpdateSketchUIState]
testSketchUi.send(controller, M.SketchUIChanged(isShow = true))
//Construct Workbench and do ask
val workbench = new WorkbenchLike{
val res: Either[Exception,WorkbenchContext] = Await.result(
ask(askMainController, M.NewWorkbenchContext(this))(askTimeout).mapTo[Either[Exception,WorkbenchContext]],
askTimeout)
println("[WorkbenchControllerTest.workbench] res: " + res)
res.isRight shouldEqual true
protected implicit val context: WorkbenchContext = res.right.get}
//UI update, log and built
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
testSketchUi.expectMsgType[M.UpdateSketchUIState]
testUserLogging.expectMsgType[M.LogInfo]
testMainController.expectMsgType[M.SketchBuilt]}
}
"WorkbenchController in work" should{
"by RunBtn hit, run sketch" in new TestCase {
//Preparing
val controller = newBuiltWorkbenchController()
//Send start
testSketchUi.send(controller, M.SketchUIActionTriggered(RunBtn, Unit))
//Run Pumping
testPumping.expectMsg(M.StartPumping)
testPumping.send(controller, M.PumpingStarted)
//UI update
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
      testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(RunBtn → ElemDisabled) //On StartPumping
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map( //On PumpingStarted
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemEnabled,
        HideAllToolsUiBtn → ElemEnabled,
        SkipAllTimeoutTaskBtn → ElemEnabled,
        StopSketchBtn → ElemEnabled)
//User log
val info1 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info1: " + info1)
//Update status string
val statusStr3 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr3 " + statusStr3)
//Run plumbing
testMainController.expectNoMsg(1.second)
testSketchUi.expectNoMsg(1.second)
testUserLogging.expectNoMsg(1.second)
testVisualization.expectNoMsg(1.second)
testPumping.expectNoMsg(1.second)}
"by handle buttons hits in Working mode" in new TestCase {
//Preparing
val controller = newStartedWorkbenchController()
//Hit ShowAllToolsUiBtn
testSketchUi.send(controller, M.SketchUIActionTriggered(ShowAllToolsUiBtn, Unit))
testPumping.expectMsg(M.ShowAllToolUi)
//Hit HideAllToolsUiBtn
testSketchUi.send(controller, M.SketchUIActionTriggered(HideAllToolsUiBtn, Unit))
testPumping.expectMsg(M.HideAllToolUi)
//Hit SkipAllTimeoutTaskBtn
testSketchUi.send(controller, M.SketchUIActionTriggered(SkipAllTimeoutTaskBtn, Unit))
testPumping.expectMsg(M.SkipAllTimeoutTask)
//Hit LogBtn
testSketchUi.send(controller, M.SketchUIActionTriggered(LogBtn, ElemShow))
testUserLogging.expectMsg(M.ShowUserLoggingUI)
testUserLogging.send(controller, M.UserLoggingUIChanged(isShow = true))
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        LogBtn → ElemShow)
testSketchUi.send(controller, M.SketchUIActionTriggered(LogBtn, ElemHide))
testUserLogging.expectMsg(M.HideUserLoggingUI)
testUserLogging.send(controller, M.UserLoggingUIChanged(isShow = false))
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        LogBtn → ElemHide)
//Hit VisualisationBtn
testSketchUi.send(controller, M.SketchUIActionTriggered(VisualisationBtn, ElemShow))
testVisualization.expectMsg(M.ShowVisualizationUI)
testVisualization.send(controller, M.VisualizationUIChanged(isShow = true))
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        VisualisationBtn → ElemShow)
testSketchUi.send(controller, M.SketchUIActionTriggered(VisualisationBtn, ElemHide))
testVisualization.expectMsg(M.HideVisualizationUI)
testVisualization.send(controller, M.VisualizationUIChanged(isShow = false))
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        VisualisationBtn → ElemHide)}
"by StopSketchBtn hit, stop sketch" in new TestCase {
//Preparing
val controller = newStartedWorkbenchController()
//Send stop
testSketchUi.send(controller, M.SketchUIActionTriggered(StopSketchBtn, Unit))
      //Stopping of pumping
testPumping.expectMsg(M.StopPumping)
testPumping.send(controller, M.PumpingStopped)
//Log
val info3 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info3: " + info3) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
//UI update
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled)}
}
"WorkbenchController on shutdown" should{
"stop in Creating state" in new TestCase {
//Preparing
val controller = newWorkbenchController( newTestSketchData(
clazz = classOf[TestSketchWithSmallTimeout],
autorun = false,
showUserLogUi = true,
showVisualisationUi = true))
testMainController.watch(controller)
//Send start
testMainController.send(controller, M.StartWorkbenchController)
//Show sketch UI
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsg(M.ShowSketchUI)
testSketchUi.expectMsgType[M.UpdateSketchUIState]
testSketchUi.send(controller, M.SketchUIChanged(isShow = true))
//Send stop
testMainController.send(controller, M.ShutdownWorkbenchController)
//Show user logging UI
testUserLogging.expectMsg(M.ShowUserLoggingUI)
testUserLogging.send(controller, M.UserLoggingUIChanged(isShow = true))
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
testSketchUi.expectMsgType[M.UpdateSketchUIState]
//Show visualization UI
testVisualization.expectMsg(M.ShowVisualizationUI)
testVisualization.send(controller, M.VisualizationUIChanged(isShow = true))
testSketchUi.expectMsgType[M.UpdateSketchUIState]
//Log
val info2 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info2: " + info2) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
//UI update
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemDisabled,
        VisualisationBtn → ElemDisabled)
//Terminate visualization UI
testVisualization.expectMsg(M.TerminateVisualization)
testVisualization.send(controller, M.VisualizationTerminated)
//Terminate user logging UI
testUserLogging.expectMsg(M.TerminateUserLogging)
testUserLogging.send(controller, M.UserLoggingTerminated)
//Terminate sketch UI
testSketchUi.expectMsg(M.TerminateSketchUI)
testSketchUi.send(controller, M.SketchUITerminated)
//Terminating of controller
testMainController.expectMsg(M.WorkbenchControllerTerminated)
testMainController.expectTerminated(controller)}
"stop in Building state" in new TestCase {
//Preparing
val controller = newWorkbenchController( newTestSketchData(
clazz = classOf[TestSketchWithSmallTimeout],
autorun = false,
showUserLogUi = false,
showVisualisationUi = false))
testMainController.watch(controller)
//Send start
testMainController.send(controller, M.StartWorkbenchController)
//Show sketch UI
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsg(M.ShowSketchUI)
testSketchUi.expectMsgType[M.UpdateSketchUIState]
testSketchUi.send(controller, M.SketchUIChanged(isShow = true))
//Get context
testActor.send(controller, M.GetWorkbenchContext(testActor.ref))
testActor.expectMsgType[Either[Exception, WorkbenchContext]]
//Wait for controller switch to Building state
sleep(1.second)
//UI update
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemEnabled)
//Log
val info1 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info1: " + info1) //LogInfo(None,Workbench,Sketch 'TestSketch1' successfully built. Auto-run is off, hit 'play' button to start sketch.)
//Wait for controller switch to Building state
sleep(1.second)
//Send stop
testMainController.send(controller, M.ShutdownWorkbenchController)
val statusStr3 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr3 " + statusStr3)
val statusStr4 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr4 " + statusStr4)
val statusStr5 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr5 " + statusStr5)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemDisabled,
        VisualisationBtn → ElemDisabled)
//Sketch built
testMainController.expectMsgType[M.SketchBuilt]
//Log
val info2 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info2: " + info2) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
//Terminate visualization UI
testVisualization.expectMsg(M.TerminateVisualization)
testVisualization.send(controller, M.VisualizationTerminated)
//Terminate user logging UI
testUserLogging.expectMsg(M.TerminateUserLogging)
testUserLogging.send(controller, M.UserLoggingTerminated)
//Terminate sketch UI
testSketchUi.expectMsg(M.TerminateSketchUI)
testSketchUi.send(controller, M.SketchUITerminated)
//Terminating of controller
val endData = testMainController.expectMsgType[M.SketchDone]
endData.className shouldEqual classOf[TestSketchWithSmallTimeout].getName
testMainController.expectMsg(M.WorkbenchControllerTerminated)
testMainController.expectTerminated(controller)}
"stop in Built state" in new TestCase {
//Preparing
val controller = newBuiltWorkbenchController()
testMainController.watch(controller)
//Send stop
testMainController.send(controller, M.ShutdownWorkbenchController)
val info1 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info1: " + info1) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
val statusStr3 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr3 " + statusStr3)
testSketchUi.expectMsgType[M.UpdateSketchUIState]
//Terminate visualization UI
testVisualization.expectMsg(M.TerminateVisualization)
testVisualization.send(controller, M.VisualizationTerminated)
//Terminate user logging UI
testUserLogging.expectMsg(M.TerminateUserLogging)
testUserLogging.send(controller, M.UserLoggingTerminated)
//Terminate sketch UI
testSketchUi.expectMsg(M.TerminateSketchUI)
testSketchUi.send(controller, M.SketchUITerminated)
//Terminating of controller
val endData = testMainController.expectMsgType[M.SketchDone]
endData.className shouldEqual classOf[TestSketchWithSmallTimeout].getName
testMainController.expectMsg(M.WorkbenchControllerTerminated)
testMainController.expectTerminated(controller)}
"stop in BuildingFailed state" in new TestCase {
//Preparing
val controller = newWorkbenchController( newTestSketchData(
clazz = classOf[TestSketchWithError],
autorun = false,
showUserLogUi = false,
showVisualisationUi = false))
testMainController.watch(controller)
//Send start
testMainController.send(controller, M.StartWorkbenchController)
//Show sketch UI
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsg(M.ShowSketchUI)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemHide,
        VisualisationBtn → ElemHide)
testSketchUi.send(controller, M.SketchUIChanged(isShow = true))
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled)
//Error log
val error1 = testUserLogging.expectMsgType[M.LogError]
println("[WorkbenchController] error1: " + error1) //LogError(None,Workbench,Some(java.lang.Exception: Oops!!),Exception on building of sketch.)
//Send stop
testMainController.send(controller, M.ShutdownWorkbenchController)
val info1 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info1: " + info1) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
val statusStr3 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr3 " + statusStr3)
val statusStr4 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr4 " + statusStr4)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemDisabled,
        VisualisationBtn → ElemDisabled)
//Terminate visualization UI
testVisualization.expectMsg(M.TerminateVisualization)
testVisualization.send(controller, M.VisualizationTerminated)
//Terminate user logging UI
testUserLogging.expectMsg(M.TerminateUserLogging)
testUserLogging.send(controller, M.UserLoggingTerminated)
//Terminate sketch UI
testSketchUi.expectMsg(M.TerminateSketchUI)
testSketchUi.send(controller, M.SketchUITerminated)
//Terminating of controller
val endData = testMainController.expectMsgType[M.SketchError]
endData.className shouldEqual classOf[TestSketchWithError].getName
testMainController.expectMsg(M.WorkbenchControllerTerminated)
testMainController.expectTerminated(controller)}
"stop in Starting state" in new TestCase {
//Preparing
val controller = newBuiltWorkbenchController()
testMainController.watch(controller)
//Send Start
testSketchUi.send(controller, M.SketchUIActionTriggered(RunBtn, Unit))
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled)
testPumping.expectMsg(M.StartPumping)
//Send stop
testMainController.send(controller, M.ShutdownWorkbenchController)
val info1 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info1: " + info1) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
//Pumping started
testPumping.send(controller, M.PumpingStarted)
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
val statusStr3 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr3 " + statusStr3)
testSketchUi.expectMsgType[M.UpdateSketchUIState]
val info2 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info2: " + info2) //LogInfo(None,Workbench,Pumping started.)
      //Stopping of pumping
testPumping.expectMsg(M.StopPumping)
testPumping.send(controller, M.PumpingStopped)
//UI update
val statusStr4 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr4 " + statusStr4)
val statusStr5 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr5 " + statusStr5)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled)
//Log
val info3 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info3: " + info3) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
//UI update
val statusStr6 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr6 " + statusStr6)
val statusStr7 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr7 " + statusStr7)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemDisabled,
        VisualisationBtn → ElemDisabled)
//Terminate visualization UI
testVisualization.expectMsg(M.TerminateVisualization)
testVisualization.send(controller, M.VisualizationTerminated)
//Terminate user logging UI
val info4 = testUserLogging.expectMsg(M.TerminateUserLogging)
println("[WorkbenchController] info4: " + info4) //
testUserLogging.send(controller, M.UserLoggingTerminated)
//Terminate sketch UI
testSketchUi.expectMsg(M.TerminateSketchUI)
testSketchUi.send(controller, M.SketchUITerminated)
//Terminating of controller
val endData = testMainController.expectMsgType[M.SketchDone]
endData.className shouldEqual classOf[TestSketchWithSmallTimeout].getName
testMainController.expectMsg(M.WorkbenchControllerTerminated)
testMainController.expectTerminated(controller)}
"stop in Working state" in new TestCase {
//Preparing
val controller = newStartedWorkbenchController()
testMainController.watch(controller)
//Send stop
testMainController.send(controller, M.ShutdownWorkbenchController)
val info1 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info1: " + info1) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
      //Stopping of pumping
testPumping.expectMsg(M.StopPumping)
testPumping.send(controller, M.PumpingStopped)
//UI update
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
val statusStr3 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr3 " + statusStr3)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled)
//Log
val info3 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info3: " + info3) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
//UI update
val statusStr4 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr4 " + statusStr4)
val statusStr5 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr5 " + statusStr5)
testSketchUi.expectMsgType[M.UpdateSketchUIState].state shouldEqual Map(
        RunBtn → ElemDisabled,
        ShowAllToolsUiBtn → ElemDisabled,
        HideAllToolsUiBtn → ElemDisabled,
        SkipAllTimeoutTaskBtn → ElemDisabled,
        StopSketchBtn → ElemDisabled,
        LogBtn → ElemDisabled,
        VisualisationBtn → ElemDisabled)
//Terminate visualization UI
testVisualization.expectMsg(M.TerminateVisualization)
testVisualization.send(controller, M.VisualizationTerminated)
//Terminate user logging UI
val info4 = testUserLogging.expectMsg(M.TerminateUserLogging)
println("[WorkbenchController] info4: " + info4) //
testUserLogging.send(controller, M.UserLoggingTerminated)
//Terminate sketch UI
testSketchUi.expectMsg(M.TerminateSketchUI)
testSketchUi.send(controller, M.SketchUITerminated)
//Terminating of controller
val endData = testMainController.expectMsgType[M.SketchDone]
endData.className shouldEqual classOf[TestSketchWithSmallTimeout].getName
testMainController.expectMsg(M.WorkbenchControllerTerminated)
testMainController.expectTerminated(controller)}
"stop by hit of UI close button" in new TestCase {
//Preparing
val controller = newBuiltWorkbenchController()
testMainController.watch(controller)
//Send close button hit
testSketchUi.send(controller, M.SketchUIActionTriggered(CloseBtn, Unit))
val info1 = testUserLogging.expectMsgType[M.LogInfo]
println("[WorkbenchController] info1: " + info1) //LogInfo(None,Workbench,The Shutdown signal received, sketch will terminated.)
val statusStr1 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr1 " + statusStr1)
val statusStr2 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr2 " + statusStr2)
val statusStr3 = testSketchUi.expectMsgType[M.SetSketchUIStatusString]
println("[WorkbenchControllerTest] statusStr3 " + statusStr3)
testSketchUi.expectMsgType[M.UpdateSketchUIState]
//Terminate visualization UI
testVisualization.expectMsg(M.TerminateVisualization)
testVisualization.send(controller, M.VisualizationTerminated)
//Terminate user logging UI
testUserLogging.expectMsg(M.TerminateUserLogging)
testUserLogging.send(controller, M.UserLoggingTerminated)
//Terminate sketch UI
testSketchUi.expectMsg(M.TerminateSketchUI)
testSketchUi.send(controller, M.SketchUITerminated)
//Terminating of controller
val endData = testMainController.expectMsgType[M.SketchDone]
endData.className shouldEqual classOf[TestSketchWithSmallTimeout].getName
testMainController.expectMsg(M.WorkbenchControllerTerminated)
testMainController.expectTerminated(controller)}
}
}
|
AlexCAB/ProbabilisticPlaying
|
mathact/src/test/scala/mathact/parts/control/infrastructure/WorkbenchControllerTest.scala
|
Scala
|
mit
| 41,818 |