code (string, length 5-1M) | repo_name (string, length 5-109) | path (string, length 6-208) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
---|---|---|---|---|---|
package io.circe.generic.util.macros
import io.circe.{ Codec, Decoder, Encoder }
import scala.reflect.macros.blackbox
abstract class JsonCodecMacros {
val c: blackbox.Context
import c.universe._
protected[this] def semiautoObj: Symbol
protected[this] def deriveMethodPrefix: String
private[this] def isCaseClassOrSealed(clsDef: ClassDef) =
clsDef.mods.hasFlag(Flag.CASE) || clsDef.mods.hasFlag(Flag.SEALED)
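// Hedged sketch of the expansion `constructJsonCodec` produces for an annotated
// case class. The annotation name and the derived method are assumptions here:
// they depend on the concrete subclass that wires `semiautoObj` and
// `deriveMethodPrefix` (in circe itself this is `@JsonCodec` and `deriveCodec`).
//
//   @JsonCodec case class Foo(a: Int)
//
//   // roughly expands to:
//   case class Foo(a: Int)
//   object Foo {
//     implicit val codecForFoo: Codec.AsObject[Foo] = deriveCodec[Foo]
//   }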
protected[this] final def constructJsonCodec(annottees: Tree*): Tree = annottees match {
case List(clsDef: ClassDef) if isCaseClassOrSealed(clsDef) =>
q"""
$clsDef
object ${clsDef.name.toTermName} {
..${codec(clsDef)}
}
"""
case List(
clsDef: ClassDef,
q"..$mods object $objName extends { ..$objEarlyDefs } with ..$objParents { $objSelf => ..$objDefs }"
) if isCaseClassOrSealed(clsDef) =>
q"""
$clsDef
$mods object $objName extends { ..$objEarlyDefs } with ..$objParents { $objSelf =>
..$objDefs
..${codec(clsDef)}
}
"""
case _ => c.abort(c.enclosingPosition, "Invalid annotation target: must be a case class or a sealed trait/class")
}
private[this] val DecoderClass = typeOf[Decoder[_]].typeSymbol.asType
private[this] val EncoderClass = typeOf[Encoder[_]].typeSymbol.asType
private[this] val AsObjectEncoderClass = typeOf[Encoder.AsObject[_]].typeSymbol.asType
private[this] val AsObjectCodecClass = typeOf[Codec.AsObject[_]].typeSymbol.asType
private[this] val macroName: Tree = {
c.prefix.tree match {
case Apply(Select(New(name), _), _) => name
case _ => c.abort(c.enclosingPosition, "Unexpected macro application")
}
}
private[this] val codecType: JsonCodecType = {
c.prefix.tree match {
case q"new ${`macroName`}()" => JsonCodecType.Both
case q"new ${`macroName`}(encodeOnly = true)" => JsonCodecType.EncodeOnly
case q"new ${`macroName`}(decodeOnly = true)" => JsonCodecType.DecodeOnly
// format: off
case _ => c.abort(c.enclosingPosition, s"Unsupported arguments supplied to @$macroName")
// format: on
}
}
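// Hedged examples of the annotation prefixes matched above (the annotation name
// itself is whatever `macroName` resolves to, e.g. `@JsonCodec` in circe):
//   @JsonCodec                    -> JsonCodecType.Both
//   @JsonCodec(encodeOnly = true) -> JsonCodecType.EncodeOnly
//   @JsonCodec(decodeOnly = true) -> JsonCodecType.DecodeOnly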
private[this] def codec(clsDef: ClassDef): List[Tree] = {
val tpname = clsDef.name
val tparams = clsDef.tparams
val decodeName = TermName("decode" + tpname.decodedName)
val encodeName = TermName("encode" + tpname.decodedName)
val codecName = TermName("codecFor" + tpname.decodedName)
def deriveName(suffix: String) = TermName(deriveMethodPrefix + suffix)
if (tparams.isEmpty) {
val Type = tpname
List(
codecType match {
case JsonCodecType.Both =>
q"""implicit val $codecName: $AsObjectCodecClass[$Type] = $semiautoObj.${deriveName("Codec")}[$Type]"""
case JsonCodecType.DecodeOnly =>
q"""implicit val $decodeName: $DecoderClass[$Type] = $semiautoObj.${deriveName("Decoder")}[$Type]"""
case JsonCodecType.EncodeOnly =>
q"""implicit val $encodeName: $AsObjectEncoderClass[$Type] = $semiautoObj.${deriveName("Encoder")}[$Type]"""
}
)
} else {
val tparamNames = tparams.map(_.name)
def mkImplicitParams(prefix: String, typeSymbol: TypeSymbol) =
tparamNames.zipWithIndex.map {
case (tparamName, i) =>
val paramName = TermName(s"$prefix$i")
val paramType = tq"$typeSymbol[$tparamName]"
q"$paramName: $paramType"
}
val decodeParams = mkImplicitParams("decode", DecoderClass)
val encodeParams = mkImplicitParams("encode", EncoderClass)
val Type = tq"$tpname[..$tparamNames]"
val (decoder, encoder) = (
q"""implicit def $decodeName[..$tparams](implicit ..$decodeParams): $DecoderClass[$Type] =
$semiautoObj.${deriveName("Decoder")}[$Type]""",
q"""implicit def $encodeName[..$tparams](implicit ..$encodeParams): $AsObjectEncoderClass[$Type] =
$semiautoObj.${deriveName("Encoder")}[$Type]"""
)
codecType match {
case JsonCodecType.Both => List(decoder, encoder)
case JsonCodecType.DecodeOnly => List(decoder)
case JsonCodecType.EncodeOnly => List(encoder)
}
}
}
}
private sealed trait JsonCodecType
private object JsonCodecType {
case object Both extends JsonCodecType
case object DecodeOnly extends JsonCodecType
case object EncodeOnly extends JsonCodecType
}
| travisbrown/circe | modules/generic/shared/src/main/scala-2/io/circe/generic/util/macros/JsonCodecMacros.scala | Scala | apache-2.0 | 4,498 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.controller
import org.apache.predictionio.core.BaseDataSource
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
/** Base class of a parallel data source.
*
* A parallel data source runs locally within a single machine, or in parallel
* on a cluster, to return data that is distributed across a cluster.
*
* @tparam TD Training data class.
* @tparam EI Evaluation Info class.
* @tparam Q Input query class.
* @tparam A Actual value class.
* @group Data Source
*/
abstract class PDataSource[TD, EI, Q, A]
extends BaseDataSource[TD, EI, Q, A] {
def readTrainingBase(sc: SparkContext): TD = readTraining(sc)
/** Implement this method to only return training data from a data source */
def readTraining(sc: SparkContext): TD
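// Hedged sketch of a minimal implementation; the type and file names below are
// hypothetical, only the PDataSource/readTraining signatures come from this file:
//
//   class MyDataSource extends PDataSource[MyTrainingData, EmptyEvaluationInfo, MyQuery, MyActual] {
//     def readTraining(sc: SparkContext): MyTrainingData =
//       new MyTrainingData(events = sc.textFile("path/to/events.txt"))
//   }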
def readEvalBase(sc: SparkContext): Seq[(TD, EI, RDD[(Q, A)])] = readEval(sc)
/** To provide an evaluation feature for your engine, you must override this
* method to return data for evaluation from a data source. Returned data can
* optionally include a sequence of query and actual value pairs for
* evaluation purposes.
*
* The default implementation returns an empty sequence as a stub, so that
* an engine can be compiled without implementing evaluation.
*/
def readEval(sc: SparkContext): Seq[(TD, EI, RDD[(Q, A)])] =
Seq[(TD, EI, RDD[(Q, A)])]()
@deprecated("Use readEval() instead.", "0.9.0")
def read(sc: SparkContext): Seq[(TD, EI, RDD[(Q, A)])] = readEval(sc)
}
| himanshudhami/PredictionIO | core/src/main/scala/org/apache/predictionio/controller/PDataSource.scala | Scala | apache-2.0 | 2,322 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.secondaryindex
import org.apache.carbondata.core.metadata.CarbonMetadata
import org.apache.carbondata.spark.exception.ProcessMetaDataException
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.secondaryindex.joins.BroadCastSIFilterPushJoin
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
class TestAlterTableColumnRenameWithSecondaryIndex extends QueryTest with BeforeAndAfterAll {
override protected def beforeAll(): Unit = {
dropTable()
}
test("test direct rename on SI table") {
createTable()
sql("create index index1 on table si_rename(c) AS 'carbondata' ")
val ex = intercept[ProcessMetaDataException] {
sql("alter table index1 change c test string")
}
assert(ex.getMessage.contains("Alter table column rename is not allowed on index table"))
}
test("test column rename with SI table") {
dropTable()
createTable()
sql("create index index1 on table si_rename(c) AS 'carbondata' ")
sql("alter table si_rename change c test string")
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default", "index1")
assert(null != carbonTable.getColumnByName("test"))
assert(null == carbonTable.getColumnByName("c"))
}
test("test column rename with multiple SI tables") {
dropTable()
createTable()
sql("create index index1 on table si_rename(c) AS 'carbondata' ")
sql("create index index2 on table si_rename(c,d) AS 'carbondata' ")
sql("alter table si_rename change c test string")
sql("alter table si_rename change d testSI string")
val carbonTable1 = CarbonMetadata.getInstance().getCarbonTable("default", "index1")
assert(null != carbonTable1.getColumnByName("test"))
assert(null == carbonTable1.getColumnByName("c"))
val carbonTable2 = CarbonMetadata.getInstance().getCarbonTable("default", "index2")
assert(null != carbonTable2.getColumnByName("testSI"))
assert(null == carbonTable2.getColumnByName("d"))
}
test("test column rename with SI tables load and query") {
dropTable()
createTable()
sql("create index index1 on table si_rename(c) AS 'carbondata'")
sql("create index index2 on table si_rename(c,d) AS 'carbondata'")
sql("insert into si_rename select 'abc',3,'def','mno'")
sql("insert into si_rename select 'def',4,'xyz','pqr'")
val query1 = sql("select c,d from si_rename where d = 'pqr' or c = 'def'").count()
sql("alter table si_rename change c test string")
sql("alter table si_rename change d testSI string")
sql("show indexes on si_rename").collect
val query2 = sql("select test,testsi from si_rename where testsi = 'pqr' or test = 'def'").count()
assert(query1 == query2)
val df = sql("select test,testsi from si_rename where testsi = 'pqr' or test = 'def'").queryExecution.sparkPlan
assert(isFilterPushedDownToSI(df))
}
override protected def afterAll(): Unit = {
dropTable()
}
private def dropTable(): Unit = {
sql("drop table if exists si_rename")
}
private def createTable(): Unit = {
sql("create table si_rename (a string,b int, c string, d string) STORED AS carbondata")
}
/**
* Checks whether the filter is pushed down to the SI table or not.
*
* @param sparkPlan physical plan of the executed query
* @return true if a BroadCastSIFilterPushJoin node is present in the plan
*/
private def isFilterPushedDownToSI(sparkPlan: SparkPlan): Boolean = {
var isValidPlan = false
sparkPlan.transform {
case broadCastSIFilterPushDown: BroadCastSIFilterPushJoin =>
isValidPlan = true
broadCastSIFilterPushDown
}
isValidPlan
}
}
| jackylk/incubator-carbondata | index/secondary-index/src/test/scala/org/apache/carbondata/spark/testsuite/secondaryindex/TestAlterTableColumnRenameWithSecondaryIndex.scala | Scala | apache-2.0 | 4,502 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.examples
import ai.h2o.sparkling.examples.CraigslistJobTitlesApp._
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.SparkSession
/**
* Variant of the Craigslist app with Structured Streaming support to classify incoming events.
*
* Launch `nc -lk 9999` and send events (job titles) from your console.
*/
object CraigslistJobTitlesStructuredStreamingApp {
def main(args: Array[String]): Unit = {
val spark = SparkSession
.builder()
.appName("Craigslist Job Titles Structured Streaming App")
.getOrCreate()
val titlesTable = loadTitlesTable(spark)
val model = fitModelPipeline(titlesTable)
// consume data from socket
val dataStream = spark.readStream
.format("socket")
.option("host", "localhost")
.option("port", 9999)
.load()
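// Hedged illustration: Spark's socket source yields rows with a single string
// column named "value", so typing a line such as
//   Senior Software Engineer
// into the `nc -lk 9999` session becomes one streaming row to classify.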
// bring encoders into scope
import spark.implicits._
// interpret input data as job titles
val jobTitlesStream = dataStream
.as[String]
.withColumnRenamed("value", "jobtitle")
// use model to predict category
val prediction = model.transform(jobTitlesStream)
// select relevant output columns
val categoryPrediction = prediction.select("jobtitle", "prediction", "detailed_prediction.probabilities.*")
// start streaming query, put output to console
val query = categoryPrediction.writeStream
.format("console")
.trigger(Trigger.ProcessingTime("10 seconds"))
.start()
query.awaitTermination()
}
}
| h2oai/sparkling-water | examples/src/main/scala/ai/h2o/sparkling/examples/CraigslistJobTitlesStructuredStreamingApp.scala | Scala | apache-2.0 | 2,307 |
package utils
import models.ZkKafka
import models.ZkKafka._
import org.apache.commons.codec.digest.DigestUtils
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
import play.api.i18n.Messages
import play.api.libs.json.Json._
import play.api.libs.json._
object JsonFormats {
private def optionLongtoJsValue(maybeId: Option[Long]) = maybeId.map({ l => JsNumber(l) }).getOrElse(JsNull)
implicit object DeltaFormat extends Format[Delta] {
def reads(json: JsValue): JsResult[Delta] = JsSuccess(Delta(
partition = (json \ "partition").as[Int],
amount = (json \ "amount").as[Option[Long]],
current = (json \ "current").as[Long],
storm = (json \ "storm").as[Option[Long]]
))
def writes(o: Delta): JsValue = {
val doc: Map[String,JsValue] = Map(
"partition" -> JsNumber(o.partition),
"amount" -> optionLongtoJsValue(o.amount),
"current" -> JsNumber(o.current),
"storm" -> optionLongtoJsValue(o.storm)
)
toJson(doc)
}
}
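// Hedged example of the JSON handled by DeltaFormat (values are made up):
//   {"partition": 3, "amount": 120, "current": 4500, "storm": 4380}
// `amount` and `storm` may also be null, matching the Option[Long] fields.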
}
| evertrue/capillary | app/utils/JsonFormats.scala | Scala | mit | 1,112 |
package com.appliedscala.events
import java.util.UUID
import org.joda.time.DateTime
import play.api.libs.json.{JsObject, Json}
/**
* Domain event raised when an existing answer is updated.
*/
case class AnswerUpdated(answerId: UUID, answerText: String, questionId: UUID, updatedBy: UUID, updated: DateTime)
extends EventData {
override def action = AnswerUpdated.actionName
override def json: JsObject = Json.writes[AnswerUpdated].writes(this)
}
object AnswerUpdated {
val actionName = "answer-updated"
implicit val reads = Json.reads[AnswerUpdated]
}
| getArtemUsername/play-and-events | events/src/main/scala/com/appliedscala/events/AnswerUpdated.scala | Scala | mit | 548 |
package org.glotaran.kernel.types.messages.supervisor
/**
* Sent to the supervisor to add a specific number of worker actors.
*/
case class AddWorker(number: Int)
| glotaran/glotaran-kernel | glotaran-kernel-types/src/main/scala/org/glotaran/kernel/types/messages/supervisor/AddWorker.scala | Scala | gpl-3.0 | 164 |
//############################################################################
// Exceptions
//############################################################################
//############################################################################
abstract class IntMap[A] {
def lookup(key: Int): A = this match {
case Empty() => sys.error("KO")
case _ => sys.error("ok")
}
}
case class Empty[A]() extends IntMap[A];
object exceptions {
def check(what: String, actual: Any, expected: Any): Unit = {
val success: Boolean = actual == expected;
Console.print(if (success) "ok" else "KO");
var value: String = if (actual == null) "null" else actual.toString();
if (value == "\u0000") value = "\\u0000";
Console.print(": " + what + " = " + value);
if (!success) Console.print(" != " + expected);
Console.println()
Console.flush()
}
def test: Unit = {
val key = 2000;
val map: IntMap[String] = new Empty[String];
val value = try {
map.lookup(key)
} catch {
case e: Throwable => e.getMessage()
}
check("lookup(" + key + ")", value, "KO");
}
}
//############################################################################
object Test {
def main(args: Array[String]): Unit = {
exceptions.test;
}
}
//############################################################################
| scala/scala | test/files/run/exceptions.scala | Scala | apache-2.0 | 1,468 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import java.util.{Date}
import models._
import views._
/**
* Manage task-related operations.
*/
class Tasks extends Controller with Secured {
/**
* Display the tasks panel for this project.
*/
def index(project: Long) = IsMemberOf(project) { _ => implicit request =>
Project.findById(project).map { p =>
val tasks = Task.findByProject(project)
val team = Project.membersOf(project)
Ok(html.tasks.index(p, tasks, team))
}.getOrElse(NotFound)
}
val taskForm = Form(
tuple(
"title" -> nonEmptyText,
"dueDate" -> optional(date("MM/dd/yy")),
"assignedTo" -> optional(text)
)
)
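// Hedged example of a request body this form binds (values are made up;
// dueDate must match the MM/dd/yy pattern above):
//   title=Write+release+notes&dueDate=04/30/15&assignedTo=guillaume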
// -- Tasks
/**
* Create a task in this project.
*/
def add(project: Long, folder: String) = IsMemberOf(project) { _ => implicit request =>
taskForm.bindFromRequest.fold(
errors => BadRequest,
{
case (title, dueDate, assignedTo) =>
val task = Task.create(
Task(None, folder, project, title, false, dueDate, assignedTo)
)
Ok(html.tasks.item(task))
}
)
}
/**
* Update a task
*/
def update(task: Long) = IsOwnerOf(task) { _ => implicit request =>
Form("done" -> boolean).bindFromRequest.fold(
errors => BadRequest,
isDone => {
Task.markAsDone(task, isDone)
Ok
}
)
}
/**
* Delete a task
*/
def delete(task: Long) = IsOwnerOf(task) { _ => implicit request =>
Task.delete(task)
Ok
}
// -- Task folders
/**
* Add a new folder.
*/
def addFolder = Action {
Ok(html.tasks.folder("New folder"))
}
/**
* Delete a full tasks folder.
*/
def deleteFolder(project: Long, folder: String) = IsMemberOf(project) { _ => implicit request =>
Task.deleteInFolder(project, folder)
Ok
}
/**
* Rename a tasks folder.
*/
def renameFolder(project: Long, folder: String) = IsMemberOf(project) { _ => implicit request =>
Form("name" -> nonEmptyText).bindFromRequest.fold(
errors => BadRequest,
newName => {
Task.renameFolder(project, folder, newName)
Ok(newName)
}
)
}
}
| scoverage/scoverage-maven-samples | playframework/singlemodule/zentasks/zentasks-scala-2.10/app/controllers/Tasks.scala | Scala | apache-2.0 | 2,278 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sumologic.shellbase
import org.apache.commons.cli.CommandLine
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ShellCommandSetTest extends CommonWordSpec {
"A ShellCommandSet" should {
"always accept the help command" in {
val sut = new ShellCommandSet("test", "set help text")
sut.commands += new TestCommand("one")
run(sut, "test help") should be(true)
run(sut, "test ?") should be(true)
run(sut, "test ? one") should be(true)
run(sut, "test ? two") should be(false)
run(sut, "test") should be(true)
}
"support multi level help" in {
val sut = new ShellCommandSet("test", "set help text")
val nested = new ShellCommandSet("nested", "some help")
sut.commands += new TestCommand("one")
sut.commands += nested
nested.commands += new TestCommand("two")
run(sut, "help test one") should be(true)
run(sut, "help test nested two") should be(true)
run(sut, "help -a test nested two") should be(true)
run(sut, "help --all test") should be(true)
}
"execute commands in the set" in {
val sut = new ShellCommandSet("test", "set help text")
val one = new TestCommand("one")
val two = new TestCommand("two")
sut.commands += one
sut.commands += two
run(sut, "test one") should be(true)
one.commandLines.size should be(1)
two.commandLines.size should be(0)
}
"support nested sets" in {
val sut = new ShellCommandSet("test", "set help text")
val level = new ShellCommandSet("level", "level help text")
sut.commands += level
val one = new TestCommand("one")
val two = new TestCommand("two")
level.commands += one
level.commands += two
run(sut, "test level one") should be(true)
one.commandLines.size should be(1)
two.commandLines.size should be(0)
}
"return false" when {
"the command does not exist" in {
val sut = new ShellCommandSet("test", "set help text")
run(sut, "test say-what") should be(false)
}
"the command called returns false" in {
val sut = new ShellCommandSet("test", "set help text")
val one = new TestCommand("one")
one.returnResult = false
sut.commands += one
run(sut, "test one") should be(false)
one.commandLines.size should be(1)
}
"the command called throws a Throwable" in {
val sut = new ShellCommandSet("test", "set help text")
val one = new TestCommand("one") {
override def execute(cmdLine: CommandLine) = {
super.execute(cmdLine)
throw new Throwable("urgh!")
}
}
sut.commands += one
run(sut, "test one") should be(false)
one.commandLines.size should be(1)
}
"the command fails preExecute" in {
val sut = new ShellCommandSet("test", "set help text")
val throwExceptionShellHook: ShellCommandSet.ExecuteHook = (_, _) => throw new Exception("test")
sut.preExecuteHooks += throwExceptionShellHook
val one = new TestCommand("one")
one.returnResult = true
sut.commands += one
run(sut, "test one") should be(false)
one.commandLines.size should be(0)
}
"the command fails postExecute" in {
val sut = new ShellCommandSet("test", "set help text")
val throwExceptionShellHook: ShellCommandSet.ExecuteHook = (_, _) => throw new Exception("test")
sut.postExecuteHooks += throwExceptionShellHook
val one = new TestCommand("one")
one.returnResult = true
sut.commands += one
run(sut, "test one") should be(false)
one.commandLines.size should be(1)
}
}
"not run a command" when {
"validation fails on the set" in {
val sut = new ShellCommandSet("test", "set help text") with FailedValidation
val one = new TestCommand("one")
sut.commands += one
val result = run(sut, "test one")
result should be(false)
one.commandLines.size should be(0)
}
"validation fails on the command" in {
val sut = new ShellCommandSet("test", "set help text")
val one = new TestCommand("one") with FailedValidation
sut.commands += one
val result = run(sut, "test one")
result should be(false)
one.commandLines.size should be(0)
}
}
"not allow running old execute line" in {
val sut = new ShellCommandSet("blah", "")
intercept[IllegalAccessException] {
sut.execute(null)
}
}
}
def run(sut: ShellCommandSet, command: String): Boolean = {
val root = new ShellCommandSet("", "root help text")
root.commands += sut
root.executeLine(command.split(" ").toList)
}
}
class TestCommand(name: String) extends ShellCommand(name, "test command " + name) {
var commandLines = List[CommandLine]()
var returnResult = true
def execute(cmdLine: CommandLine) = {
commandLines +:= cmdLine
returnResult
}
}
trait FailedValidation extends ShellCommand {
override def validate(cmdLine: CommandLine) = {
Some("failure")
}
}
| SumoLogic/shellbase | shellbase-core/src/test/scala/com/sumologic/shellbase/ShellCommandSetTest.scala | Scala | apache-2.0 | 6,069 |
package mesosphere.marathon.core.launcher.impl
import mesosphere.marathon.core.launcher.TaskOp
import mesosphere.marathon.core.matcher.base.util.OfferOperationFactory
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.Task.LocalVolume
import mesosphere.util.state.FrameworkId
import org.apache.mesos.{ Protos => Mesos }
class TaskOpFactoryHelper(
private val principalOpt: Option[String],
private val roleOpt: Option[String]) {
private[this] val offerOperationFactory = new OfferOperationFactory(principalOpt, roleOpt)
def launch(
taskInfo: Mesos.TaskInfo,
newTask: Task,
oldTask: Option[Task] = None): TaskOp.Launch = {
assume(newTask.taskId.mesosTaskId == taskInfo.getTaskId, "marathon task id and mesos task id must be equal")
def createOperations = Seq(offerOperationFactory.launch(taskInfo))
TaskOp.Launch(taskInfo, newTask, oldTask, createOperations)
}
def reserveAndCreateVolumes(
frameworkId: FrameworkId,
newTask: Task,
resources: Iterable[Mesos.Resource],
localVolumes: Iterable[LocalVolume],
oldTask: Option[Task] = None): TaskOp.ReserveAndCreateVolumes = {
def createOperations = Seq(
offerOperationFactory.reserve(frameworkId, newTask.taskId, resources),
offerOperationFactory.createVolumes(frameworkId, newTask.taskId, localVolumes))
TaskOp.ReserveAndCreateVolumes(newTask, resources, localVolumes, oldTask, createOperations)
}
}
| pgkelley4/marathon | src/main/scala/mesosphere/marathon/core/launcher/impl/TaskOpFactoryHelper.scala | Scala | apache-2.0 | 1,464 |
package org.fedoraproject.mobile
import Implicits._
import android.content.Context
import android.os.Bundle
import android.util.Log
import android.view.{ LayoutInflater, Menu, MenuItem, View, ViewGroup }
import android.widget.{ AdapterView, ArrayAdapter, LinearLayout, TextView, Toast }
import argonaut._, Argonaut._
import scalaz._, Scalaz._
import uk.co.senab.actionbarpulltorefresh.library.PullToRefreshAttacher
import scala.io.Source
class StatusFragment
extends TypedFragment
with PullToRefreshAttacher.OnRefreshListener {
private lazy val refreshAdapter = new PullToRefreshAttacher(activity)
def onRefreshStarted(view: View): Unit = updateStatuses()
private def updateStatuses(): Unit = {
val progress = findView(TR.progress)
progress.setVisibility(View.VISIBLE)
Status.statuses(getActivity.getApplicationContext).runAsync(_.fold(
err =>
runOnUiThread(Toast.makeText(activity, R.string.status_failure, Toast.LENGTH_LONG).show),
res => {
runOnUiThread(progress.setVisibility(View.GONE))
res.decodeEither[Status.StatusesResponse].fold(
err => {
Log.e("StatusFragment", err.toString)
()
},
parsed => {
val adapter = new StatusAdapter(
activity,
android.R.layout.simple_list_item_1,
parsed.services.toArray.sortBy(_._2.name))
runOnUiThread(findView(TR.statuses).setAdapter(adapter))
runOnUiThread {
val globalInfoView = findView(TR.globalinfo)
globalInfoView.setText(parsed.global_verbose_status)
// TODO: StatusCondition instead of String.
Status.StatusCondition.readOpt(parsed.global_status).map(c =>
globalInfoView.setBackgroundColor(Status.colorFor(c)))
}
}
)
runOnUiThread(refreshAdapter.setRefreshComplete)
}
))
}
override def onCreateView(i: LayoutInflater, c: ViewGroup, b: Bundle): View = {
super.onCreateView(i, c, b)
i.inflate(R.layout.status_activity, c, false)
}
override def onStart(): Unit = {
super.onStart()
val view = findView(TR.statuses)
refreshAdapter.setRefreshableView(view, this)
updateStatuses()
}
}
| fedora-infra/mobile | src/main/scala/fragment/status/StatusFragment.scala | Scala | mpl-2.0 | 2,286 |
/*package parsequery
import org.scalameter.api._
import org.scalameter.picklers.Implicits._
import org.scalameter.Measurer.{RelativeNoise, OutlierElimination, PeriodicReinstantiation, MemoryFootprint}
import org.scalameter.Key
import java.text.SimpleDateFormat
import java.util.Calendar
import parsec._
import parsec.optimised._
object ParsequeryBenchmark extends Bench.ForkedTime with JSONParser {
import Js._
import fastparse.all._
override def executor = SeparateJvmsExecutor(
new Executor.Warmer.Default,
Aggregator.min[Double],
new Measurer.Default
)
override def persistor = Persistor.None
override def reporter = new LoggingReporter
def benchmark(fname: String, f: String => _): Unit = {
import scala.io.Source
val fileName = "data/scala-lang-contributions.json"
val fileContent = Source.fromFile(fileName).mkString
val range = Gen.enumeration("size")(10)
performance of fname in {
measure method "parser" config (
exec.minWarmupRuns -> 5,
exec.maxWarmupRuns -> 10,
exec.benchRuns -> 25,
exec.independentSamples -> 1
) in {
using(range) in { n =>
// we use while to remove overhead of for ... yield
var i = 0
while (i < n) {
f(fileContent)
i += 1
}
}
}
}
}
def runGeneralParser(src: String) = {
val Parsed.Success(resAll, _) = jsonExpr.parse(src)
val ids2totals: List[(Val, Val)] = (resAll match {
case x @ Arr(ls) =>
for (l <- ls) yield (l("author")("id"), l("total"))
case _ => sys.error("Something went wrong")
}).toList
}
def runSpecializedParser(src: String) = {
val Parsed.Success(specialized, _) = projections.parse(src)
val ids2totalsBis: List[Val] = (specialized match {
case x @ Arr(ls) => ls
}).toList
}
benchmark("GeneralParser", runGeneralParser)
benchmark("SpecializedParser", runSpecializedParser)
}
*/
| manojo/parsequery | macros/src/bench/scala/parsequery/ParsequeryBenchmark.scala | Scala | mit | 1,999 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.utils.bin
import java.io.ByteArrayOutputStream
import java.util.{Arrays, Date}
import com.vividsolutions.jts.geom.Point
import org.junit.runner.RunWith
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.bin.BinaryEncodeCallback.{ByteArrayCallback, ByteStreamCallback}
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.util.Random
@RunWith(classOf[JUnitRunner])
class BinSorterTest extends Specification {
val spec = "name:String,dtg:Date,*geom:Point:srid=4326"
val sft = SimpleFeatureTypes.createType(getClass.getSimpleName, spec)
val seed = 10 // new Random().nextLong(); println("SEED " + seed)
val r = new Random(seed)
val features = (0 until r.nextInt(100) + 1).map { i =>
val dtg = new Date(r.nextInt(999999))
val name = s"name$i"
val geom = s"POINT(40 6$i)"
val sf = new ScalaSimpleFeature(sft, s"$i")
sf.setAttributes(Array[AnyRef](name, dtg, geom))
sf
}
val out = new ByteArrayOutputStream(16 * features.length)
val callback = new ByteStreamCallback(out)
features.foreach { f =>
callback.apply(
f.getAttribute("name").asInstanceOf[String].hashCode,
f.getDefaultGeometry.asInstanceOf[Point].getY.toFloat,
f.getDefaultGeometry.asInstanceOf[Point].getX.toFloat,
f.getAttribute("dtg").asInstanceOf[Date].getTime)
}
val bin = out.toByteArray
"BinAggregatingIterator" should {
"quicksort" in {
val bytes = Arrays.copyOf(bin, bin.length)
BinSorter.quickSort(bytes, 0, bytes.length - 16, 16)
val result = bytes.grouped(16).map(BinaryOutputEncoder.decode).map(_.dtg).toSeq
forall(result.sliding(2))(s => s.head must beLessThanOrEqualTo(s.drop(1).head))
}
"mergesort" in {
val bytes = Arrays.copyOf(bin, bin.length).grouped(48).toSeq
bytes.foreach(b => BinSorter.quickSort(b, 0, b.length - 16, 16))
val result = BinSorter.mergeSort(bytes.iterator, 16).map {
case (b, o) => BinaryOutputEncoder.decode(b.slice(o, o + 16)).dtg
}
forall(result.sliding(2))(s => s.head must beLessThanOrEqualTo(s.drop(1).head))
}
"mergesort in place" in {
val bytes = Arrays.copyOf(bin, bin.length).grouped(48)
val (left, right) = (bytes.next(), bytes.next())
// sort the left and right arrays
BinSorter.quickSort(left, 0, left.length - 16, 16)
BinSorter.quickSort(right, 0, right.length - 16, 16)
val merged = BinSorter.mergeSort(left, right, 16)
val result = merged.grouped(16).map(BinaryOutputEncoder.decode).map(_.dtg).toSeq
forall(result.sliding(2))(s => s.head must beLessThanOrEqualTo(s.drop(1).head))
}
"quicksort 24 byte records" in {
val out = new ByteArrayOutputStream(24 * features.length)
val callback = new ByteStreamCallback(out)
features.foreach { f =>
callback.apply(
f.getAttribute("name").asInstanceOf[String].hashCode,
f.getDefaultGeometry.asInstanceOf[Point].getY.toFloat,
f.getDefaultGeometry.asInstanceOf[Point].getX.toFloat,
f.getAttribute("dtg").asInstanceOf[Date].getTime,
f.getAttribute("dtg").asInstanceOf[Date].getTime * 1000)
}
val bytes = out.toByteArray
BinSorter.quickSort(bytes, 0, bytes.length - 24, 24)
val result = bytes.grouped(24).map(BinaryOutputEncoder.decode).map(_.dtg).toSeq
forall(result.sliding(2))(s => s.head must beLessThanOrEqualTo(s.drop(1).head))
}
"quicksort edge cases" in {
val maxLength = 8 // anything more than 8 takes too long to run
val buffer = Array.ofDim[Byte](maxLength * 16)
val bins = (1 to maxLength).map { i =>
ByteArrayCallback.apply(s"name$i".hashCode, 0f, 0f, i * 1000)
ByteArrayCallback.result
}
(1 to maxLength).foreach { i =>
bins.slice(0, i).permutations.foreach { seq =>
val right = i * 16 - 16
seq.zipWithIndex.foreach { case (b, s) => System.arraycopy(b, 0, buffer, s * 16, 16) }
BinSorter.quickSort(buffer, 0, right, 16)
val result = buffer.take(right + 16).grouped(16).map(BinaryOutputEncoder.decode).map(_.dtg).toSeq
result must haveLength(i)
if (result.length > 1) {
forall(result.sliding(2))(s => s.head must beLessThanOrEqualTo(s.drop(1).head))
}
}
}
success
}
}
}
| ronq/geomesa | geomesa-index-api/src/test/scala/org/locationtech/geomesa/index/utils/bin/BinSorterTest.scala | Scala | apache-2.0 | 5,042 |
package com.emstlk.nacl4s.crypto.box
import com.emstlk.nacl4s.crypto.Utils._
import com.emstlk.nacl4s.crypto.core.HSalsa20
import com.emstlk.nacl4s.crypto.scalarmult.Curve25519
import com.emstlk.nacl4s.crypto.secretbox.XSalsa20Poly1305
object Curve25519XSalsa20Poly1305 {
val seedBytes = 32
val publicKeyBytes = 32
val secretKeyBytes = 32
val beforenmBytes = 32
val nonceBytes = 24
val zeroBytes = 32
val boxZeroBytes = 16
def cryptoBoxBeforenm(k: Array[Byte], pk: Array[Byte], sk: Array[Byte]) = {
val s = new Array[Byte](32)
Curve25519.cryptoScalarmult(s, sk, pk)
HSalsa20.cryptoCore(k, new Array[Byte](16), s, getSigma)
}
def cryptoBoxAfternm(c: Array[Byte], m: Array[Byte], mlen: Int, n: Array[Byte], k: Array[Byte]) = {
XSalsa20Poly1305.cryptoSecretBox(c, m, mlen, n, k)
}
def cryptoBoxOpenAfternm(m: Array[Byte], c: Array[Byte], clen: Int, n: Array[Byte], k: Array[Byte]) = {
XSalsa20Poly1305.cryptoSecretBoxOpen(m, c, clen, n, k)
}
def cryptoBox(c: Array[Byte], m: Array[Byte], mlen: Int, n: Array[Byte], pk: Array[Byte], sk: Array[Byte]) = {
val k = new Array[Byte](beforenmBytes)
cryptoBoxBeforenm(k, pk, sk)
cryptoBoxAfternm(c, m, mlen, n, k)
}
def cryptoBoxOpen(m: Array[Byte], c: Array[Byte], clen: Int, n: Array[Byte], pk: Array[Byte], sk: Array[Byte]) = {
val k = new Array[Byte](beforenmBytes)
cryptoBoxBeforenm(k, pk, sk)
cryptoBoxOpenAfternm(m, c, clen, n, k)
}
//TODO init crypto_box_seed_keypair
def cryptoBoxKeypair(pk: Array[Byte], sk: Array[Byte]) = {
random.nextBytes(sk)
Curve25519.cryptoScalarmultBase(pk, sk)
}
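// Hedged usage sketch; buffer layout follows the usual NaCl crypto_box convention
// suggested by zeroBytes/boxZeroBytes above, and `nonce`, `m`, `c` are assumed to
// be caller-allocated arrays:
//
//   val pk = new Array[Byte](publicKeyBytes)
//   val sk = new Array[Byte](secretKeyBytes)
//   cryptoBoxKeypair(pk, sk)
//   // `m` starts with `zeroBytes` zero bytes before the plaintext; the output `c`
//   // starts with `boxZeroBytes` zero bytes before the authenticated ciphertext.
//   cryptoBox(c, m, m.length, nonce, pk, sk)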
}
| emstlk/nacl4s | src/main/scala/com/emstlk/nacl4s/crypto/box/Curve25519XSalsa20Poly1305.scala | Scala | mit | 1,642 |
/*
* Copyright 2016 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s
import cats.effect.IO
import cats.effect.Resource
import cats.syntax.all._
import fs2._
import fs2.text.utf8
import munit._
import org.scalacheck.Prop
/** Common stack for http4s' munit based tests
*/
trait Http4sSuite
extends CatsEffectSuite
with DisciplineSuite
with munit.ScalaCheckEffectSuite
with Http4sSuitePlatform {
private[this] val suiteFixtures = List.newBuilder[Fixture[_]]
override def munitFixtures: Seq[Fixture[_]] = suiteFixtures.result()
// Override to remove implicit modifier
override def unitToProp = super.unitToProp
// Scala 3 likes this better
implicit def saneUnitToProp(unit: Unit): Prop = unitToProp(unit)
def registerSuiteFixture[A](fixture: Fixture[A]) = {
suiteFixtures += fixture
fixture
}
def resourceSuiteDeferredFixture[A](name: String, resource: Resource[IO, A]) =
registerSuiteFixture(UnsafeResourceSuiteLocalDeferredFixture(name, resource))
implicit class ParseResultSyntax[A](self: ParseResult[A]) {
def yolo: A = self.valueOr(e => sys.error(e.toString))
}
def writeToString[A](a: A)(implicit W: EntityEncoder[IO, A]): IO[String] =
Stream
.emit(W.toEntity(a))
.flatMap(_.body)
.through(utf8.decode)
.foldMonoid
.compile
.last
.map(_.getOrElse(""))
}
| http4s/http4s | testing/shared/src/test/scala/org/http4s/Http4sSuite.scala | Scala | apache-2.0 | 1,916 |
package com.twitter.finatra.thrift.filters
import com.twitter.finagle.{Filter, Service}
import com.twitter.finatra.thrift.exceptions.ExceptionManager
import com.twitter.util.Future
import javax.inject.{Inject, Singleton}
/**
* A [[ThriftFilter]] which handles exceptions by rescuing them and passing
* them to the [[ExceptionManager]].
*
* @note This Filter SHOULD be as close to the start of the Filter chain as possible
*/
@Singleton
class ExceptionMappingFilter @Inject() (
exceptionManager: ExceptionManager)
extends Filter.TypeAgnostic {
def toFilter[T, U]: Filter[T, U, T, U] = new Filter[T, U, T, U] {
def apply(
request: T,
service: Service[T, U]
): Future[U] = {
service(request).rescue {
case e => exceptionManager.handleException(e)
}
}
}
}
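// Hedged usage sketch (Finatra ThriftServer; the exact router wiring may differ
// between Finatra versions, and MyController is hypothetical):
//   override protected def configureThrift(router: ThriftRouter): Unit = {
//     router
//       .filter[ExceptionMappingFilter]
//       .add[MyController]
//   }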
| twitter/finatra | thrift/src/main/scala/com/twitter/finatra/thrift/filters/ExceptionMappingFilter.scala | Scala | apache-2.0 | 834 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.json
import java.io.{ InputStream, InputStreamReader }
import java.nio.charset.Charset
import io.advantageous.boon.json.implementation.{ JsonFastParser, JsonParserUsingCharacterSource }
class Boon extends JsonParser {
private def newFastParser = new JsonFastParser(false, false, true, false)
def parse(bytes: Array[Byte], charset: Charset) = {
val parser = newFastParser
parser.setCharset(charset)
parser.parse(bytes)
}
def parse(string: String) =
newFastParser.parse(string)
def parse(stream: InputStream, charset: Charset) =
new JsonParserUsingCharacterSource().parse(new InputStreamReader(stream, charset))
}
| thkluge/gatling | gatling-core/src/main/scala/io/gatling/core/json/Boon.scala | Scala | apache-2.0 | 1,291 |
/**
* Copyright (C) 2016 Verizon. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.verizon.bda.trapezium.dal.spark.cassandra
import com.verizon.bda.trapezium.dal.sql.BaseSqlDAO
import org.apache.spark.sql._
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.types.StructType
/**
* Created by Faraz Waseem on 12/17/15.
* CassandraDAO is a wrapper around the Spark Cassandra connector, which is a high-level interface to Cassandra.
* It has default options that can be overridden via Spark conf settings,
* e.g. sc.conf.set("spark.cassandra.connection.host", "md-bdadev-54.verizon.com")
*
*/
class CassandraDAO(dbName: String, tableName: String,
hosts: List[String]) (implicit spark: SparkSession)
extends BaseSqlDAO(dbName, tableName) {
protected val schema = null;
var dbMap = Map("table" -> tableName, "keyspace" -> dbName)
/**
* The schema corresponding to the table <code>tableName</code> is returned.
*
* @return <code>StructType</code> containing the schema.
*/
override def getSchema: StructType = schema
def this (dbName: String, tableName: String,
hosts: List[String],
options: Map[String, String]) (implicit spark: SparkSession) {
this(dbName, tableName, hosts)
dbMap ++= options
}
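// Hedged usage sketch; the keyspace, table and host values are made up:
//
//   implicit val spark: SparkSession = SparkSession.builder().getOrCreate()
//   val dao = new CassandraDAO("my_keyspace", "events", List("127.0.0.1"))
//   val all: DataFrame = dao.getAll()
//   dao.write(all.limit(10))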
/**
* Write the batch of records (eg. Rows) into the data store.
*
* @param data distributed collection of data to be persisted of type <code>DataFrame</code>
*/
override def write(data: DataFrame): Unit = {
// data.write.insertInto(dbName + "." + tableName)
data.write
.format("org.apache.spark.sql.cassandra")
.options(dbMap)
.mode(SaveMode.Append)
.save()
}
/**
* The requested list of columns are returned as batch.
*
* @param cols List of columns to return
* @return T distributed collection of data containing the requested columns.
*/
override def getColumns(cols: List[String]): DataFrame = {
val sqlText = constructSql(cols)
log.info(s"Executing query: $sqlText")
val df = spark.sqlContext.sql(sqlText)
df
}
def getOptions(): Map[String, String] = {
dbMap.toMap
}
/**
* All the columns from source are returned as batch.
*
* @return T distributed collection of data containing all columns.
*/
override def getAll(): DataFrame = {
val df = spark.sqlContext
.read
.format("org.apache.spark.sql.cassandra")
.options(dbMap)
.load()
df
}
/**
* Helper method to join a list of hosts into a comma-separated string.
*
* @param hosts host names to join
* @return comma-separated host string
*/
private def createStringList(hosts: List[String]): String = {
val builder: StringBuilder = new StringBuilder
hosts.foreach((i: String) => {
builder.append(i); builder.append(" , ") })
val hostString: String = builder.toString();
return hostString.substring(0, hostString.lastIndexOf(","));
}
}
| Verizon/trapezium | sparkcassandra-connector/src/main/scala/com/verizon/bda/trapezium/dal/spark/cassandra/CassandraDAO.scala | Scala | apache-2.0 | 3,468 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rhttpc.transport.amqp
import akka.actor._
import rhttpc.utils.Agent
import com.github.ghik.silencer.silent
import com.rabbitmq.client.AMQP.Queue.DeclareOk
import com.rabbitmq.client.{AMQP, Channel, Connection}
import rhttpc.transport.SerializingPublisher.SerializedMessage
import rhttpc.transport._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Try
trait AmqpTransport extends PubSubTransport {
def queuesStats: Future[Map[String, AmqpQueueStats]]
}
// TODO: actor-based, connection recovery
private[rhttpc] class AmqpTransportImpl(connection: Connection,
prepareExchangeName: OutboundQueueData => String,
consumeTimeout: FiniteDuration,
nackDelay: FiniteDuration,
declarePublisherQueue: AmqpDeclareOutboundQueueData => DeclareOk,
declareSubscriberQueue: AmqpDeclareInboundQueueData => DeclareOk,
prepareProperties: PartialFunction[SerializedMessage, AMQP.BasicProperties])
(implicit actorSystem: ActorSystem) extends AmqpTransport {
import actorSystem.dispatcher
private lazy val statsChannel = connection.createChannel()
@silent private val queueNamesAgent = Agent[Set[String]](Set.empty)
override def publisher[PubMsg](queueData: OutboundQueueData)
(implicit serializer: Serializer[PubMsg]): AmqpPublisher[PubMsg] = {
val channel = connection.createChannel()
val exchangeName = prepareExchangeName(queueData)
declarePublisherQueue(AmqpDeclareOutboundQueueData(queueData, exchangeName, channel))
queueNamesAgent.send(_ + queueData.name)
val publisher = new AmqpPublisher[PubMsg](
channel = channel,
queueName = queueData.name,
exchangeName = exchangeName,
serializer = serializer,
prepareProperties = prepareProperties
)
channel.addConfirmListener(publisher)
channel.confirmSelect()
publisher
}
override def subscriber[SubMsg](queueData: InboundQueueData, consumer: ActorRef)
(implicit deserializer: Deserializer[SubMsg]): Subscriber[SubMsg] = {
val subscribers = (1 to queueData.parallelConsumers).map { _ =>
val channel = connection.createChannel()
declareSubscriberQueue(AmqpDeclareInboundQueueData(queueData, channel))
queueNamesAgent.send(_ + queueData.name)
new AmqpSubscriber[SubMsg](
channel = channel,
queueName = queueData.name,
consumer = consumer,
deserializer = deserializer,
consumeTimeout = consumeTimeout,
nackDelay = nackDelay
) with SendingSimpleMessage[SubMsg]
}
new SubscriberAggregate[SubMsg](subscribers)
}
override def fullMessageSubscriber[SubMsg](queueData: InboundQueueData, consumer: ActorRef)
(implicit deserializer: Deserializer[SubMsg]): Subscriber[SubMsg] = {
val subscribers = (1 to queueData.parallelConsumers).map { _ =>
val channel = connection.createChannel()
declareSubscriberQueue(AmqpDeclareInboundQueueData(queueData, channel))
new AmqpSubscriber[SubMsg](
channel = channel,
queueName = queueData.name,
consumer = consumer,
deserializer = deserializer,
consumeTimeout = consumeTimeout,
nackDelay = nackDelay
) with SendingFullMessage[SubMsg]
}
new SubscriberAggregate[SubMsg](subscribers)
}
override def queuesStats: Future[Map[String, AmqpQueueStats]] = {
queueNamesAgent.future().map { names =>
names.map { queueName =>
val dlqQueueName = AmqpDefaults.prepareDlqQueueName(queueName)
val stats = AmqpQueueStats(
messageCount = messageCount(queueName),
consumerCount = consumerCount(queueName),
dlqMessageCount = messageCount(dlqQueueName),
dlqConsumerCount = consumerCount(dlqQueueName)
)
queueName -> stats
}.toMap
}
}
private def messageCount(queueName: String): Long =
Try(statsChannel.messageCount(queueName)).getOrElse(0L)
private def consumerCount(queueName: String): Long =
Try(statsChannel.consumerCount(queueName)).getOrElse(0L)
override def stop(): Future[Unit] = Future.unit
}
case class AmqpQueueStats(messageCount: Long, consumerCount: Long, dlqMessageCount: Long, dlqConsumerCount: Long)
object AmqpQueueStats {
def zero = AmqpQueueStats(0, 0, 0, 0)
}
object AmqpTransport {
def apply[PubMsg <: AnyRef, SubMsg](connection: Connection,
prepareExchangeName: OutboundQueueData => String = AmqpDefaults.prepareExchangeName,
consumeTimeout: FiniteDuration = AmqpDefaults.consumeTimeout,
nackDelay: FiniteDuration = AmqpDefaults.nackDelay,
declarePublisherQueue: AmqpDeclareOutboundQueueData => DeclareOk = AmqpDefaults.declarePublisherQueueWithDelayedExchangeIfNeed,
declareSubscriberQueue: AmqpDeclareInboundQueueData => DeclareOk = AmqpDefaults.declareSubscriberQueue,
prepareProperties: PartialFunction[SerializedMessage, AMQP.BasicProperties] = AmqpDefaults.preparePersistentMessageProperties)
(implicit actorSystem: ActorSystem): AmqpTransport = {
new AmqpTransportImpl(
connection = connection,
prepareExchangeName = prepareExchangeName,
consumeTimeout = consumeTimeout,
nackDelay = nackDelay,
declarePublisherQueue = declarePublisherQueue,
declareSubscriberQueue = declareSubscriberQueue,
prepareProperties = AmqpDefaults.preparePersistentMessageProperties
)
}
}
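// Hedged usage sketch; obtaining the RabbitMQ `Connection` is an assumption and
// all defaults come from AmqpDefaults:
//
//   implicit val system: ActorSystem = ActorSystem("rhttpc")
//   val connection: Connection = ??? // e.g. created from a configured ConnectionFactory
//   val transport: AmqpTransport = AmqpTransport(connection)
//   // then use transport.publisher(...) / transport.subscriber(...) as shown above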
case class AmqpDeclareInboundQueueData(queueData: InboundQueueData, channel: Channel)
case class AmqpDeclareOutboundQueueData(queueData: OutboundQueueData, exchangeName: String, channel: Channel)
| arkadius/reliable-http-client | rhttpc-amqp/src/main/scala/rhttpc/transport/amqp/AmqpTransport.scala | Scala | apache-2.0 | 6,783 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io.{PrintStream, OutputStream, File}
import java.net.URI
import java.util.jar.Attributes.Name
import java.util.jar.{JarFile, Manifest}
import java.util.zip.ZipFile
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import com.google.common.io.Files
import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.SparkFunSuite
import org.apache.spark.api.r.RUtils
import org.apache.spark.deploy.SparkSubmitUtils.MavenCoordinate
class RPackageUtilsSuite extends SparkFunSuite with BeforeAndAfterEach {
private val main = MavenCoordinate("a", "b", "c")
private val dep1 = MavenCoordinate("a", "dep1", "c")
private val dep2 = MavenCoordinate("a", "dep2", "d")
private def getJarPath(coord: MavenCoordinate, repo: File): File = {
new File(IvyTestUtils.pathFromCoordinate(coord, repo, "jar", useIvyLayout = false),
IvyTestUtils.artifactName(coord, useIvyLayout = false, ".jar"))
}
private val lineBuffer = ArrayBuffer[String]()
private val noOpOutputStream = new OutputStream {
def write(b: Int) = {}
}
/** Simple PrintStream that reads data into a buffer */
private class BufferPrintStream extends PrintStream(noOpOutputStream) {
// scalastyle:off println
override def println(line: String) {
// scalastyle:on println
lineBuffer += line
}
}
def beforeAll() {
System.setProperty("spark.testing", "true")
}
override def beforeEach(): Unit = {
lineBuffer.clear()
}
test("pick which jars to unpack using the manifest") {
val deps = Seq(dep1, dep2).mkString(",")
IvyTestUtils.withRepository(main, Some(deps), None, withR = true) { repo =>
val jars = Seq(main, dep1, dep2).map(c => new JarFile(getJarPath(c, new File(new URI(repo)))))
assert(RPackageUtils.checkManifestForR(jars(0)), "should have R code")
assert(!RPackageUtils.checkManifestForR(jars(1)), "should not have R code")
assert(!RPackageUtils.checkManifestForR(jars(2)), "should not have R code")
}
}
test("build an R package from a jar end to end") {
assume(RUtils.isRInstalled, "R isn't installed on this machine.")
val deps = Seq(dep1, dep2).mkString(",")
IvyTestUtils.withRepository(main, Some(deps), None, withR = true) { repo =>
val jars = Seq(main, dep1, dep2).map { c =>
getJarPath(c, new File(new URI(repo)))
}.mkString(",")
RPackageUtils.checkAndBuildRPackage(jars, new BufferPrintStream, verbose = true)
val firstJar = jars.substring(0, jars.indexOf(","))
val output = lineBuffer.mkString("\n")
assert(output.contains("Building R package"))
assert(output.contains("Extracting"))
assert(output.contains(s"$firstJar contains R source code. Now installing package."))
assert(output.contains("doesn't contain R source code, skipping..."))
}
}
test("jars that don't exist are skipped and print warning") {
assume(RUtils.isRInstalled, "R isn't installed on this machine.")
val deps = Seq(dep1, dep2).mkString(",")
IvyTestUtils.withRepository(main, Some(deps), None, withR = true) { repo =>
val jars = Seq(main, dep1, dep2).map { c =>
getJarPath(c, new File(new URI(repo))) + "dummy"
}.mkString(",")
RPackageUtils.checkAndBuildRPackage(jars, new BufferPrintStream, verbose = true)
val individualJars = jars.split(",")
val output = lineBuffer.mkString("\n")
individualJars.foreach { jarFile =>
assert(output.contains(s"$jarFile"))
}
}
}
test("faulty R package shows documentation") {
assume(RUtils.isRInstalled, "R isn't installed on this machine.")
IvyTestUtils.withRepository(main, None, None) { repo =>
val manifest = new Manifest
val attr = manifest.getMainAttributes
attr.put(Name.MANIFEST_VERSION, "1.0")
attr.put(new Name("Spark-HasRPackage"), "true")
val jar = IvyTestUtils.packJar(new File(new URI(repo)), dep1, Nil,
useIvyLayout = false, withR = false, Some(manifest))
RPackageUtils.checkAndBuildRPackage(jar.getAbsolutePath, new BufferPrintStream,
verbose = true)
val output = lineBuffer.mkString("\n")
assert(output.contains(RPackageUtils.RJarDoc))
}
}
test("SparkR zipping works properly") {
val tempDir = Files.createTempDir()
try {
IvyTestUtils.writeFile(tempDir, "test.R", "abc")
val fakeSparkRDir = new File(tempDir, "SparkR")
assert(fakeSparkRDir.mkdirs())
IvyTestUtils.writeFile(fakeSparkRDir, "abc.R", "abc")
IvyTestUtils.writeFile(fakeSparkRDir, "DESCRIPTION", "abc")
IvyTestUtils.writeFile(tempDir, "package.zip", "abc") // fake zip file :)
val fakePackageDir = new File(tempDir, "packageTest")
assert(fakePackageDir.mkdirs())
IvyTestUtils.writeFile(fakePackageDir, "def.R", "abc")
IvyTestUtils.writeFile(fakePackageDir, "DESCRIPTION", "abc")
val finalZip = RPackageUtils.zipRLibraries(tempDir, "sparkr.zip")
assert(finalZip.exists())
val entries = new ZipFile(finalZip).entries().asScala.map(_.getName).toSeq
assert(entries.contains("/test.R"))
assert(entries.contains("/SparkR/abc.R"))
assert(entries.contains("/SparkR/DESCRIPTION"))
assert(!entries.contains("/package.zip"))
assert(entries.contains("/packageTest/def.R"))
assert(entries.contains("/packageTest/DESCRIPTION"))
} finally {
FileUtils.deleteDirectory(tempDir)
}
}
}
| pronix/spark | core/src/test/scala/org/apache/spark/deploy/RPackageUtilsSuite.scala | Scala | apache-2.0 | 6,337 |
package com.caibowen.prma.webface
import java.io.IOException
import java.sql.ResultSet
import ch.qos.logback.classic.Level
import ch.qos.logback.classic.spi.LoggingEvent
import com.caibowen.prma.api.LogLevel
import com.caibowen.prma.api.model.EventVO
import com.caibowen.prma.logger.logback.LogbackEventAdaptor
import org.slf4j.{MDC, MarkerFactory, LoggerFactory}
/**
* Created by Bowen Cai on 1/5/2015.
*/
class _Mock {
val LOG = LoggerFactory.getLogger(classOf[SearchEngine])
def gen(mk: Boolean, except: Boolean) = {
val exp = new RuntimeException("msg level 3", new IOException("msg level 2"))//, new FileNotFoundException("msg level 1")))
val mk1 = MarkerFactory.getMarker("marker 1")
val mk2 = MarkerFactory.getMarker("marker 2")
mk1.add(mk2)
val mdc1: String = "test mdc 1"
MDC.put(mdc1, "hahaha")
MDC.put("test mdc 2", "hahaha222")
MDC.put("test mdc 3", "wowowo")
// MDC.clear()
val lbEvent = new LoggingEvent("fmt scala logging store test",
LOG.asInstanceOf[ch.qos.logback.classic.Logger],
Level.DEBUG,
"scala logging store test", if (except) exp else null, null)
if (mk)
lbEvent.setMarker(mk1)
val fadapt = new LogbackEventAdaptor
fadapt.from(lbEvent)
}
def _test =
List(
gen(true, false),
// gen(false, true),
gen(false, false)
)
}
| xkommando/PRMA | webface/src/main/scala/com/caibowen/prma/webface/_Mock.scala | Scala | lgpl-3.0 | 1,370 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.common
import java.util.concurrent.TimeUnit.{HOURS, MICROSECONDS}
import org.scalatest.{FunSuite, Matchers}
class DependenciesTest extends FunSuite with Matchers {
val dl1 = DependencyLink("Gizmoduck", "tflock", 4)
val dl2 = DependencyLink("mobileweb", "Gizmoduck", 4)
val dl3 = DependencyLink("tfe", "mobileweb", 2)
val dl4 = DependencyLink("tfe", "mobileweb", 4)
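  // dl3 and dl4 share the same parent/child pair (tfe -> mobileweb), so their
  // call counts are expected to be summed when the two Dependencies are combined.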
val deps1 = Dependencies(0L, MICROSECONDS.convert(1, HOURS), List(dl1, dl3))
val deps2 = Dependencies(MICROSECONDS.convert(1, HOURS), MICROSECONDS.convert(2, HOURS), List(dl2, dl4))
test("identity on Dependencies.zero") {
deps1 + Dependencies.zero should be(deps1)
Dependencies.zero + deps1 should be(deps1)
}
test("sums where parent/child match") {
val result = deps1 + deps2
result.startTs should be(deps1.startTs)
result.endTs should be(deps2.endTs)
result.links.sortBy(_.parent) should be(Seq(
dl1,
dl2,
dl3.copy(callCount = dl3.callCount + dl4.callCount)
))
}
}
| jfeltesse-mdsol/zipkin | zipkin-common/src/test/scala/com/twitter/zipkin/common/DependenciesTest.scala | Scala | apache-2.0 | 1,637 |
// Copyright 2012 Twitter, Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.twitter.cassie
import com.twitter.util.{ Future, Promise }
import java.util.{ Map => JMap, List => JList, ArrayList => JArrayList }
import org.apache.cassandra.finagle.thrift
import scala.collection.JavaConversions._
import scala.collection.mutable.Buffer
/**
* Async iteration across the columns for a given key.
*
* EXAMPLE
* val cf = new Cluster("127.0.0.1").keyspace("foo")
* .connect().columnFamily("bar", Utf8Codec, Utf8Codec, Utf8Codec)
*
* val done = cf.columnsIteratee.foreach("bam").foreach {col =>
* println(col) // this function is executed asynchronously for each column
* }
* done() // this is a Future[Unit] that will be satisfied when the iteration
* // is done
*/
trait ColumnsIteratee[Key, Name, Value] {
def hasNext(): Boolean
def next(): Future[ColumnsIteratee[Key, Name, Value]]
def foreach(f: Column[Name, Value] => Unit): Future[Unit] = {
val p = new Promise[Unit]
next map (_.visit(p, f)) handle { case e => p.setException(e) }
p
}
def map[A](f: Column[Name, Value] => A): Future[Seq[A]] = {
val buffer = Buffer.empty[A]
foreach { column =>
buffer.append(f(column))
}.map { _ => buffer }
}
def visit(p: Promise[Unit], f: Column[Name, Value] => Unit): Unit
}
object ColumnsIteratee {
def apply[Key, Name, Value](cf: ColumnFamily[Key, Name, Value], key: Key,
start: Option[Name], end: Option[Name], batchSize: Int,
limit: Int, order: Order = Order.Normal) = {
new InitialColumnsIteratee(cf, key, start, end, batchSize, limit, order)
}
}
private[cassie] class InitialColumnsIteratee[Key, Name, Value](
val cf: ColumnFamily[Key, Name, Value], key: Key, start: Option[Name], end: Option[Name],
batchSize: Int, remaining: Int, order: Order) extends ColumnsIteratee[Key, Name, Value] {
def hasNext() = true
def next() = {
    // Fetch no more than the remaining limit, which may be smaller than the batch size.
val fetchSize = math.min(batchSize, remaining)
cf.getRowSlice(key, start, end, fetchSize, order).map { buf =>
if (buf.size < batchSize || batchSize == remaining) {
new FinalColumnsIteratee(buf)
} else {
new SubsequentColumnsIteratee(cf, key, batchSize, buf.last.name, end, remaining - buf.size, order, buf)
}
}
}
def visit(p: Promise[Unit], f: Column[Name, Value] => Unit) {
throw new UnsupportedOperationException("no need to visit the initial Iteratee")
}
}
private[cassie] class SubsequentColumnsIteratee[Key, Name, Value](val cf: ColumnFamily[Key, Name, Value],
val key: Key, val batchSize: Int, val start: Name, val end: Option[Name],
val remaining: Int, val order: Order, val buffer: JList[Column[Name, Value]])
extends ColumnsIteratee[Key, Name, Value] {
def hasNext = true
def next() = {
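    // Fetch one extra column: the slice starts at the last column of the previous
    // batch, which is dropped again below (subList(1, ...)) to avoid double-counting.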
val fetchSize = math.min(batchSize + 1, remaining + 1)
cf.getRowSlice(key, Some(start), end, fetchSize, order).map { buf =>
val skipped = buf.subList(1, buf.length)
if (skipped.size() < batchSize || batchSize == remaining) {
new FinalColumnsIteratee(skipped)
} else {
new SubsequentColumnsIteratee(cf, key, batchSize, skipped.last.name, end, remaining - skipped.size, order, skipped)
}
}
}
def visit(p: Promise[Unit], f: Column[Name, Value] => Unit) {
for (c <- buffer) {
f(c)
}
if (hasNext) {
next map (_.visit(p, f)) handle { case e => p.setException(e) }
} else {
p.setValue(Unit)
}
}
}
private[cassie] class FinalColumnsIteratee[Key, Name, Value](val buffer: JList[Column[Name, Value]])
extends ColumnsIteratee[Key, Name, Value] {
def hasNext = false
def next = Future.exception(new UnsupportedOperationException("no next for the final iteratee"))
def visit(p: Promise[Unit], f: Column[Name, Value] => Unit) {
for (c <- buffer) {
f(c)
}
p.setValue(Unit)
}
}
| travisbrown/zipkin | zipkin-cassandra/src/main/scala/com/twitter/cassie/ColumnsIteratee.scala | Scala | apache-2.0 | 4,420 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.spark.hbase.example
import org.apache.spark.SparkContext
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.client.Put
import org.apache.spark.SparkConf
import com.cloudera.spark.hbase.HBaseContext
object HBaseBulkPutTimestampExample {
def main(args: Array[String]) {
if (args.length == 0) {
System.out.println("HBaseBulkPutTimestampExample {tableName} {columnFamily}");
return ;
}
val tableName = args(0);
val columnFamily = args(1);
val sparkConf = new SparkConf().setAppName("HBaseBulkPutTimestampExample " + tableName + " " + columnFamily)
val sc = new SparkContext(sparkConf)
val rdd = sc.parallelize(Array(
(Bytes.toBytes("6"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("1")))),
(Bytes.toBytes("7"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("2")))),
(Bytes.toBytes("8"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("3")))),
(Bytes.toBytes("9"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("4")))),
(Bytes.toBytes("10"), Array((Bytes.toBytes(columnFamily), Bytes.toBytes("1"), Bytes.toBytes("5"))))));
val conf = HBaseConfiguration.create();
conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
val timeStamp = System.currentTimeMillis()
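    // The same timestamp is applied to every cell written by the bulk put below.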
val hbaseContext = new HBaseContext(sc, conf);
hbaseContext.bulkPut[(Array[Byte], Array[(Array[Byte], Array[Byte], Array[Byte])])](rdd,
tableName,
(putRecord) => {
val put = new Put(putRecord._1)
putRecord._2.foreach((putValue) => put.add(putValue._1, putValue._2, timeStamp, putValue._3))
put
},
true);
}
} | lgscofield/SparkOnHBase | src/main/scala/com/cloudera/spark/hbase/example/HBaseBulkPutTimestampExample.scala | Scala | apache-2.0 | 2,719 |
package model
trait PluginComponent extends TemplateComponent { self: Profile =>
import profile.simple._
import self._
lazy val Plugins = TableQuery[Plugins]
class Plugins(tag: Tag) extends Table[Plugin](tag, "PLUGIN"){
    val pluginId = column[String]("PLUGIN_ID", O.PrimaryKey)
val version = column[String]("VERSION")
def * = (pluginId, version) <> (Plugin.tupled, Plugin.unapply)
}
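  // Usage sketch (illustrative only; assumes an implicit Slick Session in scope and a
  // hypothetical plugin id):
  //   Plugins.filter(_.pluginId === "some-plugin").firstOption
  //   Plugins.insert(Plugin("some-plugin", "1.0.0"))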
}
case class Plugin(
pluginId: String,
version: String
)
| mqshen/gitbucketTest | src/main/scala/model/Plugin.scala | Scala | apache-2.0 | 469 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.codegenerator.opencl.cpukernels
import cogx.platform.checkpoint.{ObjectSaver, Saveable}
import cogx.platform.types.KernelTypes.ActuatorKernelType
import cogx.platform.types.{Pixel, VirtualFieldRegister, FieldType, AbstractKernel}
import cogx.platform.opencl.{OpenCLCpuKernel, OpenCLFieldRegister}
import cogx.compiler.parser.op._
import cogx.platform.cpumemory.ColorFieldMemory
/**
* The kernel to handle unpipelined actuators.
*
* @author Dick Carter
*/
private[cogx]
class UnpipelinedColorActuatorKernel(in: VirtualFieldRegister, op: UnpipelinedColorActuatorOp,
actuatorClassname: String, restoreParameters: () => String)
extends OpenCLCpuKernel(op, Array(in), Array[FieldType]())
with Saveable
{
/** The type of the kernel, either DeviceKernel, or one of a number of CPU kernel types. */
override val kernelType = ActuatorKernelType
/** The version of the kernel. */
def majorVersion = 1
/** The version of the kernel. */
def minorVersion = 0
/** Invoke the user-supplied resetHook upon reset */
override def reset() {
op.resetHook()
}
def compute(in: Array[OpenCLFieldRegister], out: Array[OpenCLFieldRegister]) {
// Read the GPU data to the CPU buffer
val cpuMemory = in(0).slave.read.asInstanceOf[ColorFieldMemory]
    // Pass the data to the user-supplied newOutput callback.
op.newOutput(cpuMemory.iterator)
}
/** Create a clone of this kernel that uses a new set of virtual field registers
* as inputs. Useful for breaking a large circuit apart into smaller subcircuits. */
def copyWithNewInputs(inputs: Array[VirtualFieldRegister]): AbstractKernel = {
// Note that we intentionally do NOT copy the old value of 'source' here.
// These copy methods exist to support cloning kernels from a large circuit
// into several smaller subcircuits. There's probably a clone of the
// original actuator driver somewhere, and that's probably what this copy
// needs to be hooked up to.
new UnpipelinedColorActuatorKernel(in, op, actuatorClassname, restoreParameters)
}
/** Save this instance using the facilities of the ObjectSaver */
override def save(saver: ObjectSaver): Unit = {
super.save(saver)
saver.writeInt("majorVersion", majorVersion)
saver.writeInt("minorVersion", minorVersion)
saver.writeString("actuatorClassname", actuatorClassname)
saver.writeString("restoreParameters", restoreParameters())
}
}
| hpe-cct/cct-core | src/main/scala/cogx/compiler/codegenerator/opencl/cpukernels/UnpipelinedColorActuatorKernel.scala | Scala | apache-2.0 | 3,131 |
package gh.test.gh2013.event
import gh2013.events.{GollumEventParser}
import net.liftweb.json._
import org.scalatest.{FlatSpec, Matchers}
class GollumEventTest extends FlatSpec with Matchers
{
"A valid GollumEvent" must "be correctly parsed" in {
val json = parse(
"""
| {
| "actor":"hudec",
| "public":true,
| "created_at":"2013-01-01T01:37:00-08:00",
| "type":"GollumEvent",
| "payload":{
| "pages":[
| {
| "title":"Home",
| "action":"edited",
| "html_url":"https://github.com/hudec/sql-processor/wiki/Home",
| "page_name":"Home",
| "sha":"b5cfd5f62ef7f76abc82495f5e9d3d146b6d7a4d",
| "summary":null
| }
| ]
| },
| "repository":{
| "watchers":8,
| "has_wiki":true,
| "owner":"hudec",
| "created_at":"2011-04-09T09:55:26-07:00",
| "homepage":"",
| "stargazers":8,
| "open_issues":6,
| "pushed_at":"2013-01-01T01:26:32-08:00",
| "url":"https://github.com/hudec/sql-processor",
| "description":"The SQL Processor is an engine producing the ANSI SQL statements and providing their execution without the necessity to write Java plumbing code related to the ORM or JDBC API.",
| "forks":1,
| "has_issues":true,
| "fork":false,
| "size":728,
| "has_downloads":true,
| "name":"sql-processor",
| "language":"Java",
| "id":1592013,
| "private":false
| },
|
| "actor_attributes":{
| "blog":"http://www.linkedin.com/pub/vladim%C3%ADr-hudec/17/477/11",
| "login":"hudec",
| "location":"Prague",
| "gravatar_id":"5bbd09833ad88520dd73b3dbd7cc07aa",
| "type":"User",
| "name":"Vladimír Hudec",
| "email":"[email protected]"
| },
| "url":"https://github.com/hudec/sql-processor/wiki/Home"
|
|}
""".stripMargin)
gh2013.parser(GollumEventParser)(json) shouldBe 'defined
}
"Another valid GollumEvent" must "be correctly parsed" in {
val json = parse(
"""
| {
|
| "actor":"firepick1",
| "public":true,
| "type":"GollumEvent",
| "url":"https://github.com/firepick1/FirePick/wiki/Inventables",
| "repository":{
| "watchers":0,
| "owner":"firepick1",
| "created_at":"2012-12-31T20:46:53-08:00",
| "stargazers":0,
| "open_issues":0,
| "has_issues":true,
| "has_wiki":true,
| "pushed_at":"2012-12-31T20:46:53-08:00",
| "url":"https://github.com/firepick1/FirePick",
| "description":"www.firepick.org GitHub ",
| "forks":0,
| "fork":false,
| "size":0,
| "name":"FirePick",
| "id":7393229,
| "private":false,
| "has_downloads":true
| },
| "actor_attributes":{
| "gravatar_id":"268127beb163e9ba4a6c17975f30d0f0",
| "login":"firepick1",
| "type":"User"
| },
| "payload":{
| "pages":[
| {
| "title":"Inventables",
| "summary":null,
| "sha":"26e67c124d06ff4c030e84007efb8885b6bd6879",
| "page_name":"Inventables",
| "html_url":"https://github.com/firepick1/FirePick/wiki/Inventables",
| "action":"created"
| }
| ]
| },
| "created_at":"2013-01-01T00:14:49-08:00"
|
|}
""".stripMargin)
gh2013.parser(GollumEventParser)(json) shouldBe 'defined
}
}
| mgoeminne/github_etl | src/test/scala/gh/test/gh2013/event/GollumEventTest.scala | Scala | mit | 4,599 |
package com.stulsoft.ysps.peither
import scala.io.StdIn
/**
* Playing with Either.
*
* Convention dictates that Left is used for failure and Right is used for success.
*
* Created by Yuriy Stul on 9/14/2016.
*/
object EitherSample {
def main(args: Array[String]): Unit = {
println("==>main")
test1()
test2()
test3()
println("<==main")
}
def test1(): Unit = {
println("==>test1")
val in = StdIn.readLine("Type Either a string or an Int: ")
val result: Either[String, Int] = try {
Right(in.toInt)
} catch {
case _: Exception =>
Left(in)
}
println(result match {
case Right(x) => "You passed me the Int: " + x + ", which I will increment. " + x + " + 1 = " + (x + 1)
case Left(x) => "You passed me the String: " + x
})
println("<==test1")
}
def test2(): Unit = {
println("==>test2")
val l: Either[String, Int] = Left("flower")
val r: Either[String, Int] = Right(12)
println(s"l.left = ${l.left}")
println(s"l.right = $l")
println(s"r.left = ${r.left}")
println(s"r.right = $r")
println("====================")
println(s"l.left.map(_.size): Either[Int, Int]=${l.left.map(_.length): Either[Int, Int]}")
println(s"l.right.map(_.toDouble): Either[String, Double]=${l.map(_.toDouble): Either[String, Double]}")
println(s"r.left.map(_.size): Either[Int, Int]=${r.left.map(_.length): Either[Int, Int]}")
println(s"r.right.map(_.toDouble): Either[String, Double]=${r.map(_.toDouble): Either[String, Double]}")
println("<==test2")
}
def test3(): Unit ={
println("==>test3")
println(s"""validate("") is ${validate("")}""")
println(s"""validate("abc") is ${validate("abc")}""")
println("<==test3")
}
def validate(t: String): Either[String, String] = {
if (t.nonEmpty) Right(t) else Left("***ERROR")
}
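  // Minimal additional sketch: Either is right-biased here (test2 already calls `map`
  // directly on Either, i.e. Scala 2.12+), so validated values compose in a
  // for-comprehension that stops at the first Left. The name `validateBoth` is
  // illustrative only.
  def validateBoth(a: String, b: String): Either[String, String] =
    for {
      x <- validate(a)
      y <- validate(b)
    } yield x + y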
}
| ysden123/ysps | src/main/scala/com/stulsoft/ysps/peither/EitherSample.scala | Scala | mit | 1,887 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.scalatest._
import scala.collection.mutable.Stack
import scala.collection.mutable
import org.scalatest.events._
import scala.util.Random
import scala.reflect.NameTransformer
import org.scalatest.exceptions.TestCanceledException
import org.scalatest.exceptions.TestFailedException
class UnitedStates extends Suite {
import UnitedStates.allStates
import UnitedStates.nestedSuiteCount
override def nestedSuites: collection.immutable.IndexedSeq[Suite] = allStates.take(nestedSuiteCount).toIndexedSeq
override def run(testName: Option[String], args: Args): org.scalatest.Status = {
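    // Reveal one more state suite on each run, until all fifty are included.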
if (nestedSuiteCount < allStates.length)
nestedSuiteCount += 1
super.run(testName, args)
}
}
trait StateSuite extends Suite {
import StateSuite.allTestNames
import StateSuite.testCounts
import StateSuite.testStatuses
private def anInitialDuration = Random.nextInt(20)
  private val simpleName = getClass.getSimpleName.replaceAll("\\$", "")
override def testNames: Set[String] = allTestNames.take(testCounts(simpleName)).toSet
override def tags: Map[String, Set[String]] = Map()
override def run(testName: Option[String], args: Args): org.scalatest.Status = {
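    // Each run may add one new test (pending or already succeeded), occasionally flips a
    // previously succeeded test to ignored/canceled/failed for a few runs, and
    // occasionally multiplies one test's duration to simulate a slow test.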
val testCount = testCounts(simpleName)
if (testCount < allTestNames.length) {
testCounts(simpleName) += 1
      // The new test is at index testCount. Decide whether it starts out pending (a 50/50
      // choice) and, if so, for how many runs it stays pending.
val nameOfNewTest = allTestNames(testCount)
val isPending = Random.nextInt(2) == 0
testStatuses(simpleName)(nameOfNewTest) =
if (isPending) Pending(anInitialDuration, Random.nextInt(30)) // Maximum of 30 times pending before going to some other status
else Succeeded(anInitialDuration)
}
val alterSucceededTests = Random.nextInt(3) == 0
if (alterSucceededTests) {
val nameOfTestToAlter = allTestNames(Random.nextInt(testCounts(simpleName)) )
testStatuses(simpleName)(nameOfTestToAlter) match {
case Succeeded(duration) =>
val isIgnored = Random.nextInt(2) == 0
val isCanceled = !isIgnored && (Random.nextInt(2) == 0) // If not ignored or canceled, then make it failed
val remaining = Random.nextInt(if (isIgnored) 20 else 15)
testStatuses(simpleName)(nameOfTestToAlter) =
if (isIgnored) Ignored(duration, remaining) else if (isCanceled) Canceled(duration, remaining) else Failed(duration, remaining)
case _ =>
}
}
val shouldSlowATest = Random.nextInt(50) == 0
if (shouldSlowATest) {
val nameOfTestToSlow = allTestNames(Random.nextInt(testCounts(simpleName)))
val slowFactor = Random.nextInt(25)
val slowerStatus =
testStatuses(simpleName)(nameOfTestToSlow) match {
case Succeeded(d) => Succeeded(d * slowFactor)
case Pending(d, r) => Pending(d * slowFactor, r)
case Canceled(d, r) => Canceled(d * slowFactor, r)
case Ignored(d, r) => Ignored(d * slowFactor, r)
case Failed(d, r) => Failed(d * slowFactor, r)
}
testStatuses(simpleName)(nameOfTestToSlow) = slowerStatus
}
super.run(testName, args)
}
private def reportTestStarting(theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, testText: String, suiteRerunner: Option[String], location: Option[Location]) {
report(TestStarting(tracker.nextOrdinal(), theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), testName, testText, Some(MotionToSuppress),
location, suiteRerunner))
}
private def reportTestFailed(theSuite: Suite, report: Reporter, throwable: Throwable, testName: String, testText: String,
suiteRerunner: Option[String], tracker: Tracker, duration: Long, level: Int, includeIcon: Boolean, location: Option[Location]) {
val message = getMessageForException(throwable)
val formatter = getIndentedText(testText, level, includeIcon)
report(TestFailed(tracker.nextOrdinal(), message, theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), testName, testText, Vector.empty, Some(throwable), Some(duration), Some(formatter), location, suiteRerunner))
}
private def reportTestCanceled(theSuite: Suite, report: Reporter, throwable: Throwable, testName: String, testText: String,
rerunnable: Option[Rerunner], tracker: Tracker, duration: Long, level: Int, includeIcon: Boolean, location: Option[Location]) {
val message = getMessageForException(throwable)
val formatter = getIndentedText(testText, level, includeIcon)
report(TestCanceled(tracker.nextOrdinal(), message, theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), testName, testText, Vector.empty, Some(throwable), Some(duration), Some(formatter), location, rerunnable))
}
private def reportTestSucceeded(theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, testText: String, duration: Long, formatter: Formatter, suiteRerunner: Option[String], location: Option[Location]) {
report(TestSucceeded(tracker.nextOrdinal(), theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), testName, testText, Vector.empty, Some(duration), Some(formatter),
location, suiteRerunner))
}
private def reportTestPending(theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, testText: String, duration: Long, formatter: Formatter, location: Option[Location]) {
report(TestPending(tracker.nextOrdinal(), theSuite.suiteName, theSuite.suiteId, Some(theSuite.getClass.getName), testName, testText, Vector.empty, Some(duration), Some(formatter),
location))
}
private def getMessageForException(e: Throwable): String =
if (e.getMessage != null)
e.getMessage
else
e.getClass.getName + " was thrown"
private def getIndentedText(testText: String, level: Int, includeIcon: Boolean) = {
val formattedText =
if (includeIcon) {
val testSucceededIcon = "-"
(" " * (if (level == 0) 0 else (level - 1))) + testSucceededIcon + " " + testText
}
else {
(" " * level) + testText
}
IndentedText(formattedText, testText, level)
}
def indentation(level: Int) = " " * level
private def reportTestIgnored(report: Reporter, tracker: Tracker, testName: String, testText: String, level: Int) {
val testSucceededIcon = "-"
val formattedText = indentation(level - 1) + (testSucceededIcon + " " + testText)
report(TestIgnored(tracker.nextOrdinal(), suiteName, suiteId, Some(getClass.getName), testName, testText, Some(IndentedText(formattedText, testText, level)),
None))
}
private def handleFailedTest(throwable: Throwable, testName: String,
report: Reporter, tracker: Tracker, duration: Long, location: Option[Location]) {
val message = getMessageForException(throwable)
val formatter = getIndentedText(testName, 1, true)
report(TestFailed(tracker.nextOrdinal(), message, suiteName, suiteId, Some(getClass.getName), testName, testName, Vector.empty, Some(throwable), Some(duration), Some(formatter), location, None))
}
override def runTest(testName: String, args: Args): org.scalatest.Status = {
if (!testStatuses(simpleName)(testName).isInstanceOf[Ignored])
reportTestStarting(this, args.reporter, args.tracker, testName, testName, None, None)
val formatter = getIndentedText(testName, 1, true)
testStatuses(simpleName)(testName) match {
case Pending(duration, remaining) =>
if (remaining > 1)
testStatuses(simpleName)(testName) = Pending(duration, remaining - 1)
else
testStatuses(simpleName)(testName) = Succeeded(duration)
reportTestPending(this, args.reporter, args.tracker, testName, testName, duration, formatter, None)
org.scalatest.SucceededStatus
case Ignored(duration, remaining) =>
if (remaining > 1)
testStatuses(simpleName)(testName) = Ignored(duration, remaining - 1)
else
testStatuses(simpleName)(testName) = Succeeded(duration)
reportTestIgnored(args.reporter, args.tracker, testName, testName, 1)
org.scalatest.SucceededStatus
case Canceled(duration, remaining) =>
if (remaining > 1)
testStatuses(simpleName)(testName) = Canceled(duration, remaining - 1)
else
testStatuses(simpleName)(testName) = Succeeded(duration)
val e = intercept[TestCanceledException] { cancel("Because of rain") }
val message = getMessageForException(e)
val formatter = getIndentedText(testName, 1, true)
args.reporter(TestCanceled(args.tracker.nextOrdinal(), message, suiteName, suiteId, Some(getClass.getName), testName, testName, Vector.empty, Some(e), Some(duration), Some(formatter), None, None))
org.scalatest.SucceededStatus
case Failed(duration, remaining) =>
if (remaining > 1)
testStatuses(simpleName)(testName) = Failed(duration, remaining - 1)
else
testStatuses(simpleName)(testName) = Succeeded(duration)
val e = intercept[TestFailedException] { fail("1 + 1 did not equal 3, even for very large values of 1") }
handleFailedTest(e, testName, args.reporter, args.tracker, duration, None)
org.scalatest.FailedStatus
case Succeeded(duration) =>
reportTestSucceeded(this, args.reporter, args.tracker, testName, testName, duration, formatter, None, None)
org.scalatest.SucceededStatus
}
}
}
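// Simulated outcome of a test: `duration` is the reported run time and, where present,
// `remaining` is how many more runs the test stays in this state before reverting to Succeeded.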
sealed abstract class Status {
val duration: Int
}
case class Pending(duration: Int, remaining: Int) extends Status
case class Succeeded(duration: Int) extends Status
case class Canceled(duration: Int, remaining: Int) extends Status
case class Ignored(duration: Int, remaining: Int) extends Status
case class Failed(duration: Int, remaining: Int) extends Status
object StateSuite {
val testCounts =
mutable.Map(
"Alabama" -> 0,
"Alaska" -> 0,
"Arizona" -> 0,
"Arkansas" -> 0,
"California" -> 0,
"Colorado" -> 0,
"Connecticut" -> 0,
"Delaware" -> 0,
"Florida" -> 0,
"Georgia" -> 0,
"Hawaii" -> 0,
"Idaho" -> 0,
"Illinois" -> 0,
"Indiana" -> 0,
"Iowa" -> 0,
"Kansas" -> 0,
"Kentucky" -> 0,
"Louisiana" -> 0,
"Maine" -> 0,
"Maryland" -> 0,
"Massachusetts" -> 0,
"Michigan" -> 0,
"Minnesota" -> 0,
"Mississippi" -> 0,
"Missouri" -> 0,
"Montana" -> 0,
"Nebraska" -> 0,
"Nevada" -> 0,
"NewHampshire" -> 0,
"NewJersey" -> 0,
"NewMexico" -> 0,
"NewYork" -> 0,
"NorthCarolina" -> 0,
"NorthDakota" -> 0,
"Ohio" -> 0,
"Oklahoma" -> 0,
"Oregon" -> 0,
"Pennsylvania" -> 0,
"RhodeIsland" -> 0,
"SouthCarolina" -> 0,
"SouthDakota" -> 0,
"Tennessee" -> 0,
"Texas" -> 0,
"Utah" -> 0,
"Vermont" -> 0,
"Virginia" -> 0,
"Washington" -> 0,
"WestVirginia" -> 0,
"Wisconsin" -> 0,
"Wyoming" -> 0
)
val testStatuses =
mutable.Map(
"Alabama" -> mutable.Map.empty[String, Status],
"Alaska" -> mutable.Map.empty[String, Status],
"Arizona" -> mutable.Map.empty[String, Status],
"Arkansas" -> mutable.Map.empty[String, Status],
"California" -> mutable.Map.empty[String, Status],
"Colorado" -> mutable.Map.empty[String, Status],
"Connecticut" -> mutable.Map.empty[String, Status],
"Delaware" -> mutable.Map.empty[String, Status],
"Florida" -> mutable.Map.empty[String, Status],
"Georgia" -> mutable.Map.empty[String, Status],
"Hawaii" -> mutable.Map.empty[String, Status],
"Idaho" -> mutable.Map.empty[String, Status],
"Illinois" -> mutable.Map.empty[String, Status],
"Indiana" -> mutable.Map.empty[String, Status],
"Iowa" -> mutable.Map.empty[String, Status],
"Kansas" -> mutable.Map.empty[String, Status],
"Kentucky" -> mutable.Map.empty[String, Status],
"Louisiana" -> mutable.Map.empty[String, Status],
"Maine" -> mutable.Map.empty[String, Status],
"Maryland" -> mutable.Map.empty[String, Status],
"Massachusetts" -> mutable.Map.empty[String, Status],
"Michigan" -> mutable.Map.empty[String, Status],
"Minnesota" -> mutable.Map.empty[String, Status],
"Mississippi" -> mutable.Map.empty[String, Status],
"Missouri" -> mutable.Map.empty[String, Status],
"Montana" -> mutable.Map.empty[String, Status],
"Nebraska" -> mutable.Map.empty[String, Status],
"Nevada" -> mutable.Map.empty[String, Status],
"NewHampshire" -> mutable.Map.empty[String, Status],
"NewJersey" -> mutable.Map.empty[String, Status],
"NewMexico" -> mutable.Map.empty[String, Status],
"NewYork" -> mutable.Map.empty[String, Status],
"NorthCarolina" -> mutable.Map.empty[String, Status],
"NorthDakota" -> mutable.Map.empty[String, Status],
"Ohio" -> mutable.Map.empty[String, Status],
"Oklahoma" -> mutable.Map.empty[String, Status],
"Oregon" -> mutable.Map.empty[String, Status],
"Pennsylvania" -> mutable.Map.empty[String, Status],
"RhodeIsland" -> mutable.Map.empty[String, Status],
"SouthCarolina" -> mutable.Map.empty[String, Status],
"SouthDakota" -> mutable.Map.empty[String, Status],
"Tennessee" -> mutable.Map.empty[String, Status],
"Texas" -> mutable.Map.empty[String, Status],
"Utah" -> mutable.Map.empty[String, Status],
"Vermont" -> mutable.Map.empty[String, Status],
"Virginia" -> mutable.Map.empty[String, Status],
"Washington" -> mutable.Map.empty[String, Status],
"WestVirginia" -> mutable.Map.empty[String, Status],
"Wisconsin" -> mutable.Map.empty[String, Status],
"Wyoming" -> mutable.Map.empty[String, Status]
)
val allTestNames =
Vector(
"When in the Course of human events",
"it becomes necessary for one people to dissolve the political bands which have connected them with another",
"and to assume among the powers of the earth",
"the separate and equal station to which the Laws of Nature and of Nature's God entitle them",
"a decent respect to the opinions of mankind requires that they should declare the causes which impel them to the separation",
"We hold these truths to be self-evident",
"that all men are created equal",
"that they are endowed by their Creator with certain unalienable Rights",
"that among these are Life, Liberty and the pursuit of Happiness.",
"That to secure these rights",
"Governments are instituted among Men",
"deriving their just powers from the consent of the governed",
"That whenever any Form of Government becomes destructive of these ends",
"it is the Right of the People to alter or to abolish it",
"and to institute new Government",
"laying its foundation on such principles and organizing its powers in such form",
"as to them shall seem most likely to effect their Safety and Happiness.",
"Prudence, indeed, will dictate that Governments long established should not be changed for light and transient causes",
"and accordingly all experience hath shewn",
"that mankind are more disposed to suffer",
"while evils are sufferable",
"than to right themselves by abolishing the forms to which they are accustomed",
"But when a long train of abuses and usurpations",
"pursuing invariably the same Object evinces a design to reduce them under absolute Despotism",
"it is their right",
"it is their duty",
"to throw off such Government",
"and to provide new Guards for their future security",
"Such has been the patient sufferance of these Colonies",
"and such is now the necessity which constrains them to alter their former Systems of Government",
"The history of the present King of Great Britain is a history of repeated injuries and usurpations",
"all having in direct object the establishment of an absolute Tyranny over these States",
"To prove this, let Facts be submitted to a candid world.",
"He has refused his Assent to Laws, the most wholesome and necessary for the public good",
"He has forbidden his Governors to pass Laws of immediate and pressing importance",
"unless suspended in their operation till his Assent should be obtained",
"and when so suspended, he has utterly neglected to attend to them",
"He has refused to pass other Laws for the accommodation of large districts of people",
"unless those people would relinquish the right of Representation in the Legislature",
"a right inestimable to them and formidable to tyrants only ",
"He has called together legislative bodies at places unusual, uncomfortable, and distant from the depository of their public Records",
"for the sole purpose of fatiguing them into compliance with his measures ",
"He has dissolved Representative Houses repeatedly",
"for opposing with manly firmness his invasions on the rights of the people.",
"He has refused for a long time, after such dissolutions, to cause others to be elected",
"whereby the Legislative powers, incapable of Annihilation, have returned to the People at large for their exercise",
"the State remaining in the mean time exposed to all the dangers of invasion from without, and convulsions within.",
"He has endeavoured to prevent the population of these States",
"for that purpose obstructing the Laws for Naturalization of Foreigners",
"refusing to pass others to encourage their migrations hither",
"and raising the conditions of new Appropriations of Lands",
"He has obstructed the Administration of Justice",
"by refusing his Assent to Laws for establishing Judiciary powers.",
"He has made Judges dependent on his Will alone",
"for the tenure of their offices",
"and the amount and payment of their salaries.",
"He has erected a multitude of New Offices",
"and sent hither swarms of Officers to harrass our people, and eat out their substance.",
"He has kept among us, in times of peace, Standing Armies without the Consent of our legislatures",
"He has affected to render the Military independent of and superior to the Civil power.",
"He has combined with others to subject us to a jurisdiction foreign to our constitution, and unacknowledged by our laws",
"giving his Assent to their Acts of pretended Legislation:",
"For Quartering large bodies of armed troops among us",
"For protecting them, by a mock Trial, from punishment for any Murders which they should commit on the Inhabitants of these States",
"For cutting off our Trade with all parts of the world",
"For imposing Taxes on us without our Consent:",
"For depriving us in many cases, of the benefits of Trial by Jury",
"For transporting us beyond Seas to be tried for pretended offences",
"For abolishing the free System of English Laws in a neighbouring Province",
"establishing therein an Arbitrary government",
"and enlarging its Boundaries so as to render it at once an example and fit instrument for introducing the same absolute rule into these Colonies",
"For taking away our Charters, abolishing our most valuable Laws, and altering fundamentally the Forms of our Governments",
"For suspending our own Legislatures, and declaring themselves invested with power to legislate for us in all cases whatsoever",
"He has abdicated Government here, by declaring us out of his Protection and waging War against us",
"He has plundered our seas, ravaged our Coasts, burnt our towns, and destroyed the lives of our people",
"He is at this time transporting large Armies of foreign Mercenaries to compleat the works of death, desolation and tyranny",
"already begun with circumstances of Cruelty & perfidy scarcely paralleled in the most barbarous ages",
"and totally unworthy the Head of a civilized nation.",
"He has constrained our fellow Citizens taken Captive on the high Seas to bear Arms against their Country",
"to become the executioners of their friends and Brethren",
"or to fall themselves by their Hands",
"He has excited domestic insurrections amongst us",
"and has endeavoured to bring on the inhabitants of our frontiers",
"the merciless Indian Savages, whose known rule of warfare",
"is an undistinguished destruction of all ages, sexes and conditions.",
"In every stage of these Oppressions We have Petitioned for Redress in the most humble terms",
"Our repeated Petitions have been answered only by repeated injury",
"A Prince whose character is thus marked by every act which may define a Tyrant",
"is unfit to be the ruler of a free people.",
"Nor have We been wanting in attentions to our Brittish brethren",
"We have warned them from time to time of attempts by their legislature to extend an unwarrantable jurisdiction over us",
"We have reminded them of the circumstances of our emigration and settlement here",
"We have appealed to their native justice and magnanimity",
"and we have conjured them by the ties of our common kindred to disavow these usurpations",
"which, would inevitably interrupt our connections and correspondence",
"They too have been deaf to the voice of justice and of consanguinity",
"We must, therefore, acquiesce in the necessity, which denounces our Separation",
"and hold them, as we hold the rest of mankind, Enemies in War, in Peace Friends",
"We, therefore, the Representatives of the united States of America",
"in General Congress, Assembled, appealing to the Supreme Judge of the world for the rectitude of our intentions",
"do, in the Name, and by Authority of the good People of these Colonies",
"solemnly publish and declare, That these United Colonies are",
"and of Right ought to be Free and Independent States",
"that they are Absolved from all Allegiance to the British Crown",
"and that all political connection between them and the State of Great Britain",
"is and ought to be totally dissolved",
"and that as Free and Independent States",
"they have full Power to levy War",
"conclude Peace",
"contract Alliances",
"establish Commerce",
"and to do all other Acts and Things which Independent States may of right do",
"And for the support of this Declaration",
"with a firm reliance on the protection of divine Providence",
"we mutually pledge to each other our Lives",
"our Fortunes and our sacred Honor"
)
}
object UnitedStates {
private var nestedSuiteCount = 0
val allStates =
Vector(
Alabama,
Alaska,
Arizona,
Arkansas,
California,
Colorado,
Connecticut,
Delaware,
Florida,
Georgia,
Hawaii,
Idaho,
Illinois,
Indiana,
Iowa,
Kansas,
Kentucky,
Louisiana,
Maine,
Maryland,
Massachusetts,
Michigan,
Minnesota,
Mississippi,
Missouri,
Montana,
Nebraska,
Nevada,
NewHampshire,
NewJersey,
NewMexico,
NewYork,
NorthCarolina,
NorthDakota,
Ohio,
Oklahoma,
Oregon,
Pennsylvania,
RhodeIsland,
SouthCarolina,
SouthDakota,
Tennessee,
Texas,
Utah,
Vermont,
Virginia,
Washington,
WestVirginia,
Wisconsin,
Wyoming
)
}
object Alabama extends StateSuite
object Alaska extends StateSuite
object Arizona extends StateSuite
object Arkansas extends StateSuite
object California extends StateSuite
object Colorado extends StateSuite
object Connecticut extends StateSuite
object Delaware extends StateSuite
object Florida extends StateSuite
object Georgia extends StateSuite
object Hawaii extends StateSuite
object Idaho extends StateSuite
object Illinois extends StateSuite
object Indiana extends StateSuite
object Iowa extends StateSuite
object Kansas extends StateSuite
object Kentucky extends StateSuite
object Louisiana extends StateSuite
object Maine extends StateSuite
object Maryland extends StateSuite
object Massachusetts extends StateSuite
object Michigan extends StateSuite
object Minnesota extends StateSuite
object Mississippi extends StateSuite
object Missouri extends StateSuite
object Montana extends StateSuite
object Nebraska extends StateSuite
object Nevada extends StateSuite
object NewHampshire extends StateSuite
object NewJersey extends StateSuite
object NewMexico extends StateSuite
object NewYork extends StateSuite
object NorthCarolina extends StateSuite
object NorthDakota extends StateSuite
object Ohio extends StateSuite
object Oklahoma extends StateSuite
object Oregon extends StateSuite
object Pennsylvania extends StateSuite
object RhodeIsland extends StateSuite
object SouthCarolina extends StateSuite
object SouthDakota extends StateSuite
object Tennessee extends StateSuite
object Texas extends StateSuite
object Utah extends StateSuite
object Vermont extends StateSuite
object Virginia extends StateSuite
object Washington extends StateSuite
object WestVirginia extends StateSuite
object Wisconsin extends StateSuite
object Wyoming extends StateSuite
| SRGOM/scalatest | scalatest-test/src/us/US.scala | Scala | apache-2.0 | 26,241 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.storm
import com.twitter.algebird.Semigroup
import com.twitter.storehaus.algebra.MergeableStore
import com.twitter.summingbird.batch.{ BatchID, Batcher }
import com.twitter.summingbird.online._
import com.twitter.util.Future
import java.util.{ Collections, HashMap, Map => JMap, UUID }
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable.SynchronizedMap
import java.util.WeakHashMap
import scala.collection.JavaConverters._
object TestStore {
private val testStores = new WeakHashMap[String, TestStore[_, _]]
def apply[K, V: Semigroup](storeID: String): Option[TestStore[K, V]] =
(Option(testStores.get(storeID)).map { s =>
s.asInstanceOf[TestStore[K, V]]
})
private def buildStore[K, V: Semigroup](initialData: Map[K, V]): String = {
val storeID = UUID.randomUUID.toString
val newInitStore = TestStore[K, V](storeID, initialData)
testStores.synchronized {
testStores.put(storeID, newInitStore)
}
storeID
}
def createBatchedStore[K, V](initialData: Map[(K, BatchID), V] = Map.empty[(K, BatchID), V])(implicit batcher: Batcher, valueSG: Semigroup[V]): (String, MergeableStoreFactory[(K, BatchID), V]) = {
val storeID = buildStore[(K, BatchID), V](initialData)
val supplier = MergeableStoreFactory.from(
TestStore.apply[(K, BatchID), V](storeID)
.getOrElse(sys.error("Weak hash map no longer contains store"))
)
(storeID, supplier)
}
def createStore[K, V: Semigroup](initialData: Map[K, V] = Map.empty[K, V]): (String, MergeableStoreFactory[(K, BatchID), V]) = {
val storeID = buildStore[K, V](initialData)
val supplier = MergeableStoreFactory.fromOnlineOnly(
TestStore.apply[K, V](storeID)
.getOrElse(sys.error("Weak hash map no longer contains store"))
)
(storeID, supplier)
}
}
case class TestStore[K, V: Semigroup](storeID: String, initialData: Map[K, V]) extends MergeableStore[K, V] {
private val backingStore: JMap[K, Option[V]] =
Collections.synchronizedMap(new HashMap[K, Option[V]]())
val updates: AtomicInteger = new AtomicInteger(0)
val reads: AtomicInteger = new AtomicInteger(0)
def toScala: Map[K, V] = backingStore.asScala.collect { case (k, Some(v)) => (k, v) }.toMap
private def getOpt(k: K) = {
reads.incrementAndGet
Option(backingStore.get(k)).flatMap(i => i)
}
val semigroup = implicitly[Semigroup[V]]
override def get(k: K) = Future.value(getOpt(k))
override def put(pair: (K, Option[V])) = {
val (k, optV) = pair
if (optV.isDefined)
backingStore.put(k, optV)
else
backingStore.remove(k)
updates.incrementAndGet
Future.Unit
}
override def merge(pair: (K, V)) = {
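    // Mergeable contract: combine the incoming value with any existing one via the
    // Semigroup and return the value that was present before the merge.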
val (k, v) = pair
val oldV = getOpt(k)
val newV = Semigroup.plus(Some(v), oldV)
updates.incrementAndGet
backingStore.put(k, newV)
Future.value(oldV)
}
}
| rangadi/summingbird | summingbird-storm-test/src/main/scala/com/twitter/summingbird/storm/TestStore.scala | Scala | apache-2.0 | 3,489 |
package gruenewa.grid
import gruenewa.prelude._
class GridRunException(cause: Throwable) extends Exception(cause)
object GridRun {
trait Executor {
def apply[A, B](func: A => B)(arg: A): B
def close()
}
def startScanner(callback: (String, Int) => Unit) = Discovery.startListener(callback)
def startExecutor(): Executor = {
val grid = Grid.startSession()
val dispatcher = new Dispatcher(grid)
val scanner = startScanner(dispatcher.listener)
new Executor {
def apply[A, B](func: A => B)(arg: A): B = {
dispatcher.dispatch(func)(arg)() match {
case Right(b) => b
case Left(e) => throw new GridRunException(e)
}
}
def close() {
scanner.close()
dispatcher.close()
grid.close()
}
}
}
def apply[T](block: Executor => T) {
using(startExecutor()) { executor =>
block(executor)
}
}
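  // Usage sketch (illustrative only; assumes at least one grid node is discoverable
  // on the local network):
  //   GridRun { executor =>
  //     println(executor((i: Int) => i + 1)(41))  // evaluated on a remote node
  //   }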
} | gruenewa/gruenewa-grid | src/main/scala/gruenewa/grid/GridRun.scala | Scala | gpl-3.0 | 933 |
package com.peterpotts.snake.predicate
case class And[T](predicates: Seq[Predicate[T]]) extends Predicate[T] {
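  // Evaluates every predicate (no short-circuiting) and ANDs the results;
  // requires a non-empty predicate sequence, since reduce fails on empty input.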
def apply(value: T) = predicates.map(_(value)).reduce(_ && _)
override def toString() = predicates.mkString(" && ")
}
| peterpotts/snake | src/main/scala/com/peterpotts/snake/predicate/And.scala | Scala | mit | 235 |
package me.apidoc.swagger.translators
import lib.Primitives
import io.apibuilder.spec.v0.{ models => apidoc }
import me.apidoc.swagger.Util
import io.swagger.{ models => swagger }
object Response {
def apply(
resolver: Resolver,
code: String,
response: swagger.Response
): apidoc.Response = {
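    // Swagger's special "default" response maps to apidoc's default response code;
    // any other key is expected to be a numeric HTTP status code.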
val responseCode = if (code == "default") {
apidoc.ResponseCodeOption.Default
} else {
apidoc.ResponseCodeInt(code.toInt)
}
    // Not mapped here: response.getExamples, response.getHeaders
apidoc.Response(
code = responseCode,
`type` = Option(response.getSchema) match {
case None => Primitives.Unit.toString
case Some(schema) => resolver.schemaType(schema)
},
description = Option(response.getDescription),
deprecation = None,
attributes = Util.vendorExtensionsToAttributesOpt(response.getVendorExtensions)
)
}
}
| gheine/apidoc | swagger/src/main/scala/me/apidoc/swagger/translators/Response.scala | Scala | mit | 887 |
package com.datawizards.sparklocal.session
import com.datawizards.sparklocal.SparkLocalBaseTest
import com.datawizards.sparklocal.dataset.DataSetAPI
import com.datawizards.sparklocal.rdd.RDDAPI
import com.datawizards.sparklocal.implicits._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CreateDatasetTest extends SparkLocalBaseTest {
val data = Array(1,2,3,4,5)
test("Create DataSet - result") {
assertDatasetOperationResult(createDataset(ExecutionEngine.ScalaEager)) {
data
}
}
test("Create DataSet(RDD) - result") {
assertDatasetOperationResult(createDatasetRDD(ExecutionEngine.ScalaEager)) {
data
}
}
test("Create DataSet - equals") {
assertDatasetEquals(
createDataset(ExecutionEngine.ScalaEager),
createDataset(ExecutionEngine.ScalaLazy)
)
assertDatasetEquals(
createDataset(ExecutionEngine.ScalaEager),
createDataset(ExecutionEngine.ScalaParallel)
)
assertDatasetEquals(
createDataset(ExecutionEngine.ScalaEager),
createDataset(ExecutionEngine.ScalaParallelLazy)
)
assertDatasetEquals(
createDataset(ExecutionEngine.ScalaEager),
createDataset(ExecutionEngine.Spark)
)
}
test("Create DataSet(RDD) - equals") {
assertDatasetEquals(
createDatasetRDD(ExecutionEngine.ScalaEager),
createDatasetRDD(ExecutionEngine.ScalaLazy)
)
assertDatasetEquals(
createDatasetRDD(ExecutionEngine.ScalaEager),
createDatasetRDD(ExecutionEngine.ScalaParallel)
)
assertDatasetEquals(
createDatasetRDD(ExecutionEngine.ScalaEager),
createDatasetRDD(ExecutionEngine.ScalaParallelLazy)
)
assertDatasetEquals(
createDatasetRDD(ExecutionEngine.ScalaEager),
createDatasetRDD(ExecutionEngine.Spark)
)
}
private def createDataset[Session <: SparkSessionAPI](engine: ExecutionEngine[Session]): DataSetAPI[Int] =
SparkSessionAPI
.builder(engine)
.master("local")
.getOrCreate()
.createDataset(data)
private def createDatasetRDD[Session <: SparkSessionAPI](engine: ExecutionEngine[Session]): DataSetAPI[Int] =
SparkSessionAPI
.builder(engine)
.master("local")
.getOrCreate()
.createDataset(RDDAPI(data))
} | piotr-kalanski/spark-local | src/test/scala/com/datawizards/sparklocal/session/CreateDatasetTest.scala | Scala | apache-2.0 | 2,312 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.api.ml
import org.scalatest.FunSuite
import org.scalatest.Matchers
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.Pipeline
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.feature.{HashingTF, Tokenizer}
import org.apache.spark.ml.tuning.{ParamGridBuilder, CrossValidator}
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.sql._
import scala.reflect.runtime.universe._
case class LabeledDocument[T:TypeTag](id: Long, text: String, label: Double)
case class Document[T:TypeTag](id: Long, text: String)
class LogisticRegressionSuite extends FunSuite with WrapperSparkContext with Matchers {
  // Note: this is required by every test to ensure it runs successfully on a Windows laptop.
val loadConfig = ScalaAutomatedTestBase
test("run logistic regression with default") {
    // Make sure the SystemML home is set when running the wrapper.
val newSparkSession = SparkSession.builder().master("local").appName("TestLocal").getOrCreate();
import newSparkSession.implicits._
val training = sc.parallelize(Seq(
LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)),
LabeledPoint(1.0, Vectors.dense(1.0, 0.4, 2.1)),
LabeledPoint(2.0, Vectors.dense(1.2, 0.0, 3.5))))
val testing = sc.parallelize(Seq(
LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)),
LabeledPoint(1.0, Vectors.dense(1.0, 0.4, 2.1)),
LabeledPoint(2.0, Vectors.dense(1.2, 0.0, 3.5))))
val lr = new LogisticRegression("log", sc)
val lrmodel = lr.fit(training.toDF)
lrmodel.transform(testing.toDF).show
lr.getIcpt shouldBe 0
lrmodel.getIcpt shouldBe lr.getIcpt
lrmodel.getMaxInnerIter shouldBe lr.getMaxInnerIter
}
test("test logistic regression with mlpipeline"){
    // Make sure the SystemML home is set when running the wrapper.
val newSparkSession = SparkSession.builder().master("local").appName("TestLocal").getOrCreate();
import newSparkSession.implicits._
val training = sc.parallelize(Seq(
LabeledDocument(0L, "a b c d e spark", 1.0),
LabeledDocument(1L, "b d", 2.0),
LabeledDocument(2L, "spark f g h", 1.0),
LabeledDocument(3L, "hadoop mapreduce", 2.0),
LabeledDocument(4L, "b spark who", 1.0),
LabeledDocument(5L, "g d a y", 2.0),
LabeledDocument(6L, "spark fly", 1.0),
LabeledDocument(7L, "was mapreduce", 2.0),
LabeledDocument(8L, "e spark program", 1.0),
LabeledDocument(9L, "a e c l", 2.0),
LabeledDocument(10L, "spark compile", 1.0),
LabeledDocument(11L, "hadoop software", 2.0)))
val tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words")
val hashingTF = new HashingTF().setNumFeatures(1000).setInputCol(tokenizer.getOutputCol).setOutputCol("features")
val lr = new LogisticRegression("log",sc)
val pipeline = new Pipeline().setStages(Array(tokenizer, hashingTF, lr))
val crossval = new CrossValidator().setEstimator(pipeline).setEvaluator(new BinaryClassificationEvaluator)
val paramGrid = new ParamGridBuilder().addGrid(hashingTF.numFeatures, Array(10, 100, 1000)).addGrid(lr.regParam, Array(0.1, 0.01)).build()
crossval.setEstimatorParamMaps(paramGrid)
crossval.setNumFolds(2)
val lrmodel = crossval.fit(training.toDF)
val test = sc.parallelize(Seq(
Document(12L, "spark i j k"),
Document(13L, "l m n"),
Document(14L, "mapreduce spark"),
Document(15L, "apache hadoop")))
lrmodel.transform(test.toDF).show
lr.getIcpt shouldBe 0
// lrmodel.getIcpt shouldBe lr.getIcpt
// lrmodel.getMaxInnerIter shouldBe lr.getMaxInnerIter
}
} | deroneriksson/incubator-systemml | src/test/scala/org/apache/sysml/api/ml/LogisticRegressionSuite.scala | Scala | apache-2.0 | 4,519 |
/*
* Copyright (c) 2014-2016
* nonblocking.at gmbh [http://www.nonblocking.at]
*
* This file is part of Cliwix.
*
* Cliwix is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package at.nonblocking.cliwix.core.validation
import java.util.UUID
import at.nonblocking.cliwix.core.util.{TreeTypeUtils, ListTypeUtils}
import at.nonblocking.cliwix.model.{DocumentLibraryFolder, LiferayConfig}
import scala.collection.mutable
import scala.collection.JavaConversions._
private[core] class UniqueIdsLiferayConfigValidator extends LiferayConfigValidator with ListTypeUtils with TreeTypeUtils {
val UNIQUE_VALUE_WEB_ID = "Company.webId"
val UNIQUE_VALUE_VIRTUAL_HOST = "virtualHost"
val UNIQUE_VALUE_USERGROUP_NAME = "UserGroup.name"
val UNIQUE_VALUE_ROLE_NAME = "Role.name"
val UNIQUE_VALUE_ORGANIZATION_NAME = "Organization.name"
val UNIQUE_VALUE_USER_SCREENNAME = "User.screenName"
val UNIQUE_VALUE_USER_EMAILADDRESS = "User.emailAddress"
val UNIQUE_VALUE_SITE_NAME = "Site.name"
val UNIQUE_VALUE_SITE_FRIENDLY_URL = "Site.friendlyURL"
val UNIQUE_VALUE_PAGE_URL = "Page.url"
val UNIQUE_VALUE_FILE_FOLDER_NAME = "Folder.name and File.name"
val UNIQUE_VALUE_ARTICLE_ID = "Article.articleId"
val UNIQUE_VALUE_ARTICLE_STRUCTURE_ID = "ArticleStructure.structureId"
val UNIQUE_VALUE_ARTICLE_TEMPLATE_ID = "ArticleTemplate.templateId"
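  // Maps a uniqueness scope (the constraint name, optionally suffixed with a sub-key such
  // as the company or site) to the values already seen within that scope.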
var keyMap: mutable.HashMap[String, mutable.MutableList[String]] = _
override def validate(liferayConfig: LiferayConfig): List[ValidationError] = {
assert(liferayConfig != null, "liferayConfig != null")
keyMap = new mutable.HashMap[String, mutable.MutableList[String]]()
val messages = new mutable.MutableList[ValidationError]()
safeForeach(liferayConfig.getCompanies){ company =>
checkUnique(UNIQUE_VALUE_WEB_ID, null, company.getWebId, s"Company: ${company.identifiedBy()}", messages)
if (company.getCompanyConfiguration != null) {
checkUnique(UNIQUE_VALUE_VIRTUAL_HOST, null, company.getCompanyConfiguration.getVirtualHost, s"Company: ${company.identifiedBy()}", messages)
}
safeForeach(company.getRoles) { role =>
checkUnique(UNIQUE_VALUE_ROLE_NAME, company.identifiedBy(), role.getName, s"Company: ${company.identifiedBy()}, Role: ${role.identifiedBy()}", messages, ignoreCase = true)
}
safeForeach(company.getUserGroups) { userGroup =>
checkUnique(UNIQUE_VALUE_USERGROUP_NAME, company.identifiedBy(), userGroup.getName, s"Company: ${company.identifiedBy()}, UserGroup: ${userGroup.identifiedBy()}", messages, ignoreCase = true)
}
safeProcessRecursively(company.getOrganizations) { org =>
checkUnique(UNIQUE_VALUE_ORGANIZATION_NAME, company.identifiedBy(), org.getName, s"Company: ${company.identifiedBy()}, Organization: ${org.identifiedBy()}", messages, ignoreCase = true)
}
safeForeach(company.getUsers) { user =>
checkUnique(UNIQUE_VALUE_USER_SCREENNAME, company.identifiedBy(), user.getScreenName, s"Company: ${company.identifiedBy()}, User: ${user.identifiedBy()}", messages, ignoreCase = true)
checkUnique(UNIQUE_VALUE_USER_EMAILADDRESS, company.identifiedBy(), user.getEmailAddress, s"Company: ${company.identifiedBy()}, User: ${user.identifiedBy()}", messages, ignoreCase = true)
}
safeForeach(company.getSites) { site =>
checkUnique(UNIQUE_VALUE_SITE_NAME, company.identifiedBy(), site.getName, s"Company: ${company.identifiedBy()}, Site:${site.identifiedBy()}", messages, ignoreCase = true)
if (site.getSiteConfiguration != null) {
checkUnique(UNIQUE_VALUE_SITE_FRIENDLY_URL, company.identifiedBy(), site.getSiteConfiguration.getFriendlyURL, s"Site:${site.identifiedBy()}", messages)
checkUnique(UNIQUE_VALUE_VIRTUAL_HOST, null, site.getSiteConfiguration.getVirtualHostPublicPages, s"Company: ${company.identifiedBy()}, Site: ${site.identifiedBy()}, PublicPages", messages)
checkUnique(UNIQUE_VALUE_VIRTUAL_HOST, null, site.getSiteConfiguration.getVirtualHostPrivatePages, s"Company: ${company.identifiedBy()}, Site: ${site.identifiedBy()}, PrivatePages", messages)
}
val subKeyBaseSite = company.identifiedBy() + "_" + site.identifiedBy()
if (site.getPublicPages != null) safeProcessRecursivelyWithParent(site.getPublicPages.getPages) { (parent, page) =>
val parentPageName = if (parent == null) "/" else parent.identifiedBy()
val subKey = subKeyBaseSite + "_" + "public"
checkUnique(UNIQUE_VALUE_PAGE_URL, subKey, page.getFriendlyUrl, s"Company: ${company.identifiedBy()}, Site: ${site.identifiedBy()}, Public pages, Parent page:$parentPageName", messages)
}
if (site.getPrivatePages != null) safeProcessRecursivelyWithParent(site.getPrivatePages.getPages) { (parent, page) =>
val parentPageName = if (parent == null) "/" else parent.identifiedBy()
val subKey = subKeyBaseSite + "_" + "private" + parentPageName
checkUnique(UNIQUE_VALUE_PAGE_URL, subKey, page.getFriendlyUrl, s"Company: ${company.identifiedBy()}, Site: ${site.identifiedBy()}, Private pages, Parent page:$parentPageName", messages)
}
val siteContent = site.getSiteContent
if (siteContent != null) {
safeProcessRecursivelyWithParent(siteContent.getDocumentLibrary) { (parent, item) =>
item match {
case folder: DocumentLibraryFolder =>
val folderName = if (folder.getName == null) "/" else folder.getName
val subKey = UUID.randomUUID().toString
if (folder.getSubItems != null) folder.getSubItems.foreach { subItem =>
                  checkUnique(UNIQUE_VALUE_FILE_FOLDER_NAME, subKey, subItem.getName, s"Company: ${company.identifiedBy()}, Site: ${site.identifiedBy()}, Parent folder: $folderName", messages, ignoreCase = false)
}
case _ =>
}
}
if (siteContent.getWebContent != null) {
safeProcessRecursively(siteContent.getWebContent.getStructures) { articleStructure =>
checkUnique(UNIQUE_VALUE_ARTICLE_STRUCTURE_ID, subKeyBaseSite, articleStructure.getStructureId, s"Company: ${company.identifiedBy()}, Site: ${site.identifiedBy()}, ArticleStructure: ${articleStructure.identifiedBy()}", messages)
}
safeForeach(siteContent.getWebContent.getTemplates) { articleTemplates =>
checkUnique(UNIQUE_VALUE_ARTICLE_TEMPLATE_ID, subKeyBaseSite, articleTemplates.getTemplateId, s"Company: ${company.identifiedBy()}, Site: ${site.identifiedBy()}, ArticleTemplate: ${articleTemplates.identifiedBy()}", messages)
}
safeForeach(siteContent.getWebContent.getArticles) { article =>
checkUnique(UNIQUE_VALUE_ARTICLE_ID, subKeyBaseSite, article.getArticleId, s"Company: ${company.identifiedBy()}, Site: ${site.identifiedBy()}, Article: ${article.identifiedBy()}", messages)
}
}
}
}
}
messages.toList
}
private def checkUnique(key: String, subKey: String, value: String, location: String, messages: mutable.MutableList[ValidationError], ignoreCase: Boolean = false) = {
if (value != null) {
val compareValue =
if (ignoreCase) value.toLowerCase
else value
val fullKey = if (subKey == null) key else key + "_" + subKey
if (!keyMap.contains(fullKey)) keyMap.put(fullKey, new mutable.MutableList[String])
val list = keyMap.get(fullKey).get
if (list.contains(compareValue)) {
messages += new ValidationError(s"$key must be unique! Duplicate value: '$value'.", location, null)
} else {
list += compareValue
}
}
}
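  // Usage sketch of checkUnique (hypothetical values, not taken from a real configuration):
  //   checkUnique(UNIQUE_VALUE_USER_SCREENNAME, "company1", "jdoe", "Company: company1, User: jdoe", messages, ignoreCase = true)
  //   checkUnique(UNIQUE_VALUE_USER_SCREENNAME, "company1", "JDoe", "Company: company1, User: JDoe", messages, ignoreCase = true)
  // The first call registers "jdoe" under the composite key "User.screenName_company1"; the second
  // call lower-cases "JDoe", detects the duplicate and appends a ValidationError for that location.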
}
| nonblocking/cliwix | cliwix-core/src/main/scala/at/nonblocking/cliwix/core/validation/UniqueIdsLiferayConfigValidator.scala | Scala | agpl-3.0 | 8,350 |
/*                                                                      *\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\*                                                                      */
package squants.electro
import squants._
import squants.energy.Joules
import squants.time.{ Time, TimeIntegral }
/**
* @author garyKeorkunian
* @since 0.1
*
* @param value value in [[squants.electro.Coulombs]]
*/
final class ElectricCharge private (val value: Double, val unit: ElectricChargeUnit)
extends Quantity[ElectricCharge]
with TimeIntegral[ElectricCurrent] {
def dimension = ElectricCharge
protected def timeDerived = Amperes(toCoulombs)
protected def time = Seconds(1)
def *(that: ElectricPotential): Energy = Joules(this.toCoulombs * that.toVolts)
def /(that: ElectricPotential): Capacitance = Farads(this.toCoulombs / that.toVolts)
def /(that: Capacitance): ElectricPotential = Volts(this.toCoulombs / that.toFarads)
def /(that: Length): LinearElectricChargeDensity = CoulombsPerMeter(this.toCoulombs / that.toMeters)
def /(that: Area): AreaElectricChargeDensity = CoulombsPerSquareMeter(this.toCoulombs / that.toSquareMeters)
def /(that: Volume): ElectricChargeDensity = CoulombsPerCubicMeter(this.toCoulombs / that.toCubicMeters)
def /(that: Mass): ElectricChargeMassRatio = CoulombsPerKilogram(this.toCoulombs / that.toKilograms)
def toCoulombs = to(Coulombs)
def toPicocoulombs = to(Picocoulombs)
def toNanocoulombs = to(Nanocoulombs)
def toMicrocoulombs = to(Microcoulombs)
  def toMillicoulombs = to(Millicoulombs)
def toAbcoulombs = to(Abcoulombs)
def toAmpereHours = to(AmpereHours)
def toMilliampereHours = to(MilliampereHours)
def toMilliampereSeconds = to(MilliampereSeconds)
}
object ElectricCharge extends Dimension[ElectricCharge] {
private[electro] def apply[A](n: A, unit: ElectricChargeUnit)(implicit num: Numeric[A]) = new ElectricCharge(num.toDouble(n), unit)
def apply(value: Any) = parse(value)
def name = "ElectricCharge"
def primaryUnit = Coulombs
def siUnit = Coulombs
def units = Set(Coulombs, Picocoulombs, Nanocoulombs, Microcoulombs, Millicoulombs, Abcoulombs,
AmpereHours, MilliampereHours, MilliampereSeconds)
}
trait ElectricChargeUnit extends UnitOfMeasure[ElectricCharge] with UnitConverter {
def apply[A](n: A)(implicit num: Numeric[A]) = ElectricCharge(n, this)
}
object Coulombs extends ElectricChargeUnit with PrimaryUnit with SiUnit {
val symbol = "C"
}
object Picocoulombs extends ElectricChargeUnit with SiUnit {
val symbol = "pC"
val conversionFactor = MetricSystem.Pico
}
object Nanocoulombs extends ElectricChargeUnit with SiUnit {
val symbol = "nC"
val conversionFactor = MetricSystem.Nano
}
object Microcoulombs extends ElectricChargeUnit with SiUnit {
val symbol = "µC"
val conversionFactor = MetricSystem.Micro
}
object Millicoulombs extends ElectricChargeUnit with SiUnit {
val symbol = "mC"
val conversionFactor = MetricSystem.Milli
}
object Abcoulombs extends ElectricChargeUnit {
val symbol = "aC"
val conversionFactor = 10d
}
object AmpereHours extends ElectricChargeUnit {
val symbol = "Ah"
val conversionFactor = Time.SecondsPerHour
}
object MilliampereHours extends ElectricChargeUnit {
val symbol = "mAh"
val conversionFactor = AmpereHours.conversionFactor * MetricSystem.Milli
}
object MilliampereSeconds extends ElectricChargeUnit {
val symbol = "mAs"
val conversionFactor = Coulombs.conversionFactor * MetricSystem.Milli
}
object ElectricChargeConversions {
lazy val coulomb = Coulombs(1)
lazy val picocoulomb = Picocoulombs(1)
lazy val nanocoulomb = Nanocoulombs(1)
lazy val microcoulomb = Microcoulombs(1)
lazy val millicoulomb = Millicoulombs(1)
lazy val abcoulomb = Abcoulombs(1)
lazy val ampereHour = AmpereHours(1)
lazy val milliampereHour = MilliampereHours(1)
lazy val milliampereSecond = MilliampereSeconds(1)
implicit class ElectricalChargeConversions[A](n: A)(implicit num: Numeric[A]) {
def coulombs = Coulombs(n)
def picocoulombs = Picocoulombs(n)
def nanocoulombs = Nanocoulombs(n)
def microcoulombs = Microcoulombs(n)
def millicoulombs = Millicoulombs(n)
def abcoulombs = Abcoulombs(n)
def ampereHours = AmpereHours(n)
def milliampereHours = MilliampereHours(n)
def milliampereSeconds = MilliampereSeconds(n)
}
implicit object ElectricalChargeNumeric
extends AbstractQuantityNumeric[ElectricCharge](ElectricCharge.primaryUnit)
}
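// Usage sketch (assumes the imports shown; values follow from the conversion factors above):
//   import squants.electro._
//   import squants.electro.ElectricChargeConversions._
//   val q = 2.ampereHours      // ElectricCharge of 2 Ah
//   q.toCoulombs               // 7200.0, since 1 Ah = 3600 C
//   Coulombs(10) * Volts(5)    // Joules(50): charge * potential = energy
//   Coulombs(12) / Volts(4)    // Farads(3): charge / potential = capacitance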
| garyKeorkunian/squants | shared/src/main/scala/squants/electro/ElectricCharge.scala | Scala | apache-2.0 | 4,829 |
package delta.jdbc.mysql
import java.sql.Connection
import java.sql.SQLException
import delta.jdbc.AbstractJdbcStore
/**
* MySQL syntax adapter for stream process stores.
*/
trait MySQLSyntax {
self: AbstractJdbcStore =>
import MySQLDialect._
override protected def schemaDDL(schema: String): String =
self.schemaDDL(schema).replace("SCHEMA", "DATABASE")
override protected def createIndex(conn: Connection, ddl: String): Unit = {
val validDDL = ddl.replace("IF NOT EXISTS", "")
try self.createIndex(conn, validDDL) catch {
case sqlEx: SQLException if isIndexAlreadyExists(sqlEx) => // Ignore
}
}
override protected def dropIndex(conn: Connection, ddl: String): Unit = {
val validDDL = ddl.replace("IF EXISTS", "")
try self.dropIndex(conn, validDDL) catch {
case sqlEx: SQLException if isIndexDoesNotExist(sqlEx) => // Ignore
}
}
}
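// Usage sketch: mix this trait into a concrete store, e.g. `new SomeJdbcProcessStore(...) with MySQLSyntax`
// (SomeJdbcProcessStore stands for any AbstractJdbcStore implementation; the name is illustrative).
// Assuming the base store emits DDL such as "CREATE SCHEMA IF NOT EXISTS delta", schemaDDL rewrites it
// to "CREATE DATABASE IF NOT EXISTS delta"; createIndex/dropIndex strip the IF (NOT) EXISTS clauses
// MySQL rejects and silently ignore the resulting "index already exists" / "index does not exist" errors.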
| nilskp/delta | delta-jdbc/src/main/scala/delta/jdbc/mysql/MySQLSyntax.scala | Scala | mit | 893 |
/**
* Copyright 2017 Interel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package core3.test.specs.unit.config
import core3.test.specs.unit.UnitSpec
import core3.config.DynamicConfig
class DynamicConfigSpec extends UnitSpec {
case class FixtureParam()
def withFixture(test: OneArgTest) = withFixture(test.toNoArgTest(FixtureParam()))
"A DynamicConfig object" should "successfully retrieve and reload the configuration" in {
_ =>
val config = DynamicConfig.get.getConfig("testing.config")
config.getString("valueOne") should be("1")
config.getDouble("valueTwo") should be(2.3)
config.getBoolean("valueThree") should be(true)
DynamicConfig.reload()
val reloadedConfig = DynamicConfig.get.getConfig("testing.config")
reloadedConfig.getString("valueOne") should be("1")
reloadedConfig.getDouble("valueTwo") should be(2.3)
reloadedConfig.getBoolean("valueThree") should be(true)
}
}
| Interel-Group/core3 | src/test/scala/core3/test/specs/unit/config/DynamicConfigSpec.scala | Scala | apache-2.0 | 1,480 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package statements
import org.jetbrains.plugins.scala.lang.psi.api.base.ScIdList
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
/**
* @author Alexander Podkhalyuzin
*/
trait ScVariableDeclaration extends ScVariable with ScTypedDeclaration {
def getIdList: ScIdList
def declaredElements : Seq[ScTypedDefinition]
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/statements/ScVariableDeclaration.scala | Scala | apache-2.0 | 419 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import org.apache.spark.JobExecutionStatus
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler.{AccumulableInfo, TaskInfo}
import org.apache.spark.util.collection.OpenHashSet
import scala.collection.mutable
import scala.collection.mutable.HashMap
private[spark] object UIData {
class ExecutorSummary {
var taskTime : Long = 0
var failedTasks : Int = 0
var succeededTasks : Int = 0
var inputBytes : Long = 0
var inputRecords : Long = 0
var outputBytes : Long = 0
var outputRecords : Long = 0
var shuffleRead : Long = 0
var shuffleReadRecords : Long = 0
var shuffleWrite : Long = 0
var shuffleWriteRecords : Long = 0
var memoryBytesSpilled : Long = 0
var diskBytesSpilled : Long = 0
}
class JobUIData(
var jobId: Int = -1,
var submissionTime: Option[Long] = None,
var completionTime: Option[Long] = None,
var stageIds: Seq[Int] = Seq.empty,
var jobGroup: Option[String] = None,
var status: JobExecutionStatus = JobExecutionStatus.UNKNOWN,
/* Tasks */
// `numTasks` is a potential underestimate of the true number of tasks that this job will run.
// This may be an underestimate because the job start event references all of the result
// stages' transitive stage dependencies, but some of these stages might be skipped if their
// output is available from earlier runs.
// See https://github.com/apache/spark/pull/3009 for a more extensive discussion.
var numTasks: Int = 0,
var numActiveTasks: Int = 0,
var numCompletedTasks: Int = 0,
var numSkippedTasks: Int = 0,
var numFailedTasks: Int = 0,
/* Stages */
var numActiveStages: Int = 0,
// This needs to be a set instead of a simple count to prevent double-counting of rerun stages:
var completedStageIndices: mutable.HashSet[Int] = new mutable.HashSet[Int](),
var numSkippedStages: Int = 0,
var numFailedStages: Int = 0
)
class StageUIData {
var numActiveTasks: Int = _
var numCompleteTasks: Int = _
var completedIndices = new OpenHashSet[Int]()
var numFailedTasks: Int = _
var executorRunTime: Long = _
var inputBytes: Long = _
var inputRecords: Long = _
var outputBytes: Long = _
var outputRecords: Long = _
var shuffleReadTotalBytes: Long = _
var shuffleReadRecords : Long = _
var shuffleWriteBytes: Long = _
var shuffleWriteRecords: Long = _
var memoryBytesSpilled: Long = _
var diskBytesSpilled: Long = _
var schedulingPool: String = ""
var description: Option[String] = None
var accumulables = new HashMap[Long, AccumulableInfo]
var taskData = new HashMap[Long, TaskUIData]
var executorSummary = new HashMap[String, ExecutorSummary]
def hasInput: Boolean = inputBytes > 0
def hasOutput: Boolean = outputBytes > 0
def hasShuffleRead: Boolean = shuffleReadTotalBytes > 0
def hasShuffleWrite: Boolean = shuffleWriteBytes > 0
def hasBytesSpilled: Boolean = memoryBytesSpilled > 0 && diskBytesSpilled > 0
}
/**
* These are kept mutable and reused throughout a task's lifetime to avoid excessive reallocation.
*/
case class TaskUIData(
var taskInfo: TaskInfo,
var taskMetrics: Option[TaskMetrics] = None,
var errorMessage: Option[String] = None)
case class ExecutorUIData(
val startTime: Long,
var finishTime: Option[Long] = None,
var finishReason: Option[String] = None)
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/main/scala/org/apache/spark/ui/jobs/UIData.scala | Scala | apache-2.0 | 4,303 |
package knot.data
object ByteArrayIterator {
def apply(bytes: Array[Byte]): ByteArrayIterator = new ByteArrayIterator(bytes, 0, bytes.length)
def apply(bytes: Array[Byte], from: Int, until: Int) = new ByteArrayIterator(bytes, from, until)
}
class ByteArrayIterator(private var bytes: Array[Byte], private var from: Int, private var until: Int) extends BufferedIterator[Byte] {
def len: Int = until - from
override def head: Byte = bytes(from)
override def hasNext: Boolean = from < until
override def next(): Byte = {
if (!hasNext) Iterator.empty.next
else {
val i = from
from = from + 1
bytes(i)
}
}
override def drop(n: Int): Iterator[Byte] = {
if (n > 0) from = if (n < this.len) from + n else until
this
}
override def copyToArray[B >: Byte](xs: Array[B], start: Int, len: Int): Unit = {
val n = 0 max ((xs.length - start) min this.len min len)
Array.copy(bytes, from, xs, start, n)
this.drop(n)
}
}
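// Usage sketch:
//   val it = ByteArrayIterator(Array[Byte](10, 20, 30, 40))
//   it.len                      // 4
//   it.head                     // 10
//   it.drop(2)                  // advances the start index past 10 and 20
//   it.next()                   // 30
//   val buf = new Array[Byte](1)
//   it.copyToArray(buf, 0, 1)   // copies the remaining byte 40 into buf and consumes it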
| defvar/knot | knot-data/src/main/scala/knot/data/ByteArrayIterator.scala | Scala | mit | 985 |
package equellatests
import com.tle.webtests.framework.{PageContext, ScreenshotTaker, TestConfig}
import com.tle.webtests.pageobject.LoginPage
import equellatests.TestChecker.withBrowserDriver
import equellatests.domain.TestLogon
import org.scalacheck.{Prop, Properties}
import scala.util.{Success, Try}
abstract class ShotProperties(name: String) extends Properties(name) {
def withLogon[A](logon: TestLogon)(f: PageContext => Prop): Prop = {
val testConfig = new TestConfig(GlobalConfig.baseFolderForInst(logon.inst), false)
withBrowserDriver(testConfig) { driver =>
val context = new PageContext(driver, testConfig, testConfig.getInstitutionUrl)
def quitDriver(shot: Boolean) = {
if (shot)
Try(
ScreenshotTaker.takeScreenshot(driver,
context.getTestConfig.getScreenshotFolder,
name,
context.getTestConfig.isChromeDriverSet))
Try(driver.quit())
}
Try {
new LoginPage(context).load().login(logon.username, logon.password)
f(context)
}.transform({ p =>
Success(p.map { r =>
quitDriver(r.failure)
r
})
}, { failure =>
quitDriver(true)
throw failure
})
.get
}
}
}
| equella/Equella | autotest/Tests/src/test/scala/equellatests/ShotProperties.scala | Scala | apache-2.0 | 1,384 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.utils.TestUtils
import kafka.zk.ZooKeeperTestHarness
import org.apache.kafka.common.KafkaException
import org.apache.zookeeper.KeeperException.NodeExistsException
import org.easymock.EasyMock
import org.junit.Assert._
import org.junit.{After, Test}
class ServerStartupTest extends ZooKeeperTestHarness {
private var server: KafkaServer = null
@After
override def tearDown(): Unit = {
if (server != null)
TestUtils.shutdownServers(Seq(server))
super.tearDown()
}
@Test
def testBrokerCreatesZKChroot(): Unit = {
val brokerId = 0
val zookeeperChroot = "/kafka-chroot-for-unittest"
val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
val zooKeeperConnect = props.get("zookeeper.connect")
props.put("zookeeper.connect", zooKeeperConnect.toString + zookeeperChroot)
server = TestUtils.createServer(KafkaConfig.fromProps(props))
val pathExists = zkClient.pathExists(zookeeperChroot)
assertTrue(pathExists)
}
@Test
def testConflictBrokerStartupWithSamePort(): Unit = {
// Create and start first broker
val brokerId1 = 0
val props1 = TestUtils.createBrokerConfig(brokerId1, zkConnect)
server = TestUtils.createServer(KafkaConfig.fromProps(props1))
val port = TestUtils.boundPort(server)
    // Create a second broker with the same port
val brokerId2 = 1
val props2 = TestUtils.createBrokerConfig(brokerId2, zkConnect, port = port)
try {
TestUtils.createServer(KafkaConfig.fromProps(props2))
fail("Starting a broker with the same port should fail")
} catch {
case _: KafkaException => // expected
}
}
@Test
def testConflictBrokerRegistration(): Unit = {
    // Try starting a broker with a conflicting broker id.
// This shouldn't affect the existing broker registration.
val brokerId = 0
val props1 = TestUtils.createBrokerConfig(brokerId, zkConnect)
server = TestUtils.createServer(KafkaConfig.fromProps(props1))
    val brokerRegistration = zkClient.getBroker(brokerId).getOrElse(fail("broker doesn't exist"))
val props2 = TestUtils.createBrokerConfig(brokerId, zkConnect)
try {
TestUtils.createServer(KafkaConfig.fromProps(props2))
fail("Registering a broker with a conflicting id should fail")
} catch {
case _: NodeExistsException =>
// this is expected
}
// broker registration shouldn't change
    assertEquals(brokerRegistration, zkClient.getBroker(brokerId).getOrElse(fail("broker doesn't exist")))
}
@Test
def testBrokerSelfAware(): Unit = {
val brokerId = 0
val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
server = TestUtils.createServer(KafkaConfig.fromProps(props))
TestUtils.waitUntilTrue(() => server.metadataCache.getAliveBrokers.nonEmpty, "Wait for cache to update")
assertEquals(1, server.metadataCache.getAliveBrokers.size)
assertEquals(brokerId, server.metadataCache.getAliveBrokers.head.id)
}
@Test
def testBrokerStateRunningAfterZK(): Unit = {
val brokerId = 0
val mockBrokerState: BrokerState = EasyMock.niceMock(classOf[BrokerState])
class BrokerStateInterceptor() extends BrokerState {
override def newState(newState: BrokerStates): Unit = {
val brokers = zkClient.getAllBrokersInCluster
assertEquals(1, brokers.size)
assertEquals(brokerId, brokers.head.id)
}
}
class MockKafkaServer(override val config: KafkaConfig, override val brokerState: BrokerState = mockBrokerState) extends KafkaServer(config) {}
val props = TestUtils.createBrokerConfig(brokerId, zkConnect)
server = new MockKafkaServer(KafkaConfig.fromProps(props))
EasyMock.expect(mockBrokerState.newState(RunningAsBroker)).andDelegateTo(new BrokerStateInterceptor).once()
EasyMock.replay(mockBrokerState)
server.startup()
}
}
| sslavic/kafka | core/src/test/scala/unit/kafka/server/ServerStartupTest.scala | Scala | apache-2.0 | 4,690 |
package io.udash.demos.files.rpc
import io.udash.utils.{CallbacksHandler, Registration}
class RPCService extends MainClientRPC {
private val listeners = new CallbacksHandler[Unit]
def listenStorageUpdate(callback: () => Unit): Registration =
listeners.register({ case () => callback() })
override def fileStorageUpdated(): Unit =
listeners.fire(())
}
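// Usage sketch: the server notifies this client-side endpoint via fileStorageUpdated(), which fires
// every callback registered through listenStorageUpdate; the returned Registration cancels it.
//   val service = new RPCService
//   val registration = service.listenStorageUpdate(() => println("storage changed"))
//   service.fileStorageUpdated()   // prints "storage changed"
//   registration.cancel()          // stops listening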
| UdashFramework/udash-demos | file-upload/frontend/src/main/scala/io/udash/demos/files/rpc/RPCService.scala | Scala | gpl-3.0 | 370 |
package lv.addresses.index
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
import java.util
import scala.collection.AbstractIterator
import scala.collection.mutable.{ArrayBuffer => AB}
import scala.collection.mutable.{Map => MM}
import scala.language.postfixOps
object Index {
protected val logger = Logger(LoggerFactory.getLogger("lv.addresses.indexer"))
sealed case class Refs(exact: AB[Int] = AB(), approx: AB[Int] = AB()) {
def add(ref: Int, exactMatch: Boolean): Refs = {
def coll = if (exactMatch) exact else approx
if (coll.isEmpty) coll += ref
else
coll
.lastOption.filterNot(_ == ref)
.foreach(_ => coll += ref) //do not add code twice
this
}
}
sealed case class Result(word: String, refs: AB[Int], editDistance: Int)
sealed case class FuzzyResult(word: String,
refs: AB[Int],
editDistance: Int,
/** Split count of the original string.
* Corresponds to space count in word field
*/
splitDepth: Int = 0)
sealed case class PartialFuzzyResult(word: String,
refs: AB[Int],
editDistance: Int,
rest: String)
sealed case class IndexStats(nodeStats: NodeStats, repWordStats: AB[NodeStats]) {
def render: String = s"${nodeStats.render}. Repeated words stats: ${
repWordStats.zipWithIndex.map { case (s, i) => s"${i + 2}: ${s.render}" }.mkString(", ")
}"
}
sealed case class NodeStats(nodeCount: Long, refCount: Long) {
def +(s: NodeStats): NodeStats = NodeStats(nodeCount + s.nodeCount, refCount + s.refCount)
def render: String = s"Node count - $nodeCount, code ref count - $refCount"
}
sealed class MutableIndexBase(var children: AB[MutableIndexNode]) {
def search(str: String): Refs = {
if (children == null) return Refs()
val c = str.head
val idx = binarySearch[MutableIndexNode, Char](children, c, _.word.head, _ - _)
if (idx < 0) Refs()
else if (str.length == 1) children(idx).refs
else children(idx).search(str.drop(1))
}
def fuzzySearch(str: String,
currentEditDistance: Int,
maxEditDistance: Int,
consumed: String,
partial: MM[String, PartialFuzzyResult],
calcMaxEditDist: String => Int): AB[FuzzyResult] = {
def tryTransformedSearch(excludeChar: Char): AB[FuzzyResult] = {
def replaceOrPrefix(s: String) = {
var fuzzyResult = AB[FuzzyResult]()
val l = children.size
var i = 0
while (i < l) {
val charToTry = children(i).word.head
if (charToTry != excludeChar) {
fuzzyResult ++=
children(i)
.fuzzySearch(s, currentEditDistance + 1, maxEditDistance,
consumed + charToTry, partial, calcMaxEditDist)
}
i += 1
}
fuzzyResult
}
if (currentEditDistance < maxEditDistance) {
          //try to prefix c with one of children word values
replaceOrPrefix(str) ++
//try to omit c
fuzzySearch(str drop 1, currentEditDistance + 1, maxEditDistance, consumed, partial, calcMaxEditDist) ++
//try to replace c with one of children word values
replaceOrPrefix(str drop 1)
} else AB()
}
if (str.isEmpty) return AB()
val c = str.head
val idx = binarySearch[MutableIndexNode, Char](children, c, _.word.head, _ - _)
if (idx < 0) {
        tryTransformedSearch('\u0000') //no char excluded
} else {
val r =
children(idx)
.fuzzySearch(str.drop(1), currentEditDistance, maxEditDistance, consumed + c, partial, calcMaxEditDist)
r ++ tryTransformedSearch(c) //exclude found char from further fuzzy search
}
}
def isEmpty = children == null || children.isEmpty
def nonEmpty = !isEmpty
def updateChildren(w: String, ref: Int, exact: Boolean): Unit = {
if (isEmpty) children = AB()
      /* Strings are considered equal if they have a common prefix */
val i = binarySearch[MutableIndexNode, String](children, w, _.word, _.head - _.head)
if (i < 0) {
children.insert(-(i + 1), new MutableIndexNode(w, Refs().add(ref, exact), null))
} else {
children(i).update(w, ref, exact)
}
}
/** Restores node from path */
def load(path: Vector[Int], word: String, refs: Refs): Unit = {
val idx = path.head
if (children == null) children = AB()
if (idx > children.size)
sys.error(s"Invalid index: $idx, children size: ${children.size}, cannot add node")
else if (idx == children.size) {
val n = new MutableIndexNode(null, null, null)
n.load(path.drop(1), word, refs)
children += n
} else {
children(idx).load(path.drop(1), word, refs)
}
}
/** Calls writer function while traversing index */
def write(writer: (Vector[Int], String, Refs) => Unit, path: Vector[Int]): Unit = {
if (children == null) return
children.zipWithIndex.foreach {
case (node, i) =>
val np = path.appended(i)
node.writeNode(writer, np)
node.write(writer, np)
}
}
    /** Debugging info */
def nodeStatistics: NodeStats = {
if (isEmpty)
NodeStats(0, 0)
else
children.foldLeft(NodeStats(children.size, 0)) { (st, n) => st + n.nodeStatistics }
}
private[Index] def validateNodeWord(path: String): AB[(String, String, Int)] = {
if (isEmpty) AB() else children.flatMap(_.validateNodeWord(path))
}
private[Index] def validateIndex(path: String): AB[(String, AB[Int])] = {
if (isEmpty) AB() else children.flatMap(_.validateIndex(path))
}
    /** Node word must be one character long if it does not contain the multiplier '*'.
      * Returns tuples of (path, word, first address code) */
def invalidWords: AB[(String, String, Int)] = validateNodeWord("")
/** Address codes in node must be unique and in ascending order.
* Returns (invalid path|word, address codes) */
def invalidIndices: AB[(String, AB[Int])] = validateIndex("")
}
final class MutableIndex(_children: AB[MutableIndexNode],
var repeatedWordChildren: AB[MutableIndexBase])
extends MutableIndexBase(_children) {
/** Searches index down the tree */
def apply(str: String): Refs = {
val rep_w_idx = str.indexOf('*')
if (rep_w_idx != -1) { //repeated word found
//repeated words start with 2
val idx = str.substring(0, rep_w_idx).toInt - 2
if (idx < repeatedWordChildren.size) {
val ch = repeatedWordChildren(idx)
if (ch != null) ch.search(str.substring(rep_w_idx + 1)) else Refs()
} else Refs()
} else search(str)
}
/** Searches index down the tree in fuzzy mode */
def apply(str: String, maxEditDistance: Int, calcMaxEditDist: String => Int): AB[FuzzyResult] = {
def fuzzySearchInternal(node: MutableIndexBase, str: String): AB[FuzzyResult] = {
def reduceResults(res: AB[FuzzyResult]): AB[FuzzyResult] = {
val r =
res
.groupBy(_.word)
.map[FuzzyResult] { case (_, searchResults) =>
searchResults.minBy(_.editDistance)
}
AB.from(r)
}
def completePartial(pr: PartialFuzzyResult, depth: Int): AB[FuzzyResult] = {
if (pr.refs.isEmpty) return AB()
val npartialRes = MM[String, PartialFuzzyResult]()
val nr =
reduceResults(node.fuzzySearch(pr.rest, 0,
Math.min(maxEditDistance, calcMaxEditDist(pr.rest)), "", npartialRes, calcMaxEditDist)
)
val presMap = MM[String, FuzzyResult]()
npartialRes.foreach { case (_, npr) =>
val is = intersect(AB(pr.refs, npr.refs), 1024, null)
if (is.nonEmpty) {
val completedRes = completePartial(PartialFuzzyResult(pr.word + " " + npr.word, is,
pr.editDistance + npr.editDistance, npr.rest), depth + 1)
completedRes.foreach { fr =>
//select results with minimal edit distance
presMap.get(fr.word).map { case FuzzyResult(_, _, ed, _) =>
if (fr.editDistance < ed) presMap(fr.word) = fr
}.getOrElse(presMap.addOne((fr.word, fr)))
}
}
}
val pres = AB.from(presMap.values)
if (nr.nonEmpty)
nr.flatMap { fr =>
val is = intersect(AB(pr.refs, fr.refs), 1024, null)
if (is.nonEmpty)
AB(FuzzyResult(pr.word + " " + fr.word, is, pr.editDistance + fr.editDistance, depth))
else AB()
} ++ pres else pres
}
val partialRes = MM[String, PartialFuzzyResult]()
val r = node.fuzzySearch(str,
0, maxEditDistance, "", partialRes, calcMaxEditDist)
if (r.isEmpty && partialRes.nonEmpty) {
val res = AB[FuzzyResult]()
partialRes.foreach { case (_, pr) =>
res ++= completePartial(pr, 1)
}
res
//sort by edit distance asc first and then split depth desc and then reference count desc
.sortBy { fr => (fr.editDistance << 25) - (fr.splitDepth << 24) - fr.refs.length }
} else {
reduceResults(r)
//sort by edit distance asc first and then reference count desc
.sortBy(fr => (fr.editDistance << 24) - fr.refs.length)
}
}
val rep_w_idx = str.indexOf('*')
if (rep_w_idx != -1) { //repeated word found
//repeated words start with 2
val idx = str.substring(0, rep_w_idx).toInt - 2
if (idx < repeatedWordChildren.size) {
val ch = repeatedWordChildren(idx)
if (ch != null) {
val wc = s"${idx + 2}*"
fuzzySearchInternal(ch, str.substring(rep_w_idx + 1))
.map(fr => fr.copy(word = wc + fr.word))
} else AB()
} else AB()
} else {
fuzzySearchInternal(this, str)
}
}
override def updateChildren(w: String, ref: Int, exact: Boolean): Unit = {
if (isEmpty) {
children = AB()
repeatedWordChildren = AB()
}
val rep_w_idx = w.indexOf('*')
if (rep_w_idx != -1) {
// repeated words start with 2
val idx = w.substring(0, rep_w_idx).toInt - 2
val repWord = w.substring(rep_w_idx + 1)
if (repeatedWordChildren.size <= idx) repeatedWordChildren.padToInPlace(idx + 1, null)
if (repeatedWordChildren(idx) == null) {
val n = new MutableIndexBase(null)
n.updateChildren(repWord, ref, exact)
repeatedWordChildren(idx) = n
} else repeatedWordChildren(idx).updateChildren(repWord, ref, exact)
} else {
super.updateChildren(w, ref, exact)
}
}
override def load(path: Vector[Int], word: String, refs: Refs): Unit = {
val idx = path.head
val tail = path.tail
if (repeatedWordChildren == null) repeatedWordChildren = AB()
if (idx == -1) super.load(tail, word, refs)
else if (idx > repeatedWordChildren.size)
sys.error(s"Invalid index for repeated words: $idx, children size: ${
repeatedWordChildren.size
}, cannot add node")
else if (idx == repeatedWordChildren.size) {
val n = new MutableIndexBase(null)
n.load(tail, word, refs)
repeatedWordChildren += n
} else {
repeatedWordChildren(idx).load(tail, word, refs)
}
}
/** Calls writer function while traversing index */
def write(writer: (Vector[Int], String, Refs) => Unit): Unit = {
write(writer, Vector(-1))
repeatedWordChildren.zipWithIndex.foreach { case (node, i) => node.write(writer, Vector(i)) }
}
def statistics: IndexStats = {
val st = nodeStatistics
val repSt = repeatedWordChildren.map(_.nodeStatistics)
IndexStats(st, repSt)
}
}
final class MutableIndexNode(var word: String, var refs: Refs,
_children: AB[MutableIndexNode]) extends MutableIndexBase(_children) {
override def fuzzySearch(str: String,
currentEditDistance: Int,
maxEditDistance: Int,
consumed: String,
partial: MM[String, PartialFuzzyResult],
calcMaxEditDist: String => Int): AB[FuzzyResult] = {
if (str.isEmpty) {
//add exact refs to fuzzy result
(if (currentEditDistance > 0 && refs.exact.isEmpty) AB() else
AB(FuzzyResult(consumed, if (refs.exact.nonEmpty) refs.exact else refs.approx, currentEditDistance))) ++
//add children word values if current edit distance is less than max edit distance
(if (currentEditDistance < maxEditDistance && children != null) {
children.foldLeft(AB[FuzzyResult]()) { case (r, c) =>
r ++= c.fuzzySearch(str, currentEditDistance + 1,
maxEditDistance, consumed + c.word, partial, calcMaxEditDist)
}
} else AB())
} else {
if (refs.exact.nonEmpty) {
if (currentEditDistance <= calcMaxEditDist(consumed)) {
val key = consumed + " " + str
def partialEntry =
(key, PartialFuzzyResult(consumed, refs.exact, currentEditDistance + 1 /*space added*/ , str))
partial.get(key).map { pr =>
if (currentEditDistance < pr.editDistance) partial += partialEntry
}.getOrElse(partial += partialEntry)
}
if (children == null) {
val err = currentEditDistance + str.length
if (err <= maxEditDistance) AB(FuzzyResult(consumed, refs.exact, err))
else AB()
} else super.fuzzySearch(str, currentEditDistance, maxEditDistance, consumed, partial, calcMaxEditDist)
}
else if (children == null) AB()
else super.fuzzySearch(str, currentEditDistance, maxEditDistance, consumed, partial, calcMaxEditDist)
}
}
def update(w: String, ref: Int, exact: Boolean): Unit = {
val (commonPart, nodeRest, wordRest) = split(word, w)
if (nodeRest.isEmpty && wordRest.isEmpty) { //update node codes
refs.add(ref, exact)
} else {
      if (nodeRest.nonEmpty) { //make the common part the node's word, move the remaining part deeper
word = commonPart
val onlyRep = commonPart.endsWith("*")
val chRefs = if (onlyRep) refs.add(ref, exact) else refs
refs = if (onlyRep) null else Refs().add(ref, exact)
val nch = children
children = AB(new MutableIndexNode(nodeRest, chRefs, nch)) //move children and refs to new child node
}
if (wordRest.nonEmpty) { //update children with remaining part of new word
updateChildren(wordRest, ref, exact)
}
}
}
/** returns (common part from two args, rest from first arg, rest from second arg) */
def split(s1: String, s2: String): (String, String, String) = {
val equalCharCount = s1 zip s2 count (t => t._1 == t._2)
(s1.substring(0, equalCharCount), s1.substring(equalCharCount), s2.substring(equalCharCount))
}
override def load(path: Vector[Int], word: String, refs: Refs): Unit = {
if (path.isEmpty) {
this.word = word
this.refs = refs
} else {
super.load(path, word, refs)
}
}
def writeNode(writer: (Vector[Int], String, Refs) => Unit, path: Vector[Int]): Unit = {
writer(path, word, refs)
}
    /** Debugging info */
override def nodeStatistics: NodeStats = Option(refs).map { r =>
NodeStats(0, r.exact.size + r.approx.size)
}.getOrElse(NodeStats(0, 0)) + super.nodeStatistics
override def validateNodeWord(path: String): AB[(String, String, Int)] = {
(if (word.length > 1 && !word.contains("*"))
AB((path, word, refs.exact.headOption.getOrElse(refs.approx.head)))
else AB()) ++
super.validateNodeWord(path + word)
}
override def validateIndex(path: String): AB[(String, AB[Int])] = {
val wrongCodes = AB[Int]()
def findDuplicates(arr: AB[Int]): Int = {
if (arr.isEmpty) return -1
arr.reduce { (prevCode, curCode) =>
if (prevCode >= curCode) wrongCodes += curCode
curCode
}
}
(if (refs != null) {
findDuplicates(refs.exact)
findDuplicates(refs.approx)
if (wrongCodes.nonEmpty) AB(s"$path|$word" -> wrongCodes) else AB()
} else AB()) ++ super.validateIndex(path + word)
}
}
def searchCodes(words: AB[String],
index: MutableIndex,
calcMaxEditDist: String => Int)(limit: Int,
filter: Int => Boolean = null): AB[Result] = {
def searchParams(words: AB[String]) = {
val ws =
wordStatForSearch(words)
.groupBy(_._1)
.map[(String, Int)] { case (_, s) => s.sortBy(-_._2).head }
.map { case (w, c) => if (c == 1) w else s"$c*$w" }
AB.from(ws)
}
def search_idx(word: String) = index(word)
def search_idx_fuzzy(word: String) = {
index(word, calcMaxEditDist(word), calcMaxEditDist)
}
def exactSearch(p: AB[String], editDistance: Int, exactMatchWords: Set[String]): AB[Result] = {
val result = p.map(w => search_idx(w) -> exactMatchWords(w))
.map {
case (r, true) => AB(r.exact)
case (r, false) => AB(r.exact, r.approx).filter(_.nonEmpty)
}
var refCount = 0
val intersection = AB[Int]()
val combInit = (AB.fill[AB[Int]](result.size)(null), 0) //(refs, idx)
foldCombinations[AB[Int], (AB[AB[Int]], Int), AB[Int]](result,
combInit,
(cr, d) => {
cr._1(cr._2) = d
(cr._1, cr._2 + 1)
},
intersection,
(r, cr) => {
val intr = intersect(cr._1, limit, filter)
r ++= intr
refCount += intr.length
(r, refCount < limit)
}
)
intersection match {
case a if a.isEmpty => AB()
case a => AB(Result(p.mkString(" "), if (a.size > limit) a.take(limit) else a, editDistance))
}
}
def exactSearchWithMerge(params: AB[String]): AB[Result] = {
if (params.isEmpty) AB()
else {
var count = 0
var result = AB[Result]()
binCombinations(params.size - 1, spaces => {
var i = 1
var j = 0
var editDistance = 0
val ab = AB[String](params(0))
spaces foreach { s =>
if (s == 0) { //separate word
ab += params(i)
j += 1
} else { //merge words
ab(j) = ab(j) + params(i)
editDistance += 1
}
i += 1
}
result = exactSearch(searchParams(ab), editDistance, Set())
count += 1
result.isEmpty && count < 32
})
result
}
}
exactSearchWithMerge(words) match {
case a if a.nonEmpty => a
case _ => // fuzzy search
val params = searchParams(words)
//reset ref count
var refCount = 0
val fuzzyRes = params map search_idx_fuzzy
var productiveIntersectionCount = 0
var intersectionCount = 0
val MaxProductiveIntersectionCount = 32
val MaxIntersectionCount = 1024
foldCombinations[FuzzyResult, AB[FuzzyResult], AB[Result]](
fuzzyRes,
AB(),
(frs, fr) => frs += fr,
AB[Result](),
(r, frs) => {
val res = exactSearch(
searchParams(frs
.flatMap(fr => if (fr.splitDepth == 0) AB(fr.word) else AB.from(fr.word.split(' ')))),
frs.foldLeft(0)((ed, fr) => ed + fr.editDistance),
frs.collect { case fr if fr.editDistance > 0 => fr.word }.toSet
)
if (res.nonEmpty) {
r ++= res
refCount += res.foldLeft(0)(_ + _.refs.length)
productiveIntersectionCount += 1
}
intersectionCount += 1
if (intersectionCount >= MaxIntersectionCount && productiveIntersectionCount == 0)
            logger.debug(s"A LOT OF FUZZY RESULTS: ${fuzzyRes.map(_.size).mkString("(", ",", ")")}\n ${
fuzzyRes.map(_.map(fr => fr.word -> fr.editDistance)
.mkString("(", ",", ")")).mkString("[", ",", "]")
}")
(r, refCount < limit &&
intersectionCount < MaxIntersectionCount &&
productiveIntersectionCount < MaxProductiveIntersectionCount
)
}
)
}
}
val accents = "ēūīāšģķļžčņ" zip "euiasgklzcn" toMap
def unaccent(str: String) = str
.toLowerCase
.foldLeft(new scala.collection.mutable.StringBuilder(str.length))(
(b, c) => b.append(accents.getOrElse(c, c)))
.toString
  def isWhitespaceOrSeparator(c: Char) = c.isWhitespace || "-,/.\"'\n".contains(c)
  //better performance: whitespace is eliminated in the same pass as the unaccent operation
def normalize(str: String) = str
.toLowerCase
.foldLeft(AB[scala.collection.mutable.StringBuilder]() -> true) {
case ((s, b), c) =>
if (isWhitespaceOrSeparator(c)) (s, true) else {
if (b) s.append(new scala.collection.mutable.StringBuilder)
s.last.append(accents.getOrElse(c, c))
(s, false)
}
}._1
.map(_.toString)
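  // Examples (Latvian address strings, illustrative only):
  //   unaccent("Brīvības")         // "brivibas"  - lower-cases and strips diacritics
  //   normalize("Brīvības-iela 5") // AB("brivibas", "iela", "5") - also splits on whitespace/separators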
def wordStatForSearch(words: AB[String]): AB[(String, Int)] = {
val stats = AB.fill(words.size)(1)
0 until words.size foreach { i =>
(i + 1) until words.size foreach { j =>
if (words(j).length > words(i).length) {
if (words(j) startsWith words(i)) stats(i) += 1
} else if (words(i) startsWith words(j)) stats(j) += 1
}
}
words zip stats
}
def wordStatForIndex(phrase: String) = normalize(phrase)
.foldLeft(Map[String, Int]())((stat, w) =>
(0 until w.length)
.map(w.dropRight(_))
.foldLeft(stat)((stat, w) => stat + (w -> stat.get(w).map(_ + 1).getOrElse(1))))
def extractWords(phrase: String) = wordStatForIndex(phrase)
.flatMap(t => List(t._1) ++ (2 to t._2).map(s => s"$s*${t._1}"))
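  // Example: a word that occurs twice in a phrase is additionally indexed under the "2*" prefix,
  // which is the repeated-word convention used by MutableIndex:
  //   wordStatForIndex("mana iela") // Map("mana" -> 1, "man" -> 1, ..., "iela" -> 1, ...)
  //   extractWords("mana mana")     // contains "mana", "2*mana", "man", "2*man", ...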
def editDistance(s1: String, s2: String): Int = {
val lx = s1.length + 1
val ly = s2.length + 1
if (lx > 1 && ly > 1) {
val vals = new Array[Int](lx)
var x = 0
while (x < lx) {
vals(x) = x
x += 1
}
x = 1
var y = 1
var dist = 0
while (y < ly) {
var xm1 = y
while (x < lx) {
val dxy = vals(x - 1) + (if (s1.charAt(x - 1) == s2.charAt(y - 1)) 0 else 1)
val d = Math.min(xm1 + 1, Math.min(vals(x) + 1, dxy))
vals(x - 1) = xm1
xm1 = d
x += 1
}
x = 1
y += 1
dist = xm1
}
dist
} else Math.max(lx - 1, ly - 1)
}
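  // Examples:
  //   editDistance("abc", "abd") // 1 - a single substitution
  //   editDistance("ab", "ba")   // 2 - plain Levenshtein, a transposition costs two edits
  //   editDistance("", "abc")    // 3 - empty input falls into the Math.max branch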
def binarySearch[T, K](arr: AB[T], key: K, keyFunc: T => K, comparator: (K, K) => Int): Int = {
binarySearchFromUntil(arr, 0, arr.length, key, keyFunc, comparator)
}
def binarySearchFromUntil[T, K](arr: AB[T], fromIdx: Int, toIdx: Int,
key: K, keyFunc: T => K, comparator: (K, K) => Int): Int = {
var from = fromIdx
var to = toIdx - 1
while (from <= to) {
val i = from + to >>> 1
val r = comparator(keyFunc(arr(i)), key)
if (r < 0) from = i + 1 else if (r > 0) to = i - 1 else return i
}
-(from + 1)
}
def intersect(idx: AB[AB[Int]], limit: Int, filter: Int => Boolean): AB[Int] = {
val result = AB[Int]()
val pos = Array.fill(idx.length)(0)
def check_register = {
val v = idx(0)(pos(0))
val l = pos.length
var i = 1
while (i < l && v == idx(i)(pos(i))) i += 1
if (i == l) {
if (filter == null || filter(v)) result append v
i = 0
while (i < l) {
pos(i) += 1
i += 1
}
}
}
def find_equal(a_pos: Int, b_pos: Int) = {
def search(arr: AB[Int], from: Int, until: Int, code: Int) = {
val i = binarySearchFromUntil[Int, Int](arr, from, until, code, identity _, _ - _)
if (i < 0) -(i + 1) else i
}
val a: AB[Int] = idx(a_pos)
val b: AB[Int] = idx(b_pos)
val al = a.length
val bl = b.length
var ai = pos(a_pos)
var bi = pos(b_pos)
while (ai < al && bi < bl && a(ai) != b(bi))
if (a(ai) < b(bi)) ai = search(a, ai + 1, al, b(bi))
else bi = search(b, bi + 1, bl, a(ai))
pos(a_pos) = ai
pos(b_pos) = bi
}
def continue = {
var i = 0
val l = pos.length
while (i < l && pos(i) < idx(i).length) i += 1
i == l
}
while (result.length < limit && continue) {
check_register
var i = 0
val l = pos.length - 1
while (i < l) {
find_equal(i, i + 1)
i += 1
}
}
result
}
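  // Example: the ordered reference lists are scanned in parallel with an optional limit and filter:
  //   intersect(AB(AB(1, 2, 4, 7), AB(2, 4, 8)), 10, null) // AB(2, 4)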
def hasIntersection(idx: AB[AB[Int]]): Boolean = {
val pos = Array.fill(idx.length)(0)
var hasCommon = false
def checkEquals = {
val v = idx(0)(pos(0))
val l = pos.length
var i = 1
while (i < l && v == idx(i)(pos(i))) i += 1
hasCommon = i == l
hasCommon
}
def find_equal(a_pos: Int, b_pos: Int) = {
def search(arr: AB[Int], from: Int, until: Int, code: Int) = {
val i = binarySearchFromUntil[Int, Int](arr, from, until, code, identity _, _ - _)
if (i < 0) -(i + 1) else i
}
val a: AB[Int] = idx(a_pos)
val b: AB[Int] = idx(b_pos)
val al = a.length
val bl = b.length
var ai = pos(a_pos)
var bi = pos(b_pos)
while (ai < al && bi < bl && a(ai) != b(bi))
if (a(ai) < b(bi)) ai = search(a, ai + 1, al, b(bi))
else bi = search(b, bi + 1, bl, a(ai))
pos(a_pos) = ai
pos(b_pos) = bi
}
def continue = {
var i = 0
val l = pos.length
while (i < l && pos(i) < idx(i).length) i += 1
i == l && !checkEquals
}
while (continue) {
var i = 0
val l = pos.length - 1
while (i < l) {
find_equal(i, i + 1)
i += 1
}
}
hasCommon
}
/** Merge ordered collections removing duplicates */
def merge(arrs: AB[AB[Int]]): AB[Int] = {
def merge(r1: AB[Int], r2: AB[Int]) = {
var i1 = 0
var i2 = 0
val l1 = r1.length
val l2 = r2.length
val res = new AB[Int](Math.max(l1, l2))
var prevCode = -1
while (i1 < l1 && i2 < l2) {
val c1 = r1(i1)
val c2 = r2(i2)
if (c1 < c2) {
if (prevCode < c1) {
res += r1(i1)
prevCode = c1
}
i1 += 1
} else if (c1 > c2) {
if (prevCode < c2) {
res += r2(i2)
prevCode = c2
}
i2 += 1
} else {
if (prevCode < c1) {
res += r1(i1)
prevCode = c1
}
i1 += 1
i2 += 1
}
}
def addDistinct(a: AB[Int], start: Int, l: Int) = {
var i = start
while (i < l) {
val c = a(i)
if (prevCode < c) {
res += a(i)
prevCode = c
}
i += 1
}
}
if (i1 < l1) addDistinct(r1, i1, l1)
else if (i2 < l2) addDistinct(r2, i2, l2)
res
}
if (arrs.isEmpty) AB()
else arrs.reduce(merge)
}
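  // Example:
  //   merge(AB(AB(1, 3, 5), AB(2, 3, 6))) // AB(1, 2, 3, 5, 6) - ordered union without duplicates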
  /** Removes duplicates from collections going forward, i.e. elements contained in the collection
    * at index 0 are removed from the collection at index 1, and so on.
    * Collections must be ordered. */
def pruneRight(arrs: AB[AB[Int]]): AB[AB[Int]] = {
def prune(l: AB[Int], r: AB[Int]): AB[Int] = {
if (l.isEmpty && r.isEmpty) return r
var li = 0
var ri = 0
val ll = l.length
while (li < ll && ri < r.length) {
if (l(li) < r(ri)) li += 1
else if (l(li) > r(ri)) ri += 1
else {
r.remove(ri)
li += 1
}
}
r
}
if (arrs.length < 2) return arrs
var i = 0
val l = arrs.length
while (i < l) {
var j = i + 1
while (j < l) {
arrs(j) = prune(arrs(i), arrs(j))
j += 1
}
i += 1
}
arrs
}
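  // Example: elements already present in an earlier collection are removed from the later ones:
  //   pruneRight(AB(AB(1, 2, 3), AB(2, 3, 4))) // AB(AB(1, 2, 3), AB(4))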
def foldCombinations[A, B, C](data: AB[AB[A]],
combInit: => B,
combFun: (B, A) => B,
init: C,
folder: (C, B) => (C, Boolean)): C = {
if (data.exists(_.isEmpty)) return init
data.sortInPlaceBy(_.size)
val count = data.size
var res = init
val limits = Array.tabulate(count)(data(_).size - 1)
val max_sum = limits.sum
var s = 0
var continue = true
while(s <= max_sum && continue) {
val it = new PermutationsItrOverSumEls(count, s, limits)
while (continue && it.hasNext) {
val idxs = it.next()
var combRes = combInit
var i = 0
while (i < count) {
combRes = combFun(combRes, data(i)(idxs(i)))
i += 1
}
val (fr, cont) = folder(res, combRes)
res = fr
continue = cont
}
s += 1
}
res
}
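  // Usage sketch: folds every combination that picks one element from each inner collection,
  // visiting index tuples in order of increasing index sum; the folder stops the traversal
  // early by returning false as the second tuple element.
  //   foldCombinations[Int, Int, AB[Int]](
  //     AB(AB(1, 2), AB(10, 20)),
  //     0,
  //     (sum, el) => sum + el,
  //     AB[Int](),
  //     (acc, sum) => { acc += sum; (acc, true) })
  //   // accumulates 11, 21, 12, 22 (index tuples (0,0), (0,1), (1,0), (1,1) by increasing sum)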
class PermutationsItrOverSumEls(el_count: Int, sum: Int,
limits: Array[Int] /** NOTE: limits must be sorted! */)
extends AbstractIterator[Array[Int]] {
private val elms = new Array[Int](el_count) //y
private val cur_l = new Array[Int](el_count) //l
private val cur_s = new Array[Int](el_count) //s
private val result = new Array[Int](el_count) //res
private val permutator = new PermutationsItr(el_count, limits)
private var _hasNext = el_count > 0 && init
private def init: Boolean = {
var i = el_count - 1
var s = sum
cur_l(i) = Math.min(limits(i), s)
while (i >= 0) {
cur_s(i) = s
if (i == 0) {
elms(0) = s
cur_l(0) = limits(0)
} else {
elms(i) = Math.min(s, cur_l(i))
val in = i - 1
s = s - elms(i)
cur_l(in) = Math.min(limits(in), elms(i))
}
i -= 1
}
s <= cur_l(0)
}
private def cont_adjust(idx: Int): Boolean = {
var i = idx
elms(i) -= 1
i -= 1
while (i >= 0) {
val ip = i + 1
cur_s(i) = cur_s(ip) - elms(ip)
cur_l(i) = Math.min(limits(i), elms(ip))
elms(i) = Math.min(cur_s(i), cur_l(i))
i -= 1
}
cur_s(0) > cur_l(0)
}
private def calc_next: Boolean = {
var i = 1
while (i < el_count && cont_adjust(i)) i += 1
i < el_count
}
def hasNext = _hasNext || permutator.hasNext
@throws[NoSuchElementException]
def next(): Array[Int] = {
if (!hasNext)
Iterator.empty.next()
if (permutator.hasNext) permutator.next()
else {
System.arraycopy(elms, 0, result, 0, el_count)
permutator.init(result)
permutator.next()
_hasNext = calc_next
result
}
}
    //recursive reference implementation of generating element combinations for a given sum
// private def se(c: Int, s: Int, l: List[Int]): (List[List[Int]], Int) = {
// if (c == 0) (Nil, 0)
// else if (c == 1) {
// if (s <= l.head) (List(List(s)), s)
// else (Nil, 0)
// } else {
// def r(y: Int, res: List[List[Int]]): (List[List[Int]], Int) = {
// se(c - 1, s - y, Math.min(l.tail.head, y) :: l.tail.tail) match {
// case (x, z) if x.nonEmpty && y >= z => r(y - 1, res ::: x.map(y :: _))
// case _ => (res, y)
// }
// }
// r(Math.min(s, l.head), Nil)
// }
// }
    //ported from the Scala standard library
class PermutationsItr(size: Int, limits: Array[Int]) extends AbstractIterator[Array[Int]] {
private[this] var result: Array[Int] = _
private[this] val elms: Array[Int] = new Array[Int](size)
private[this] var _hasNext = false
def hasNext = _hasNext
@throws[NoSuchElementException]
def next(): Array[Int] = {
if (!hasNext)
Iterator.empty.next()
System.arraycopy(elms, 0, result, 0, size)
var i = elms.length - 2
while(i >= 0 && elms(i) >= elms(i + 1)) i -= 1
if (i < 0)
_hasNext = false
else {
var j = elms.length - 1
while(elms(j) <= elms(i)) j -= 1
if (elms(j) > limits(i)) { // check limit
j = i - 1
while (j >= 0 && (elms(j + 1) > limits(j) || elms(j + 1) <= elms(j))) j -= 1
if (j < 0) _hasNext = false
else swap(j, j + 1)
} else swap(i, j) // limit ok
val len = (elms.length - i) / 2
var k = 1
while (k <= len) {
swap(i + k, elms.length - k)
k += 1
}
}
result
}
private def swap(i: Int, j: Int): Unit = {
val tmpE = elms(i)
elms(i) = elms(j)
elms(j) = tmpE
}
def init(res: Array[Int]) = {
// NOTE: res must be sorted!
result = res
_hasNext = true
//val m = scala.collection.mutable.HashMap[Int, Int]()
//idxs = result map (m.getOrElseUpdate(_, m.size))
//util.Arrays.sort(idxs)
System.arraycopy(res, 0, elms, 0, size)
}
}
}
def binCombinations(n: Int, f: Array[Int] => Boolean): Unit = {
val a = new Array[Int](n)
var b = true
def go(d: Int): Unit = {
if (d == 0) b = f(a.clone) else {
var i = 0
while (i <= 1 && b) {
a(n - d) = i
go(d - 1)
i += 1
}
}
}
go(n)
}
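  // Example: enumerates all 0/1 assignments of length n until the callback returns false:
  //   binCombinations(2, a => { println(a.toList); true })
  //   // prints List(0, 0), List(0, 1), List(1, 0), List(1, 1)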
}
| mrumkovskis/addresses | indexer/src/main/scala/index/Index.scala | Scala | mit | 34,452 |
package blended.prickle.akka.http
import akka.http.scaladsl.marshalling.{Marshaller, ToEntityMarshaller}
import akka.http.scaladsl.model.{ContentTypeRange, MediaTypes}
import akka.http.scaladsl.unmarshalling.{FromEntityUnmarshaller, Unmarshaller}
import blended.util.logging.Logger
import microjson.JsValue
import prickle._
trait PrickleSupport {
private[this] val log = Logger[PrickleSupport]
val prickleMediaType = MediaTypes.`application/json`
implicit def toEntityMarshaller[T](implicit p : Pickler[T], config : PConfig[JsValue]) : ToEntityMarshaller[T] = {
// Marshaller.stringMarshaller(prickleMediaType) {
// Marshaller.charArrayMarshaller(prickleMediaType).wrap(prickleMediaType) {
Marshaller.StringMarshaller.wrap(prickleMediaType) {
in : T =>
log.debug(s"About to pickle: ${in}")
val pickled = Pickle.intoString(in)
log.debug(s"pickled: ${pickled}")
pickled
}
}
implicit def fromEntityUnmarshaller[T](implicit u : Unpickler[T], config : PConfig[JsValue]) : FromEntityUnmarshaller[T] = {
Unmarshaller.stringUnmarshaller.forContentTypes(ContentTypeRange(prickleMediaType.toContentType)).map {
jsonString : String =>
// Unmarshaller.charArrayUnmarshaller.forContentTypes(ContentTypeRange(prickleMediaType.toContentType)).map {
// in: Array[Char] =>
// val jsonString = String.valueOf(in)
log.debug(s"About to unpickle from json string: ${jsonString}")
val unpickled = Unpickle[T].fromString(jsonString)
log.debug(s"unpickled: ${unpickled}")
unpickled.get
}
}
}
object PrickleSupport extends PrickleSupport
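// Usage sketch (illustrative only; Person is a hypothetical case class, and prickle's implicit
// macro-derived Pickler/Unpickler plus its default JsValue PConfig are assumed to be in scope):
//   import prickle._
//   case class Person(name: String, age: Int)
//   // in a route that mixes in PrickleSupport:
//   //   complete(Person("Ada", 36))        // marshalled to application/json via Pickle.intoString
//   //   entity(as[Person]) { p => ... }    // unmarshalled from json via Unpickle[T].fromString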
| woq-blended/blended | blended.prickle.akka.http/src/main/scala/blended/prickle/akka/http/PrickleSupport.scala | Scala | apache-2.0 | 1,679 |
package com.mesosphere.universe.v3.model
import com.mesosphere.Generators.nonNegNum
import org.scalacheck.Gen
import org.scalatest.FreeSpec
import org.scalatest.prop.PropertyChecks
final class PackageDefinitionSpec extends FreeSpec with PropertyChecks {
"PackageDefinition$.ReleaseVersion" - {
"ReleaseVersion$.validate should" - {
"succeed on non-negative numbers" in {
forAll (nonNegNum[Long]) { n =>
whenever (n >= 0) {
assert(ReleaseVersion.validate(n).isReturn)
}
}
}
"fail on negative numbers" in {
forAll (Gen.negNum[Long]) { n =>
whenever (n < 0) {
assert(ReleaseVersion.validate(n).isThrow)
}
}
}
}
"ReleaseVersion.value" in {
forAll (nonNegNum[Long]) { n =>
assertResult(n)(ReleaseVersion(n).value)
}
}
"ReleaseVersion$.ordering orders by value" in {
forAll (nonNegNum[Long], nonNegNum[Long]) { (a, b) =>
whenever (a >= 0 && b >= 0) {
val aVersion = ReleaseVersion(a)
val bVersion = ReleaseVersion(b)
assertResult(Ordering[Long].compare(a, b)) {
Ordering[ReleaseVersion].compare(aVersion, bVersion)
}
}
}
}
}
}
| dcos/cosmos | cosmos-test-common/src/test/scala/com/mesosphere/universe/v3/model/PackageDefinitionSpec.scala | Scala | apache-2.0 | 1,281 |
package com.technophobia.substeps.intellij.parser
import com.intellij.lang.{ASTNode, PsiBuilder, PsiParser}
import com.intellij.psi.tree.IElementType
import com.technophobia.substeps.intellij.lexer._
import scala.util.parsing.combinator.Parsers
import com.technophobia.substeps.nodes._
import com.technophobia.substeps.nodes.BasicScenario
import scala.Some
import com.technophobia.substeps.nodes.UnresolvedSubstepUsage
import com.technophobia.substeps.nodes.Feature
import scala.util.parsing.input.{Position, Reader}
class FeaturePsiParser extends PsiParser {
def parse(root: IElementType, builder: PsiBuilder): ASTNode = {
val start = builder.mark()
new FeatureParser().apply(builder)
start.done(root)
builder.getTreeBuilt
}
}
class FeatureParser extends Parsers {
override type Elem = (IElementType, String)
override type Input = Reader[Elem]
def apply(in: PsiBuilder) {
featureFile.apply(PsiBuilderImmutableAdapter(in))
}
implicit class ElementParser(tokenToMatch: IElementType) extends Parser[String] {
def apply(in: Reader[Elem]): ParseResult[String] = {
def createSuccess = {
in match {
case psiReader: PsiBuilderImmutableAdapter => {
val marker = psiReader.mark()
val success = Success(in.first._2, in.rest)
marker.done(tokenToMatch)
success
}
case _ => throw new RuntimeException("Wrong Reader Type")
}
}
val (token, tokenText) = in.first
token match {
case x if x == tokenToMatch => createSuccess
case _ => {
print(s"Parsing failed at ${tokenText}, expected ${tokenToMatch}")
Failure(s"Expected ${tokenToMatch} but found ${token}", in)
}
}
}
}
implicit class MarkingParser(wrapped: Parser[String]) extends Parser[String] {
def apply(in: FeatureParser.this.type#Input): FeatureParser.this.type#ParseResult[String] = {
val psiBuilder = in.asInstanceOf[PsiBuilderImmutableAdapter]
val mark = psiBuilder.mark()
val result = wrapped(in)
result match {
case Success(_,_) => mark.done(FeatureNameElement)
case _ => mark.rollbackTo()
}
result
}
}
private def featureFile: Parser[Feature] = opt(tagDef <~ rep1(EolElement)) ~ (featureDef <~ rep1(EolElement)) ~ (rep(scenario) <~ rep(EolElement)) ^^ {
case (Some(tags) ~ featureName ~ scenarios) => Feature(featureName, tags, scenarios)
case (None ~ featureName ~ scenarios) => Feature(featureName, Nil, scenarios)
}
private def tagDef: Parser[List[String]] = TagsMarkerElement ~> rep(tag)
private def tag: Parser[String] = TextElement
private def featureDef: MarkingParser = FeatureMarkerElement ~> rep(TextElement) ^^ (x => x.mkString(" "))
private def scenario: Parser[BasicScenario] = (opt(tagDef <~ rep1(EolElement)) ~ scenarioDef <~ rep1(EolElement)) ~ rep1sep(substepUsage, EolElement) <~ rep(EolElement) ^^ {
case (Some(tags) ~ scenarioName ~ substeps) => BasicScenario(scenarioName, tags, substeps)
case (None ~ scenarioName ~ substeps) => BasicScenario(scenarioName, Nil, substeps)
}
def substepUsage: Parser[SubstepUsage] = rep(TextElement) ^^ ((x) => UnresolvedSubstepUsage(x.mkString(" ")))
private def scenarioDef: Parser[String] = ScenarioMarkerElement ~> rep(TextElement) ^^ (x => x.mkString(" "))
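  // Shape of the token stream this parser accepts (sketch, marker tokens come from the lexer):
  //   [Tags: <tag>*]                          -- optional, followed by end-of-line(s)
  //   Feature: <name>                         -- followed by end-of-line(s)
  //   ( [Tags: <tag>*] Scenario: <name>
  //     <substep line>+ )*                    -- substep lines separated by end-of-line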
case class PsiBuilderImmutableAdapter(builder: PsiBuilder) extends Reader[FeatureParser.this.Elem] {
val first = (builder.getTokenType, builder.getTokenText)
val advanced = false
def mark() = builder.mark()
val pos: Position = new Position {
val column: Int = builder.getCurrentOffset
val line: Int = 0
protected val lineContents = builder.getOriginalText.toString
}
val atEnd: Boolean = first == null
def rest: Input = {
if(advanced) throw new RuntimeException("Builder already advanced")
builder.advanceLexer()
PsiBuilderImmutableAdapter(builder)
}
}
}
| rickybarefield/Substeps-IDEA-Plugin | src/main/scala/com/technophobia/substeps/intellij/parser/FeaturePsiParser.scala | Scala | mit | 4,072 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.io._
import java.nio.charset.StandardCharsets
import java.sql.Timestamp
import java.util.Date
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.Promise
import scala.concurrent.duration._
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.apache.spark.SparkFunSuite
import org.apache.spark.internal.Logging
import org.apache.spark.sql.hive.test.HiveTestJars
import org.apache.spark.sql.internal.StaticSQLConf
import org.apache.spark.sql.test.ProcessTestUtils.ProcessOutputCapturer
import org.apache.spark.util.{ThreadUtils, Utils}
/**
* A test suite for the `spark-sql` CLI tool.
*/
class CliSuite extends SparkFunSuite with BeforeAndAfterAll with BeforeAndAfterEach with Logging {
val warehousePath = Utils.createTempDir()
val metastorePath = Utils.createTempDir()
val scratchDirPath = Utils.createTempDir()
val sparkWareHouseDir = Utils.createTempDir()
override def beforeAll(): Unit = {
super.beforeAll()
warehousePath.delete()
metastorePath.delete()
scratchDirPath.delete()
}
override def afterAll(): Unit = {
try {
Utils.deleteRecursively(warehousePath)
Utils.deleteRecursively(metastorePath)
Utils.deleteRecursively(scratchDirPath)
} finally {
super.afterAll()
}
}
override def afterEach(): Unit = {
    // The temporary Hive metastore is deleted after each test case, so it is shared only
    // between `runCliWithin` calls made within the same test case.
Utils.deleteRecursively(metastorePath)
}
/**
* Run a CLI operation and expect all the queries and expected answers to be returned.
*
* @param timeout maximum time for the commands to complete
* @param extraArgs any extra arguments
   * @param errorResponses a sequence of strings whose presence in the stdout of the forked process
   *                       is taken as an immediate error condition. That is: if a line containing
   *                       one of these strings is found, fail the test immediately.
   *                       The default value is `Seq("Error:")`
   * @param maybeWarehouse an optional warehouse directory to pass via `--hiveconf`; `None` omits
   *                       the setting
   * @param useExternalHiveFile whether to add the external hive-site.xml test resource directory
   *                            to the driver classpath
   * @param queriesAndExpectedAnswers one or more tuples of query + answer
*/
def runCliWithin(
timeout: FiniteDuration,
extraArgs: Seq[String] = Seq.empty,
errorResponses: Seq[String] = Seq("Error:"),
maybeWarehouse: Option[File] = Some(warehousePath),
useExternalHiveFile: Boolean = false)(
queriesAndExpectedAnswers: (String, String)*): Unit = {
val (queries, expectedAnswers) = queriesAndExpectedAnswers.unzip
// Explicitly adds ENTER for each statement to make sure they are actually entered into the CLI.
val queriesString = queries.map(_ + "\n").mkString
val extraHive = if (useExternalHiveFile) {
s"--driver-class-path ${System.getProperty("user.dir")}/src/test/noclasspath"
} else {
""
}
val warehouseConf =
maybeWarehouse.map(dir => s"--hiveconf ${ConfVars.METASTOREWAREHOUSE}=$dir").getOrElse("")
val command = {
val cliScript = "../../bin/spark-sql".split("/").mkString(File.separator)
val jdbcUrl = s"jdbc:derby:;databaseName=$metastorePath;create=true"
s"""$cliScript
| --master local
| --driver-java-options -Dderby.system.durability=test
| $extraHive
| --conf spark.ui.enabled=false
| --hiveconf ${ConfVars.METASTORECONNECTURLKEY}=$jdbcUrl
| --hiveconf ${ConfVars.SCRATCHDIR}=$scratchDirPath
| --hiveconf conf1=conftest
| --hiveconf conf2=1
| $warehouseConf
""".stripMargin.split("\\s+").toSeq ++ extraArgs
}
var next = 0
val foundAllExpectedAnswers = Promise.apply[Unit]()
val buffer = new ArrayBuffer[String]()
val lock = new Object
def captureOutput(source: String)(line: String): Unit = lock.synchronized {
// This test suite sometimes gets extremely slow out of unknown reason on Jenkins. Here we
// add a timestamp to provide more diagnosis information.
buffer += s"${new Timestamp(new Date().getTime)} - $source> $line"
// If we haven't found all expected answers and another expected answer comes up...
if (next < expectedAnswers.size && line.contains(expectedAnswers(next))) {
next += 1
// If all expected answers have been found...
if (next == expectedAnswers.size) {
foundAllExpectedAnswers.trySuccess(())
}
} else {
errorResponses.foreach { r =>
if (line.contains(r)) {
foundAllExpectedAnswers.tryFailure(
new RuntimeException(s"Failed with error line '$line'"))
}
}
}
}
val process = new ProcessBuilder(command: _*).start()
val stdinWriter = new OutputStreamWriter(process.getOutputStream, StandardCharsets.UTF_8)
stdinWriter.write(queriesString)
stdinWriter.flush()
stdinWriter.close()
new ProcessOutputCapturer(process.getInputStream, captureOutput("stdout")).start()
new ProcessOutputCapturer(process.getErrorStream, captureOutput("stderr")).start()
try {
ThreadUtils.awaitResult(foundAllExpectedAnswers.future, timeout)
} catch { case cause: Throwable =>
val message =
s"""
|=======================
|CliSuite failure output
|=======================
|Spark SQL CLI command line: ${command.mkString(" ")}
|Exception: $cause
|Executed query $next "${queries(next)}",
|But failed to capture expected output "${expectedAnswers(next)}" within $timeout.
|
|${buffer.mkString("\n")}
|===========================
|End CliSuite failure output
|===========================
""".stripMargin
logError(message, cause)
fail(message, cause)
} finally {
process.destroy()
}
}
test("load warehouse dir from hive-site.xml") {
runCliWithin(1.minute, maybeWarehouse = None, useExternalHiveFile = true)(
"desc database default;" -> "hive_one",
"set spark.sql.warehouse.dir;" -> "hive_one")
}
test("load warehouse dir from --hiveconf") {
    // --hiveconf overrides hive-site.xml
runCliWithin(2.minute, useExternalHiveFile = true)(
"desc database default;" -> warehousePath.getAbsolutePath,
"create database cliTestDb;" -> "",
"desc database cliTestDb;" -> warehousePath.getAbsolutePath,
"set spark.sql.warehouse.dir;" -> warehousePath.getAbsolutePath)
}
test("load warehouse dir from --conf spark(.hadoop).hive.*") {
// override conf from hive-site.xml
runCliWithin(
2.minute,
extraArgs = Seq("--conf", s"spark.hadoop.${ConfVars.METASTOREWAREHOUSE}=$sparkWareHouseDir"),
maybeWarehouse = None,
useExternalHiveFile = true)(
"desc database default;" -> sparkWareHouseDir.getAbsolutePath,
"create database cliTestDb;" -> "",
"desc database cliTestDb;" -> sparkWareHouseDir.getAbsolutePath,
"set spark.sql.warehouse.dir;" -> sparkWareHouseDir.getAbsolutePath)
// override conf from --hiveconf too
runCliWithin(
2.minute,
extraArgs = Seq("--conf", s"spark.${ConfVars.METASTOREWAREHOUSE}=$sparkWareHouseDir"))(
"desc database default;" -> sparkWareHouseDir.getAbsolutePath,
"create database cliTestDb;" -> "",
"desc database cliTestDb;" -> sparkWareHouseDir.getAbsolutePath,
"set spark.sql.warehouse.dir;" -> sparkWareHouseDir.getAbsolutePath)
}
test("load warehouse dir from spark.sql.warehouse.dir") {
// spark.sql.warehouse.dir overrides all hive ones
runCliWithin(
2.minute,
extraArgs =
Seq("--conf",
s"${StaticSQLConf.WAREHOUSE_PATH.key}=${sparkWareHouseDir}1",
"--conf", s"spark.hadoop.${ConfVars.METASTOREWAREHOUSE}=${sparkWareHouseDir}2"))(
"desc database default;" -> sparkWareHouseDir.getAbsolutePath.concat("1"))
}
test("Simple commands") {
val dataFilePath =
Thread.currentThread().getContextClassLoader.getResource("data/files/small_kv.txt")
runCliWithin(3.minute)(
"CREATE TABLE hive_test(key INT, val STRING) USING hive;"
-> "",
"SHOW TABLES;"
-> "hive_test",
s"LOAD DATA LOCAL INPATH '$dataFilePath' OVERWRITE INTO TABLE hive_test;"
-> "",
"CACHE TABLE hive_test;"
-> "",
"SELECT COUNT(*) FROM hive_test;"
-> "5",
"DROP TABLE hive_test;"
-> ""
)
}
test("Single command with -e") {
runCliWithin(2.minute, Seq("-e", "SHOW DATABASES;"))("" -> "")
}
test("Single command with --database") {
runCliWithin(2.minute)(
"CREATE DATABASE hive_test_db;"
-> "",
"USE hive_test_db;"
-> "",
"CREATE TABLE hive_test(key INT, val STRING);"
-> "",
"SHOW TABLES;"
-> "hive_test"
)
runCliWithin(2.minute, Seq("--database", "hive_test_db", "-e", "SHOW TABLES;"))(
"" -> "hive_test"
)
}
test("Commands using SerDe provided in --jars") {
val jarFile = HiveTestJars.getHiveHcatalogCoreJar().getCanonicalPath
val dataFilePath =
Thread.currentThread().getContextClassLoader.getResource("data/files/small_kv.txt")
runCliWithin(3.minute, Seq("--jars", s"$jarFile"))(
"""CREATE TABLE t1(key string, val string)
|ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe';
""".stripMargin
-> "",
"CREATE TABLE sourceTable (key INT, val STRING) USING hive;"
-> "",
s"LOAD DATA LOCAL INPATH '$dataFilePath' OVERWRITE INTO TABLE sourceTable;"
-> "",
"INSERT INTO TABLE t1 SELECT key, val FROM sourceTable;"
-> "",
"SELECT collect_list(array(val)) FROM t1;"
-> """[["val_238"],["val_86"],["val_311"],["val_27"],["val_165"]]""",
"DROP TABLE t1;"
-> "",
"DROP TABLE sourceTable;"
-> ""
)
}
test("SPARK-29022: Commands using SerDe provided in --hive.aux.jars.path") {
val dataFilePath =
Thread.currentThread().getContextClassLoader.getResource("data/files/small_kv.txt")
val hiveContribJar = HiveTestJars.getHiveHcatalogCoreJar().getCanonicalPath
runCliWithin(
3.minute,
Seq("--conf", s"spark.hadoop.${ConfVars.HIVEAUXJARS}=$hiveContribJar"))(
"""CREATE TABLE addJarWithHiveAux(key string, val string)
|ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe';
""".stripMargin
-> "",
"CREATE TABLE sourceTableForWithHiveAux (key INT, val STRING) USING hive;"
-> "",
s"LOAD DATA LOCAL INPATH '$dataFilePath' OVERWRITE INTO TABLE sourceTableForWithHiveAux;"
-> "",
"INSERT INTO TABLE addJarWithHiveAux SELECT key, val FROM sourceTableForWithHiveAux;"
-> "",
"SELECT collect_list(array(val)) FROM addJarWithHiveAux;"
-> """[["val_238"],["val_86"],["val_311"],["val_27"],["val_165"]]""",
"DROP TABLE addJarWithHiveAux;"
-> "",
"DROP TABLE sourceTableForWithHiveAux;"
-> ""
)
}
test("SPARK-11188 Analysis error reporting") {
runCliWithin(timeout = 2.minute,
errorResponses = Seq("AnalysisException"))(
"select * from nonexistent_table;"
-> "Error in query: Table or view not found: nonexistent_table;"
)
}
test("SPARK-11624 Spark SQL CLI should set sessionState only once") {
runCliWithin(2.minute, Seq("-e", "!echo \"This is a test for Spark-11624\";"))(
"" -> "This is a test for Spark-11624")
}
test("list jars") {
val jarFile = Thread.currentThread().getContextClassLoader.getResource("TestUDTF.jar")
runCliWithin(2.minute)(
s"ADD JAR $jarFile;" -> "",
s"LIST JARS;" -> "TestUDTF.jar"
)
}
test("list jar <jarfile>") {
val jarFile = Thread.currentThread().getContextClassLoader.getResource("TestUDTF.jar")
runCliWithin(2.minute)(
s"ADD JAR $jarFile;" -> "",
s"List JAR $jarFile;" -> "TestUDTF.jar"
)
}
test("list files") {
val dataFilePath = Thread.currentThread().
getContextClassLoader.getResource("data/files/small_kv.txt")
runCliWithin(2.minute)(
s"ADD FILE $dataFilePath;" -> "",
s"LIST FILES;" -> "small_kv.txt"
)
}
test("list file <filepath>") {
val dataFilePath = Thread.currentThread().
getContextClassLoader.getResource("data/files/small_kv.txt")
runCliWithin(2.minute)(
s"ADD FILE $dataFilePath;" -> "",
s"LIST FILE $dataFilePath;" -> "small_kv.txt"
)
}
test("apply hiveconf from cli command") {
runCliWithin(2.minute)(
"SET conf1;" -> "conftest",
"SET conf2;" -> "1",
"SET conf3=${hiveconf:conf1};" -> "conftest",
"SET conf3;" -> "conftest"
)
}
test("Support hive.aux.jars.path") {
val hiveContribJar = HiveTestJars.getHiveContribJar().getCanonicalPath
runCliWithin(
1.minute,
Seq("--conf", s"spark.hadoop.${ConfVars.HIVEAUXJARS}=$hiveContribJar"))(
"CREATE TEMPORARY FUNCTION example_format AS " +
"'org.apache.hadoop.hive.contrib.udf.example.UDFExampleFormat';" -> "",
"SELECT example_format('%o', 93);" -> "135"
)
}
test("SPARK-28840 test --jars command") {
val jarFile = new File("../../sql/hive/src/test/resources/SPARK-21101-1.0.jar").getCanonicalPath
runCliWithin(
1.minute,
Seq("--jars", s"$jarFile"))(
"CREATE TEMPORARY FUNCTION testjar AS" +
" 'org.apache.spark.sql.hive.execution.UDTFStack';" -> "",
"SELECT testjar(1,'TEST-SPARK-TEST-jar', 28840);" -> "TEST-SPARK-TEST-jar\t28840"
)
}
test("SPARK-28840 test --jars and hive.aux.jars.path command") {
val jarFile = new File("../../sql/hive/src/test/resources/SPARK-21101-1.0.jar").getCanonicalPath
val hiveContribJar = HiveTestJars.getHiveContribJar().getCanonicalPath
runCliWithin(
1.minute,
Seq("--jars", s"$jarFile", "--conf",
s"spark.hadoop.${ConfVars.HIVEAUXJARS}=$hiveContribJar"))(
"CREATE TEMPORARY FUNCTION testjar AS" +
" 'org.apache.spark.sql.hive.execution.UDTFStack';" -> "",
"SELECT testjar(1,'TEST-SPARK-TEST-jar', 28840);" -> "TEST-SPARK-TEST-jar\t28840",
"CREATE TEMPORARY FUNCTION example_max AS " +
"'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax';" -> "",
"SELECT concat_ws(',', 'First', example_max(1234321), 'Third');" -> "First,1234321,Third"
)
}
test("SPARK-29022 Commands using SerDe provided in ADD JAR sql") {
val dataFilePath =
Thread.currentThread().getContextClassLoader.getResource("data/files/small_kv.txt")
val hiveContribJar = HiveTestJars.getHiveHcatalogCoreJar().getCanonicalPath
runCliWithin(
3.minute)(
s"ADD JAR ${hiveContribJar};" -> "",
"""CREATE TABLE addJarWithSQL(key string, val string)
|ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe';
""".stripMargin
-> "",
"CREATE TABLE sourceTableForWithSQL(key INT, val STRING) USING hive;"
-> "",
s"LOAD DATA LOCAL INPATH '$dataFilePath' OVERWRITE INTO TABLE sourceTableForWithSQL;"
-> "",
"INSERT INTO TABLE addJarWithSQL SELECT key, val FROM sourceTableForWithSQL;"
-> "",
"SELECT collect_list(array(val)) FROM addJarWithSQL;"
-> """[["val_238"],["val_86"],["val_311"],["val_27"],["val_165"]]""",
"DROP TABLE addJarWithSQL;"
-> "",
"DROP TABLE sourceTableForWithSQL;"
-> ""
)
}
test("SPARK-26321 Should not split semicolon within quoted string literals") {
runCliWithin(3.minute)(
"""select 'Test1', "^;^";""" -> "Test1\t^;^",
"""select 'Test2', "\";";""" -> "Test2\t\";",
"""select 'Test3', "\';";""" -> "Test3\t';",
"select concat('Test4', ';');" -> "Test4;"
)
}
test("Pad Decimal numbers with trailing zeros to the scale of the column") {
runCliWithin(1.minute)(
"SELECT CAST(1 AS DECIMAL(38, 18));"
-> "1.000000000000000000"
)
}
test("SPARK-30049 Should not complain for quotes in commented lines") {
runCliWithin(1.minute)(
"""SELECT concat('test', 'comment') -- someone's comment here
|;""".stripMargin -> "testcomment"
)
}
test("SPARK-30049 Should not complain for quotes in commented with multi-lines") {
runCliWithin(1.minute)(
"""SELECT concat('test', 'comment') -- someone's comment here \\
| comment continues here with single ' quote \\
| extra ' \\
|;""".stripMargin -> "testcomment"
)
runCliWithin(1.minute)(
"""SELECT concat('test', 'comment') -- someone's comment here \\
| comment continues here with single ' quote \\
| extra ' \\
| ;""".stripMargin -> "testcomment"
)
}
}
| matthewfranglen/spark | sql/hive-thriftserver/src/test/scala/org/apache/spark/sql/hive/thriftserver/CliSuite.scala | Scala | mit | 17,665 |
package euler
import scala.reflect.internal.MissingRequirementError
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{ universe => ru }
import Utils._
object ProjectEuler extends App {
implicit val m = ru.runtimeMirror(getClass.getClassLoader)
val eulerProblems = Iterator.from(1).map(eulerProblem).takeWhile(_.isDefined).flatten
val (totalElapsed, _) = elapsed {
eulerProblems.zipWithIndex foreach {
case (prob, n) =>
val (elapsedTime, result) = elapsed(prob.result)
println(f"Problem $n%3d completed in $elapsedTime%5d ms. Result: $result")
}
}
println(s"Total elapsed time: $totalElapsed ms")
private def eulerProblem(n: Int)(implicit m: Mirror): Option[EulerProblem] = {
// pkg for 1 to 10 = til10, 11 to 20 = til20 etc
val pkg = 10 + (n - 1) / 10 % 10 * 10
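    // e.g. n = 37: (37 - 1) / 10 % 10 * 10 == 30, so pkg == 40 and the module resolved below
    // is "euler.til40.Euler37"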
try {
val module = m.staticModule(s"euler.til$pkg.Euler" + n)
val eulerProblem = m.reflectModule(module).instance.asInstanceOf[EulerProblem]
Some(eulerProblem)
} catch {
case e: MissingRequirementError =>
None
}
}
} | TrustNoOne/Euler | scala/src/main/scala/euler/ProjectEuler.scala | Scala | mit | 1,101 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.tf
import com.intel.analytics.bigdl.numeric.NumericFloat
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import org.scalatest.{FlatSpec, Matchers}
import scala.util.Random
class ShapeSpec extends FlatSpec with Matchers {
"Shape forward" should "be success" in {
val layer = Shape()
val input = Tensor(T(T(0.1f, 0.2f), T(0.1f, 0.2f), T(0.1f, 0.2f)))
layer.forward(input) should be(Tensor[Int](T(3, 2)))
}
"Shape backward" should "be correct" in {
val layer = Shape()
val input = Tensor(T(T(0.1f, 0.2f), T(0.1f, 0.2f), T(0.1f, 0.2f)))
val gradOutput = Tensor[Int](T(3, 2))
layer.forward(input) should be(Tensor[Int](T(3, 2)))
layer.backward(input, gradOutput) should be(Tensor(T(
T(0.0f, 0.0f),
T(0.0f, 0.0f),
T(0.0f, 0.0f)
)))
}
}
class ShapeSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val shape = Shape[Float]().setName("shape")
val input = Tensor[Float](3).apply1(_ => Random.nextFloat())
runSerializationTest(shape, input)
}
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/tf/ShapeSpec.scala | Scala | apache-2.0 | 1,797 |
package breeze.linalg.operators
import breeze.generic.{ MappingUFunc, UFunc }
import breeze.generic.UFunc.UImpl
import breeze.math.{ Field, Ring, Semiring }
/*
Copyright 2012 Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* Marker sealed trait for some operation, be it UnaryOp, BinaryOp, or
* BinaryUpdateOp.
*
* @author dramage
*/
sealed trait OpType
/**
* Type marker for BinaryOp A :+ B and BinaryUpdateOp A :+= B.
*
* @author dramage
*/
sealed trait OpAdd extends OpType
object OpAdd extends OpAdd with UFunc {
implicit def opAddFromSemiring[S:Semiring]: Impl2[S, S, S] = new Impl2[S, S, S] {
def apply(v: S, v2: S): S = implicitly[Semiring[S]].+(v, v2)
}
}
/**
* Type marker for BinaryOp A :- B and BinaryUpdateOp A :-= B.
*
* @author dramage
*/
sealed trait OpSub extends OpType
object OpSub extends OpSub with UFunc {
implicit def opSubFromRing[S:Ring]: Impl2[S, S, S] = new Impl2[S, S, S] {
def apply(v: S, v2: S): S = implicitly[Ring[S]].-(v, v2)
}
}
/**
* Type marker for BinaryOp A :* B and BinaryUpdateOp A :*= B.
*
* @author dramage
*/
sealed trait OpMulScalar extends OpType
object OpMulScalar extends OpMulScalar with UFunc {
implicit def opMulScalarFromSemiring[S:Semiring]: Impl2[S, S, S] = new Impl2[S, S, S] {
def apply(v: S, v2: S): S = implicitly[Semiring[S]].*(v, v2)
}
}
/**
* Type marker for BinaryOp A :/ B and BinaryUpdateOp A:/= B.
*
* @author dramage
*/
sealed trait OpDiv extends OpType
object OpDiv extends OpDiv with UFunc {
implicit def opDivFromField[S:Field]: Impl2[S, S, S] = new Impl2[S, S, S] {
def apply(v: S, v2: S): S = implicitly[Field[S]]./(v, v2)
}
}
/**
* Type marker for BinaryOp A :% B and BinaryUpdateOp A:%= B.
*
* @author dramage
*/
sealed trait OpMod extends OpType
object OpMod extends OpMod with UFunc
/**
* Type marker for BinaryOp A :^ B and BinaryUpdateOp A:^= B.
*
* @author dramage
*/
sealed trait OpPow extends OpType
object OpPow extends OpPow with UFunc
/**
* Type marker for BinaryOp A :< B.
*
* @author dramage
*/
sealed trait OpLT extends OpType
object OpLT extends OpLT with UFunc {
implicit def impl2FromOrdering[T:Ordering]:Impl2[T, T, Boolean] = {
val ord = implicitly[Ordering[T]]
new Impl2[T, T, Boolean] {
def apply(v: T, v2: T): Boolean = ord.lt(v, v2)
}
}
}
/**
* Type marker for BinaryOp A :<= B.
*
* @author dramage
*/
sealed trait OpLTE extends OpType
object OpLTE extends OpLTE with UFunc {
implicit def impl2FromOrdering[T:Ordering]:Impl2[T, T, Boolean] = {
val ord = implicitly[Ordering[T]]
new Impl2[T, T, Boolean] {
def apply(v: T, v2: T): Boolean = ord.lteq(v, v2)
}
}
}
/**
* Type marker for BinaryOp A :> B.
*
* @author dramage
*/
sealed trait OpGT extends OpType
object OpGT extends OpGT with UFunc {
implicit def impl2FromOrdering[T:Ordering]:Impl2[T, T, Boolean] = {
val ord = implicitly[Ordering[T]]
new Impl2[T, T, Boolean] {
def apply(v: T, v2: T): Boolean = ord.gt(v, v2)
}
}
}
/**
* Type marker for BinaryOp A :>= B.
*
* @author dramage
*/
sealed trait OpGTE extends OpType
object OpGTE extends OpGTE with UFunc {
implicit def impl2FromOrdering[T:Ordering]:Impl2[T, T, Boolean] = {
val ord = implicitly[Ordering[T]]
new Impl2[T, T, Boolean] {
def apply(v: T, v2: T): Boolean = ord.gteq(v, v2)
}
}
}
/**
* Type marker for BinaryOp A :== B.
*
* @author dramage
*/
sealed trait OpEq extends OpType
object OpEq extends OpEq with UFunc {
implicit def impl2FromOrdering[T:Ordering]:Impl2[T, T, Boolean] = {
val ord = implicitly[Ordering[T]]
new Impl2[T, T, Boolean] {
def apply(v: T, v2: T): Boolean = ord.equiv(v, v2)
}
}
}
/**
* Type marker for BinaryOp A :!= B.
*
* @author dramage
*/
sealed trait OpNe extends OpType
object OpNe extends OpNe with UFunc {
implicit def impl2FromOrdering[T:Ordering]:Impl2[T, T, Boolean] = {
val ord = implicitly[Ordering[T]]
new Impl2[T, T, Boolean] {
def apply(v: T, v2: T): Boolean = !ord.equiv(v, v2)
}
}
}
/**
* Type marker for BinaryUpdateOp A := B.
*
* @author dramage
*/
sealed trait OpSet extends OpType
object OpSet extends OpSet with UFunc
/**
* Type marker for BinaryOp A :& B
*
* @author dramage
*/
sealed trait OpAnd extends OpType
object OpAnd extends OpAnd with UFunc {
implicit object opAndBoolean extends Impl2[Boolean, Boolean, Boolean] {
override def apply(v: Boolean, v2: Boolean): Boolean = v && v2
}
}
/**
* Type marker for BinaryOp A :| B
*
* @author dramage
*/
sealed trait OpOr extends OpType
object OpOr extends OpOr with UFunc {
implicit object opOrBoolean extends Impl2[Boolean, Boolean, Boolean] {
override def apply(v: Boolean, v2: Boolean): Boolean = v || v2
}
}
/**
* Type marker for BinaryOp A :^^ B
*
* @author dramage
*/
sealed trait OpXor extends OpType
object OpXor extends OpXor with UFunc {
implicit object opXorBoolean extends Impl2[Boolean, Boolean, Boolean] {
override def apply(v: Boolean, v2: Boolean): Boolean = v ^ v2
}
}
/**
* Type marker for UnaryOp -A.
*
* @author dramage
*/
sealed trait OpNeg extends OpType
object OpNeg extends OpNeg with UFunc {
implicit def ringNegation[S:Ring]: UImpl[OpNeg.this.type, S, S] = new Impl[S, S] {
def apply(v: S): S = implicitly[Ring[S]].negate(v)
}
}
/**
* Type marker for UnaryOp !A.
*
* @author dramage
*/
sealed trait OpNot extends OpType
object OpNot extends OpNot with UFunc with MappingUFunc {
implicit object opNotBoolean extends Impl[Boolean, Boolean] {
override def apply(v: Boolean): Boolean = !v
}
}
/**
* Type marker for inner (dot) product of A and B.
*
* @author dramage
*/
sealed trait OpMulInner extends OpType
object OpMulInner extends OpMulInner with UFunc {
def opMulInnerFromSemiring[S:Semiring]: OpMulInner.Impl2[S, S, S] = new Impl2[S, S, S] {
def apply(v: S, v2: S): S = implicitly[Semiring[S]].*(v, v2)
}
}
/**
* Type marker for BinaryOp A \ B when A is a matrix.
*
* @author dramage
*/
sealed trait OpSolveMatrixBy extends OpType
object OpSolveMatrixBy extends OpSolveMatrixBy with UFunc
/**
* Type marker for inner (dot) product of A and B.
*
* @author dramage
*/
sealed trait OpMulMatrix extends OpType
object OpMulMatrix extends OpMulMatrix with UFunc {
implicit def opMulMatrixFromSemiring[S:Semiring]: Impl2[S, S, S] = new Impl2[S, S, S] {
def apply(v: S, v2: S): S = implicitly[Semiring[S]].*(v, v2)
}
}
/**
* Type marker for inner power of A and p.
*
* @author claydonkey
*/
sealed trait OpPowerMatrix extends OpType
object OpPowerMatrix extends OpPowerMatrix with UFunc {
implicit def opPowMatrixFromSemiring[S:Semiring]: Impl2[S, S, S] = new Impl2[S, S, S] {
def apply(v: S, v2: S): S = implicitly[Semiring[S]].*(v, v2)
}
}
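
// An illustrative usage sketch (an addition, not from the upstream source): every Op* object
// above is a UFunc, so once an Impl/Impl2 is resolvable the op can be applied like a function.
// The instances exercised here (the Ordering- and Boolean-backed ones defined above) rely only
// on Scala standard library implicits.
object OpTypeUsageSketch {
  def demo(): Unit = {
    assert(OpLT(2, 3))        // via impl2FromOrdering[Int]
    assert(OpGTE(3.0, 3.0))   // via impl2FromOrdering[Double]
    assert(OpNe("a", "b"))    // via impl2FromOrdering[String]
    assert(OpAnd(true, true)) // via opAndBoolean
    assert(OpOr(false, true)) // via opOrBoolean
    assert(OpNot(false))      // via opNotBoolean (MappingUFunc also lifts it elementwise)
  }
}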
| claydonkey/breeze | math/src/main/scala/breeze/linalg/operators/OpType.scala | Scala | apache-2.0 | 7,346 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird
import org.scalacheck.Arbitrary
import org.scalatest.{ PropSpec, Matchers }
import org.scalatest.prop.PropertyChecks
class AdjoinedRingSpecification extends PropSpec with PropertyChecks with Matchers {
import BaseProperties._
implicit def adjoined[T: Arbitrary]: Arbitrary[AdjoinedUnit[T]] = Arbitrary {
implicitly[Arbitrary[T]].arbitrary.map { t => AdjoinedUnit(t) }
}
// AdjoinedUnit requires this method to be correct, so it is tested here:
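  // (Group.intTimes(n, x) is n-fold repeated addition of x, which for the BigInt group
  // coincides with ordinary multiplication, hence the equality checked below.)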
property("intTimes works correctly") {
forAll { (bi0: BigInt, bi1: BigInt) =>
assert(Group.intTimes(bi0, bi1) == (bi0 * bi1))
}
}
property("AdjoinedUnit[Int] is a Ring") {
ringLaws[AdjoinedUnit[Int]]
}
property("AdjoinedUnit[Long] is a Ring") {
ringLaws[AdjoinedUnit[Long]]
}
}
| avibryant/algebird | algebird-test/src/test/scala/com/twitter/algebird/AdJoinedUnitRing.scala | Scala | apache-2.0 | 1,360 |
package slamdata.engine.physical.mongodb
import slamdata.Predef._
import org.scalacheck._
import org.specs2.mutable._
import org.specs2.ScalaCheck
import scalaz._
import slamdata.engine._
import slamdata.specs2._
class PipelineSpec extends Specification with ScalaCheck with ArbBsonField with PendingWithAccurateCoverage {
import slamdata.engine.physical.mongodb.accumulator._
import slamdata.engine.physical.mongodb.expression._
import Workflow._
import ArbitraryExprOp._
implicit def arbitraryOp: Arbitrary[PipelineOp] = Arbitrary { Gen.resize(5, Gen.sized { size =>
// Note: Gen.oneOf is overridden and this variant requires two explicit args
val ops = pipelineOpGens(size - 1)
Gen.oneOf(ops(0), ops(1), ops.drop(2): _*)
}) }
def genProject(size: Int): Gen[$Project[Unit]] = for {
fields <- Gen.nonEmptyListOf(for {
c <- Gen.alphaChar
cs <- Gen.alphaStr
field = c.toString + cs
      value <- if (size <= 0) genExpr.map(-\/(_))
               else Gen.oneOf(genExpr.map(-\/(_)), genProject(size - 1).map(p => \/-(p.shape)))
} yield BsonField.Name(field) -> value)
id <- Gen.oneOf(IdHandling.ExcludeId, IdHandling.IncludeId)
} yield $Project((), Reshape(ListMap(fields: _*)), id)
implicit def arbProject = Arbitrary[$Project[Unit]](Gen.resize(5, Gen.sized(genProject)))
def genRedact = for {
value <- Gen.oneOf($Redact.DESCEND, $Redact.KEEP, $Redact.PRUNE)
} yield $Redact((), $var(value))
def unwindGen = for {
c <- Gen.alphaChar
} yield $Unwind((), DocField(BsonField.Name(c.toString)))
def genGroup = for {
i <- Gen.chooseNum(1, 10)
} yield $Group((),
Grouped(ListMap(BsonField.Name("docsByAuthor" + i.toString) -> $sum($literal(Bson.Int32(1))))),
      -\/($var(DocField(BsonField.Name("author" + i)))))
def genGeoNear = for {
i <- Gen.chooseNum(1, 10)
} yield $GeoNear((), (40.0, -105.0), BsonField.Name("distance" + i), None, None, None, None, None, None, None)
def genOut = for {
i <- Gen.chooseNum(1, 10)
} yield $Out((), Collection("db", "result" + i))
def pipelineOpGens(size: Int): List[Gen[PipelineOp]] = {
genProject(size) ::
genRedact ::
unwindGen ::
genGroup ::
genGeoNear ::
genOut ::
arbitraryShapePreservingOpGens.map(g => for { sp <- g } yield sp.op)
}
case class ShapePreservingPipelineOp(op: PipelineOp)
//implicit def arbitraryProject: Arbitrary[Project] = Arbitrary(genProject)
implicit def arbitraryShapePreservingOp: Arbitrary[ShapePreservingPipelineOp] = Arbitrary {
// Note: Gen.oneOf is overridden and this variant requires two explicit args
val gens = arbitraryShapePreservingOpGens
Gen.oneOf(gens(0), gens(1), gens.drop(2): _*)
}
def arbitraryShapePreservingOpGens = {
def matchGen = for {
c <- Gen.alphaChar
} yield ShapePreservingPipelineOp($Match((), Selector.Doc(BsonField.Name(c.toString) -> Selector.Eq(Bson.Int32(-1)))))
def skipGen = for {
i <- Gen.chooseNum(1, 10)
} yield ShapePreservingPipelineOp($Skip((), i))
def limitGen = for {
i <- Gen.chooseNum(1, 10)
} yield ShapePreservingPipelineOp($Limit((), i))
def sortGen = for {
c <- Gen.alphaChar
} yield ShapePreservingPipelineOp($Sort((), NonEmptyList(BsonField.Name("name1") -> Ascending)))
List(matchGen, limitGen, skipGen, sortGen)
}
case class PairOfOpsWithSameType(op1: PipelineOp, op2: PipelineOp)
implicit def arbitraryPair: Arbitrary[PairOfOpsWithSameType] = Arbitrary { Gen.resize(5, Gen.sized { size =>
for {
gen <- Gen.oneOf(pipelineOpGens(size))
op1 <- gen
op2 <- gen
} yield PairOfOpsWithSameType(op1, op2)
}) }
"Project.id" should {
"be idempotent" ! prop { (p: $Project[Unit]) =>
p.id must_== p.id.id
}
}
"Project.get" should {
"retrieve whatever value it was set to" ! prop { (p: $Project[Unit], f: BsonField) =>
val One = $literal(Bson.Int32(1))
      p.set(f, -\/(One)).get(DocVar.ROOT(f)) must (beSome(-\/ (One)))
}
}
"Project.setAll" should {
"actually set all" ! prop { (p: $Project[Unit]) =>
      p.setAll(p.getAll.map(t => t._1 -> -\/(t._2))) must_== p
}.pendingUntilFixed("result could have `_id -> _id` inserted without changing semantics")
}
"Project.deleteAll" should {
"return empty when everything is deleted" ! prop { (p: $Project[Unit]) =>
p.deleteAll(p.getAll.map(_._1)) must_== p.empty
}
}
"SimpleMap.deleteAll" should {
import javascript._
import JsCore._
"remove one un-nested field" in {
val op = $SimpleMap(
$read(Collection("db", "foo")),
NonEmptyList(MapExpr(JsFn(Ident("x"),
Obj(ListMap(
"a" -> Select(Ident("x").fix, "x").fix,
"b" -> Select(Ident("x").fix, "y").fix)).fix))),
ListMap())
val exp = $SimpleMap(
$read(Collection("db", "foo")),
NonEmptyList(MapExpr(JsFn(Ident("x"),
Obj(ListMap(
"a" -> Select(Ident("x").fix, "x").fix)).fix))),
ListMap())
op.deleteAll(List(BsonField.Name("b"))) must_== exp
}
"remove one nested field" in {
val op = $SimpleMap(
$read(Collection("db", "foo")),
NonEmptyList(MapExpr(JsFn(Ident("x"),
Obj(ListMap(
"a" -> Select(Ident("x").fix, "x").fix,
"b" -> Obj(ListMap(
"c" -> Select(Ident("x").fix, "y").fix,
"d" -> Select(Ident("x").fix, "z").fix)).fix)).fix))),
ListMap())
val exp = $SimpleMap(
$read(Collection("db", "foo")),
NonEmptyList(MapExpr(JsFn(Ident("x"),
Obj(ListMap(
"a" -> Select(Ident("x").fix, "x").fix,
"b" -> Obj(ListMap(
"d" -> Select(Ident("x").fix, "z").fix)).fix)).fix))),
ListMap())
      op.deleteAll(List(BsonField.Name("b") \ BsonField.Name("c"))) must_== exp
}
"remove whole nested object" in {
val op = $SimpleMap(
$read(Collection("db", "foo")),
NonEmptyList(MapExpr(JsFn(Ident("x"),
Obj(ListMap(
"a" -> Select(Ident("x").fix, "x").fix,
"b" -> Obj(ListMap(
"c" -> Select(Ident("x").fix, "y").fix)).fix)).fix))),
ListMap())
val exp = $SimpleMap(
$read(Collection("db", "foo")),
NonEmptyList(MapExpr(JsFn(Ident("x"),
Obj(ListMap(
"a" -> Select(Ident("x").fix, "x").fix)).fix))),
ListMap())
      op.deleteAll(List(BsonField.Name("b") \ BsonField.Name("c"))) must_== exp
}
}
}
| wemrysi/quasar | core/src/test/scala/slamdata/engine/physical/mongodb/pipeline.scala | Scala | apache-2.0 | 6,572 |
/**
* Created on February 17, 2011
* Copyright (c) 2011, Wei-ju Wu
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Wei-ju Wu nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY WEI-JU WU ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL WEI-JU WU BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.dmpp.adf.logical
/**
* Trait to decorate blocks that are reading a BCPL string field.
*/
trait ReadsBcplStrings { self : HeaderBlock =>
/**
* Read the BCPL string at the specified position.
* @param offset the data offset within the data
* @param maxChars the maximum number of characters
*/
def bcplStringAt(offset: Int, maxChars: Int) = {
val nameLength = scala.math.min(sector(offset),
maxChars)
val builder = new StringBuilder
for (i <- 0 until nameLength) {
builder.append(sector(offset + 1 + i).asInstanceOf[Char])
}
builder.toString
}
def setBcplStringAt(offset: Int, maxChars: Int, str: String) = {
val nameLength = scala.math.min(str.length, maxChars)
sector(offset) = nameLength
for (i <- 0 until nameLength) {
sector(offset + 1 + i) = str.charAt(i).asInstanceOf[Byte]
}
}
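
  // Illustrative example (derived from the code above): a BCPL string field stores a length
  // value at `offset` followed by that many characters, so with sector(offset) == 4 and
  // sector(offset + 1 .. offset + 4) == 'W','o','r','k', bcplStringAt(offset, 30) returns
  // "Work", and setBcplStringAt writes the same layout back.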
}
| weiju/adf-tools | adf-core/src/main/scala/org/dmpp/adf/logical/ReadsBcplStrings.scala | Scala | bsd-3-clause | 2,452 |
package org.template.recommendation
import org.apache.predictionio.controller.PDataSource
import org.apache.predictionio.controller.EmptyEvaluationInfo
import org.apache.predictionio.controller.EmptyActualResult
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.{DataMap, Event, Storage}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import grizzled.slf4j.Logger
case class DataSourceParams(appId: Int) extends Params
case class Item(creationYear: Option[Int])
object Item {
object Fields {
val CreationYear = "creationYear"
}
}
class DataSource(val dsp: DataSourceParams)
extends PDataSource[TrainingData,
EmptyEvaluationInfo, Query, EmptyActualResult] {
@transient lazy val logger = Logger[this.type]
private lazy val EntityType = "movie"
override
def readTraining(sc: SparkContext): TrainingData = {
val eventsDb = Storage.getPEvents()
// create a RDD of (entityID, Item)
    // HOWTO: collect the items (movies)
val itemsRDD = eventsDb.aggregateProperties(
appId = dsp.appId,
entityType = "item"
)(sc).flatMap { case (entityId, properties) ⇒
ItemMarshaller.unmarshall(properties).map(entityId → _)
}
// get all user rate events
val rateEventsRDD: RDD[Event] = eventsDb.find(
appId = dsp.appId,
entityType = Some("user"),
eventNames = Some(List("rate")), // read "rate"
// targetEntityType is optional field of an event.
targetEntityType = Some(Some(EntityType)))(sc)
// collect ratings
val ratingsRDD = rateEventsRDD.flatMap { event ⇒
try {
(event.event match {
case "rate" => event.properties.getOpt[Double]("rating")
case _ ⇒ None
}).map(Rating(event.entityId, event.targetEntityId.get, _))
} catch { case e: Exception ⇒
logger.error(s"Cannot convert ${event} to Rating. Exception: ${e}.")
throw e
}
}.cache()
new TrainingData(ratingsRDD, itemsRDD)
}
}
object ItemMarshaller {
  // HOWTO: implement an unmarshaller that collects the properties used for filtering.
def unmarshall(properties: DataMap): Option[Item] =
Some(Item(properties.getOpt[Int](Item.Fields.CreationYear)))
}
case class Rating(user: String, item: String, rating: Double)
class TrainingData(val ratings: RDD[Rating], val items: RDD[(String, Item)])
extends Serializable {
override def toString =
s"ratings: [${ratings.count()}] (${ratings.take(2).toList}...)" +
s"items: [${items.count()} (${items.take(2).toList}...)]"
}
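
// Illustrative example (not part of the original source): with the reading above, a "rate"
// event from user "u1" on movie "m42" carrying a "rating" property of 4.5 becomes
// Rating("u1", "m42", 4.5), and an item entity "m42" whose properties contain
// "creationYear" -> 1994 becomes ("m42", Item(Some(1994))).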
| alex9311/PredictionIO | examples/scala-parallel-recommendation/custom-query/src/main/scala/DataSource.scala | Scala | apache-2.0 | 2,580 |
package com.malaska.spark.training.windowing.big
import org.apache.log4j.{Level, Logger}
import org.apache.spark.Partitioner
import org.apache.spark.sql.SparkSession
/**
 * Big windowing. This is for when you have over 50k records per key, or the number of
 * records per key is unbounded, but you still have many keys.
 *
 * If you only have one key or very few keys then you will want the
 * super-big windowing implementations instead.
*/
object BigWindowing {
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
def main(args: Array[String]): Unit = {
val jsonPath = args(0)
val sparkSession = SparkSession.builder
.master("local")
.appName("my-spark-app")
.config("spark.some.config.option", "config-value")
.config("spark.driver.host","127.0.0.1")
.getOrCreate()
val jsonDf = sparkSession.read.json(jsonPath)
val timeDifRdd = jsonDf.rdd.map(row => {
val group = row.getAs[String]("group")
val time = row.getAs[Long]("time")
val value = row.getAs[Long]("value")
((group, time), value)
}).repartitionAndSortWithinPartitions(new GroupPartitioner(2)).
mapPartitions(it => {
var lastValue = 0l
var currentGroup = "n/a"
it.map{ case((group, time), value) =>
if (!group.equals(currentGroup)) {
lastValue = 0l
currentGroup = group
}
val dif = value - lastValue
lastValue = value
(group, time, value, dif)
}
})
timeDifRdd.collect().foreach(r => {
println(r)
})
sparkSession.stop()
}
}
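
// Illustrative example (not part of the original source): for input rows
// {"group":"a","time":1,"value":10} and {"group":"a","time":2,"value":25} landing in the same
// partition, the mapPartitions pass above emits ("a", 1, 10, 10) and ("a", 2, 25, 15); the last
// element is the delta from the previous value of the same group, and it resets to the raw
// value whenever a new group starts.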
/**
 * A custom partitioner that partitions on the first element of the (group, time) key only,
 * while repartitionAndSortWithinPartitions still sorts by the whole key within each partition.
 *
 * @param numParts the number of partitions to use
*/
class GroupPartitioner(val numParts:Int) extends Partitioner {
override def numPartitions: Int = numParts
override def getPartition(key: Any): Int = {
    // String.hashCode can be negative, so normalise the result into [0, numPartitions)
    val h = key.asInstanceOf[(String, Long)]._1.hashCode % numPartitions
    if (h < 0) h + numPartitions else h
}
}
| TedBear42/spark_training | src/main/scala/com/malaska/spark/training/windowing/big/BigWindowing.scala | Scala | apache-2.0 | 2,007 |
/*
* Copyright (C) 2012 Pavel Fatin <http://pavelfatin.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.pavelfatin.fs
package internal
package wrapfs
import java.io
import io.IOException
import java.util.Calendar
/** A wrapper for basic file system entry properties.
*
* @see [[com.pavelfatin.fs.FileSystemEntry]]
*/
private trait EntryWrapper extends FileSystemEntry {
protected def root: io.File
protected def entry: io.File
def parent = if (entry == root) None else
Option(entry.getParentFile).map(new DirectoryWrapper(root, _))
def name = if (entry == root) "" else entry.getName
def name_=(it: String) {
if (entry == root) throw new UnsupportedOperationException("Root directory name can't be modified")
val renamed = entry.renameTo(new io.File(entry.getParent, it))
if (!renamed) throw new IOException(s"Unable to rename '${entry.getPath}' to '$it'")
}
def date = {
val it = Calendar.getInstance
it.setTimeInMillis(entry.lastModified)
it
}
def date_=(it: Calendar) {
if (entry == root) throw new UnsupportedOperationException("Root directory date can't be modified")
val changed = entry.setLastModified(it.getTimeInMillis)
if (!changed) throw new IOException(s"Unable to change modification time of '${entry.getPath}'")
}
def hidden = entry.isHidden
// Can be implemented only in Java 7+
def hidden_=(it: Boolean) {
if (entry == root) throw new UnsupportedOperationException("Root directory visibility can't be modified")
// Files.setAttribute(entry, "dos:hidden", it)
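    // A Java 7+ sketch (illustrative, not wired in here) for DOS/NTFS file systems would be:
    //   java.nio.file.Files.setAttribute(entry.toPath, "dos:hidden", java.lang.Boolean.valueOf(it))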
}
def delete() {
if (entry == root) throw new UnsupportedOperationException("Root directory can't be deleted")
val deleted = entry.delete()
if (!deleted) throw new IOException(s"Unable to delete '${entry.getPath}'")
}
}
| pavelfatin/toyfs | src/main/scala/com/pavelfatin/fs/internal/wrapfs/EntryWrapper.scala | Scala | gpl-3.0 | 2,431 |
package ai.agnos.sparql.util
import java.net.ServerSocket
/**
* HttpEndpoint construction factory
*/
object HttpEndpoint {
/**
* Get an HTTP endpoint bound to 'localhost' where a free port is automatically assigned
* and can be used to be bound to.
*
* The generated endpoint will be '''http://localhost:{assigned-port}{path}'''
*
* @param path the desired path part for the endpoint
* @return
*/
def localhostWithAutomaticPort(path: String): HttpEndpoint =
new HttpEndpointBuilder(assignPortAutomatically = true, useResourcePath = path, authentication = None).endpoint
/**
* Construct an endpoint from a string URL representation,
* which corresponds to '''[{protocol}://]{host}[:{port}][{path}]'''
* Protocol, port and path are optional. Default protocol is http://,
* default port is 80, the default path is empty.
*
* @param endpointString
* @return
*/
def apply(endpointString: String): HttpEndpoint =
new HttpEndpointBuilder(Some(endpointString), false, "", None).endpoint
/**
* Construct an endpoint from a string URL representation,
* which corresponds to '''[{protocol}://]{host}[:{port}][{path}]'''
   * Protocol, port and path are optional. The default protocol is http, the default port is
   * the protocol's default (80 for http, 443 for https), and the default path is empty.
*
* @param endpointString
* @return
*/
def apply(endpointString: String, authentication: Option[Authentication]): HttpEndpoint =
new HttpEndpointBuilder(Some(endpointString), false, "", authentication).endpoint
/**
* Construct an endpoint from individual parts.
* @param protocol
* @param host
* @param port
* @param path
* @return
*/
def apply(protocol: String, host: String, port: Integer, path: String, authentication: Option[Authentication]): HttpEndpoint =
new HttpEndpoint(protocol, host, port, path, authentication)
/**
* Construct an HTTP endpoint from individual parts.
*
* @param host
* @param port
* @return
*/
def apply(host: String, port: Integer, authentication: Option[Authentication]): HttpEndpoint =
new HttpEndpoint(defaultProtocol, host, port, "", authentication)
/**
* Construct an endpoint from individual parts with the desired path.
*
* @param host
* @param port
* @param path
* @return
*/
// JC: really don't like the logic of this method. environmentEndpoint is matched with the EndpointRegex multiple times
// to extract different parts of the URL. should be done only once.
// Andy why not use URL class in Java? https://docs.oracle.com/javase/7/docs/api/java/net/URL.html
//SSZ: yes, you are right, not optimal. Will make an effort to refactor this.
def apply(host: String, port: Integer, path: String, authentication: Option[Authentication] = None): HttpEndpoint =
new HttpEndpoint(defaultProtocol, host, port, path, authentication)
private val defaultProtocol: String = "http"
private val defaultHost: String = "localhost"
private sealed class HttpEndpointBuilder(
environmentEndpoint: Option[String] = None,
assignPortAutomatically: Boolean,
useResourcePath: String = "",
authentication: Option[Authentication] = None) {
    private val EndpointRegex = "(http[s]{0,1}://)?([a-zA-Z\\-\\.0-9]+)(:\\d{1,6})?(/.+)?".r
val protocol = environmentEndpoint match {
case Some(EndpointRegex(protocol, _, _, _)) if protocol != null => protocol.replace("://", "")
case _ => defaultProtocol
}
private val defaultPort: Int = protocol match {
case "http" => 80
case "https" => 443
}
val host = environmentEndpoint match {
case Some(EndpointRegex(_, host, _, _)) if host != null => host
case _ => defaultHost
}
val port: Int = environmentEndpoint match {
case Some(EndpointRegex(_, _, port, _)) if port != null => port.replace(":","").toInt
case Some(EndpointRegex(_, _, null, _)) => defaultPort
case _ if assignPortAutomatically =>
val socket = new ServerSocket(0)
val p = socket.getLocalPort
socket.close()
p
case _ => defaultPort
}
val path: String = environmentEndpoint match {
case Some(EndpointRegex(_, _, _, path)) if path != null => path
case _ => useResourcePath
}
val endpoint = HttpEndpoint(protocol, host, port, path, authentication)
}
}
/**
* Internal representation of an HttpEndpoint.
*
* @param protocol
* @param host
* @param port
* @param path
*/
case class HttpEndpoint(protocol: String, host: String, port: Int, path: String, authentication: Option[Authentication]) {
/**
* Shows the desired fully qualified URL of the endpoint built from the individual components
*/
val url: String = s"$protocol://$host${
(protocol, port) match {
case ("http", 80) => ""
case ("https", 443) => ""
case (p, x) => s":$x"
}
}$path"
}
/**
* Internal representation of the HTTP endpoint's user authentication.
*/
sealed trait Authentication
/**
* Basic HTTP authentication credential (username & "cleartext" password) .
*
* @param username
* @param password
*/
case class BasicAuthentication(username: String, password: String) extends Authentication
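
// An illustrative usage sketch (not part of the original source), exercising the parsing rules
// documented on the companion object above; the endpoint string and path are made-up values.
object HttpEndpointUsageSketch {
  def demo(): Unit = {
    val e = HttpEndpoint("https://example.org/sparql")
    assert(e.protocol == "https" && e.host == "example.org" && e.port == 443 && e.path == "/sparql")
    // default ports (80 for http, 443 for https) are elided when the URL is rebuilt
    assert(e.url == "https://example.org/sparql")
    val local = HttpEndpoint.localhostWithAutomaticPort("/test")
    assert(local.host == "localhost" && local.path == "/test" && local.port > 0)
  }
}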
| modelfabric/reactive-sparql | src/main/scala/ai/agnos/sparql/util/HttpEndpoint.scala | Scala | mit | 5,291 |
package geek.lawsof.physics.lib.machine.recipes
import net.minecraft.inventory.InventoryCrafting
import net.minecraft.item.ItemStack
import net.minecraft.world.World
/**
* Created by anshuman on 22-07-2014.
*/
trait MachineRecipe[I <: MachineRecipeInput, O <: MachineRecipeOutput] {
  /** Whether this recipe applies to the given input. */
  def matches(input: I): Boolean
  /** The processing time required for the given input. */
  def time(input: I): Int
  /** The output produced from the given input. */
  def result(input: I): O
}
| GeckoTheGeek42/TheLawsOfPhysics | src/main/scala/geek/lawsof/physics/lib/machine/recipes/MachineRecipe.scala | Scala | mit | 376 |
/*
* Copyright 2016 Miroslav Janíček
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.sandius.rembulan.test
import net.sandius.rembulan.compiler.CompilerSettings.CPUAccountingMode
import net.sandius.rembulan.compiler.{CompilerChunkLoader, CompilerSettings}
import net.sandius.rembulan.env.RuntimeEnvironments
import net.sandius.rembulan.exec._
import net.sandius.rembulan.impl.StateContexts
import net.sandius.rembulan.lib._
import net.sandius.rembulan.load.{ChunkClassLoader, ChunkLoader}
import net.sandius.rembulan.runtime.RuntimeCallInitialiser
import net.sandius.rembulan.test.FragmentExpectations.Env
import net.sandius.rembulan.test.Util.{BufferPrinter, Printer}
import net.sandius.rembulan.{Conversions, StateContext, Table, Variable}
import org.scalatest.{FunSpec, MustMatchers}
import scala.util.{Failure, Success}
trait FragmentExecTestSuite extends FunSpec with MustMatchers {
def bundles: Seq[FragmentBundle]
def expectations: Seq[FragmentExpectations]
def contexts: Seq[FragmentExpectations.Env]
def steps: Seq[Int]
def compilerConfigs: CompilerConfigs = CompilerConfigs.DefaultOnly
protected val Empty = FragmentExpectations.Env.Empty
protected val Basic = FragmentExpectations.Env.Basic
protected val Mod = FragmentExpectations.Env.Module
protected val Coro = FragmentExpectations.Env.Coro
protected val Math = FragmentExpectations.Env.Math
protected val Str = FragmentExpectations.Env.Str
protected val IO = FragmentExpectations.Env.IO
protected val Tab = FragmentExpectations.Env.Tab
protected val Debug = FragmentExpectations.Env.Debug
protected val Full = FragmentExpectations.Env.Full
protected def envForContext(state: StateContext, ctx: Env, ldr: ChunkLoader, printer: Printer): Table = {
val env = state.newTable()
val runtimeEnv = RuntimeEnvironments.system() // FIXME
val moduleClassLoader = this.getClass().getClassLoader
ctx match {
case Empty =>
// no-op
case Basic =>
BasicLib.installInto(state, env, runtimeEnv, ldr)
case Mod =>
BasicLib.installInto(state, env, runtimeEnv, ldr)
ModuleLib.installInto(state, env, runtimeEnv, ldr, moduleClassLoader)
case Coro =>
BasicLib.installInto(state, env, runtimeEnv, ldr)
CoroutineLib.installInto(state, env)
case Math =>
BasicLib.installInto(state, env, runtimeEnv, ldr)
MathLib.installInto(state, env)
case Str =>
BasicLib.installInto(state, env, runtimeEnv, ldr)
StringLib.installInto(state, env)
case IO =>
BasicLib.installInto(state, env, runtimeEnv, ldr)
IoLib.installInto(state, env, runtimeEnv)
case Tab =>
BasicLib.installInto(state, env, runtimeEnv, ldr)
TableLib.installInto(state, env)
case Debug =>
BasicLib.installInto(state, env, runtimeEnv, ldr)
DebugLib.installInto(state, env)
case Full =>
BasicLib.installInto(state, env, runtimeEnv, ldr)
ModuleLib.installInto(state, env, runtimeEnv, ldr, moduleClassLoader)
CoroutineLib.installInto(state, env)
MathLib.installInto(state, env)
StringLib.installInto(state, env)
IoLib.installInto(state, env, runtimeEnv)
OsLib.installInto(state, env, runtimeEnv)
TableLib.installInto(state, env)
Utf8Lib.installInto(state, env)
DebugLib.installInto(state, env)
}
env
}
sealed trait ChkLoader {
def name: String
def loader(): ChunkLoader
}
def compilerSettingsToString(settings: CompilerSettings): String = {
val cpu = settings.cpuAccountingMode() match {
case CPUAccountingMode.NO_CPU_ACCOUNTING => "n"
case CPUAccountingMode.IN_EVERY_BASIC_BLOCK => "a"
}
val cfold = settings.constFolding() match {
case true => "t"
case false => "f"
}
val ccache = settings.constCaching() match {
case true => "t"
case false => "f"
}
val nlimit = settings.nodeSizeLimit() match {
case 0 => "0"
case n => n.toString
}
cpu + cfold + ccache + "_" + nlimit
}
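  // e.g. NO_CPU_ACCOUNTING with constant folding and constant caching enabled and no node size
  // limit encodes as "ntt_0"; the encoded string becomes part of the loader name below.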
case class RembulanChkLoader(settings: CompilerSettings) extends ChkLoader {
def name = "RemC" + "_" + compilerSettingsToString(settings)
def loader() = CompilerChunkLoader.of(new ChunkClassLoader(), settings, "fragment_test_")
}
class CompilerConfigs private (configs: Seq[CompilerSettings]) {
def loaders: Seq[RembulanChkLoader] = configs.distinct map RembulanChkLoader
}
object CompilerConfigs {
val bools = Seq(true, false)
val limits = Seq(0, 10)
// val limits = Seq(0)
val allConfigs = for (
cpu <- CPUAccountingMode.values();
cfold <- bools;
ccache <- bools;
nlimit <- limits
) yield CompilerSettings.defaultSettings()
.withCPUAccountingMode(cpu)
.withConstFolding(cfold)
.withConstCaching(ccache)
.withNodeSizeLimit(nlimit)
case object DefaultOnly extends CompilerConfigs(Seq(CompilerSettings.defaultSettings()))
case object All extends CompilerConfigs(allConfigs)
}
val ldrs = compilerConfigs.loaders
for (bundle <- bundles;
fragment <- bundle.all;
ctx <- contexts) {
val prefix = ""
describe (prefix + fragment.description + " in " + ctx + ":") {
for (s <- steps; l <- ldrs) {
val stepDesc = s match {
case Int.MaxValue => "max"
case i => i.toString
}
it (l.name + " / " + stepDesc) {
val printer = new BufferPrinter()
val (state, func) = Util.timed(printer, "Compilation and setup") {
val ldr = l.loader()
val state = StateContexts.newDefaultInstance()
val env = envForContext(state, ctx, ldr, printer)
val func = ldr.loadTextChunk(new Variable(env), "test", fragment.code)
(state, func)
}
var steps = 0
val before = System.nanoTime()
val callExecutor = DirectCallExecutor.newExecutorWithTickLimit(s)
var resultValues: Array[AnyRef] = null
var continuation: Continuation = RuntimeCallInitialiser.forState(state).newCall(func)
var error: CallException = null
do {
try {
steps += 1
resultValues = callExecutor.resume(continuation)
}
catch {
case ex: CallPausedException => continuation = ex.getContinuation
case ex: CallException => error = ex
}
} while (error == null && resultValues == null)
val res = if (error != null) {
Failure(error.getCause)
}
else {
require (resultValues != null, "result must not be null")
Success(resultValues.toSeq)
}
val after = System.nanoTime()
val totalTimeMillis = (after - before) / 1000000.0
// val totalCPUUnitsSpent = preemptionContext.totalCost
// val avgTimePerCPUUnitNanos = (after - before).toDouble / totalCPUUnitsSpent.toDouble
// val avgCPUUnitsPerSecond = (1000000000.0 * totalCPUUnitsSpent) / (after - before)
printer.println("Execution took %.1f ms".format(totalTimeMillis))
// println("Total CPU cost: " + preemptionContext.totalCost + " LI")
printer.println("Computation steps: " + steps)
// println()
// println("Avg time per unit: %.2f ns".format(avgTimePerCPUUnitNanos))
// println("Avg units per second: %.1f LI/s".format(avgCPUUnitsPerSecond))
printer.println()
res match {
case Success(result) =>
printer.println("Result: success (" + result.size + " values):")
for ((v, i) <- result.zipWithIndex) {
printer.println(i + ":" + "\t" + Conversions.toHumanReadableString(v) + " (" + (if (v != null) v.getClass.getName else "null") + ")")
}
case Failure(ex) =>
printer.println("Result: error: " + ex.getMessage)
}
for (expects <- expectations;
ctxExp <- expects.expectationFor(fragment);
exp <- ctxExp.get(ctx)) {
exp.tryMatch(res, { () => scala.Predef.print(printer.get) })(this)
}
}
}
}
}
}
| mjanicek/rembulan | rembulan-tests/src/test/scala/net/sandius/rembulan/test/FragmentExecTestSuite.scala | Scala | apache-2.0 | 8,867 |
/*
* _ _
* _ __ ___ | |__ | | ___
* | '_ \\ / _ \\| '_ \\| |/ _ \\ noble :: norcane blog engine
* | | | | (_) | |_) | | __/ Copyright (c) 2016-2018 norcane
* |_| |_|\\___/|_.__/|_|\\___|
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.norcane.noble.api.models
/**
* Model class representing the configuration of one blog.
*
* @param name internal name of the blog
* @param path the path of the blog
* @param reloadToken token used to authenticate blog reload requests
* @param storageConfig configuration of blog storage
* @author Vaclav Svejcar ([email protected])
*/
case class BlogConfig(name: String, path: String, reloadToken: Option[String],
storageConfig: StorageConfig)
| norcane/noble | sdk/noble-api/src/main/scala/com/norcane/noble/api/models/BlogConfig.scala | Scala | apache-2.0 | 1,292 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import java.net.URLEncoder
import java.util.Date
import javax.servlet.http.HttpServletRequest
import scala.collection.JavaConverters._
import scala.collection.mutable.{HashMap, ListBuffer}
import scala.xml._
import org.apache.commons.lang3.StringEscapeUtils
import org.apache.spark.JobExecutionStatus
import org.apache.spark.scheduler._
import org.apache.spark.ui._
import org.apache.spark.ui.jobs.UIData.{JobUIData, StageUIData}
import org.apache.spark.util.Utils
/** Page showing list of all ongoing and recently finished jobs */
private[ui] class AllJobsPage(parent: JobsTab) extends WebUIPage("") {
private val JOBS_LEGEND =
<div class="legend-area"><svg width="150px" height="85px">
<rect class="succeeded-job-legend"
x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="17px">Succeeded</text>
<rect class="failed-job-legend"
x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="42px">Failed</text>
<rect class="running-job-legend"
x="5px" y="55px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="67px">Running</text>
</svg></div>.toString.filter(_ != '\n')
private val EXECUTORS_LEGEND =
<div class="legend-area"><svg width="150px" height="55px">
<rect class="executor-added-legend"
x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="17px">Added</text>
<rect class="executor-removed-legend"
x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="42px">Removed</text>
</svg></div>.toString.filter(_ != '\n')
private def getLastStageNameAndDescription(job: JobUIData): (String, String) = {
val lastStageInfo = Option(job.stageIds)
.filter(_.nonEmpty)
.flatMap { ids => parent.jobProgresslistener.stageIdToInfo.get(ids.max)}
val lastStageData = lastStageInfo.flatMap { s =>
parent.jobProgresslistener.stageIdToData.get((s.stageId, s.attemptId))
}
val name = lastStageInfo.map(_.name).getOrElse("(Unknown Stage Name)")
val description = lastStageData.flatMap(_.description).getOrElse("")
(name, description)
}
private def makeJobEvent(jobUIDatas: Seq[JobUIData]): Seq[String] = {
jobUIDatas.filter { jobUIData =>
jobUIData.status != JobExecutionStatus.UNKNOWN && jobUIData.submissionTime.isDefined
}.map { jobUIData =>
val jobId = jobUIData.jobId
val status = jobUIData.status
val (jobName, jobDescription) = getLastStageNameAndDescription(jobUIData)
val displayJobDescription =
if (jobDescription.isEmpty) {
jobName
} else {
UIUtils.makeDescription(jobDescription, "", plainText = true).text
}
val submissionTime = jobUIData.submissionTime.get
val completionTimeOpt = jobUIData.completionTime
val completionTime = completionTimeOpt.getOrElse(System.currentTimeMillis())
val classNameByStatus = status match {
case JobExecutionStatus.SUCCEEDED => "succeeded"
case JobExecutionStatus.FAILED => "failed"
case JobExecutionStatus.RUNNING => "running"
case JobExecutionStatus.UNKNOWN => "unknown"
}
// The timeline library treats contents as HTML, so we have to escape them. We need to add
// extra layers of escaping in order to embed this in a Javascript string literal.
val escapedDesc = Utility.escape(displayJobDescription)
val jsEscapedDesc = StringEscapeUtils.escapeEcmaScript(escapedDesc)
val jobEventJsonAsStr =
s"""
|{
| 'className': 'job application-timeline-object ${classNameByStatus}',
| 'group': 'jobs',
| 'start': new Date(${submissionTime}),
| 'end': new Date(${completionTime}),
| 'content': '<div class="application-timeline-content"' +
| 'data-html="true" data-placement="top" data-toggle="tooltip"' +
| 'data-title="${jsEscapedDesc} (Job ${jobId})<br>' +
| 'Status: ${status}<br>' +
| 'Submitted: ${UIUtils.formatDate(new Date(submissionTime))}' +
| '${
if (status != JobExecutionStatus.RUNNING) {
s"""<br>Completed: ${UIUtils.formatDate(new Date(completionTime))}"""
} else {
""
}
}">' +
| '${jsEscapedDesc} (Job ${jobId})</div>'
|}
""".stripMargin
jobEventJsonAsStr
}
}
private def makeExecutorEvent(executorUIDatas: Seq[SparkListenerEvent]):
Seq[String] = {
val events = ListBuffer[String]()
executorUIDatas.foreach {
case a: SparkListenerExecutorAdded =>
val addedEvent =
s"""
|{
| 'className': 'executor added',
| 'group': 'executors',
| 'start': new Date(${a.time}),
| 'content': '<div class="executor-event-content"' +
| 'data-toggle="tooltip" data-placement="bottom"' +
| 'data-title="Executor ${a.executorId}<br>' +
| 'Added at ${UIUtils.formatDate(new Date(a.time))}"' +
| 'data-html="true">Executor ${a.executorId} added</div>'
|}
""".stripMargin
events += addedEvent
case e: SparkListenerExecutorRemoved =>
val removedEvent =
s"""
|{
| 'className': 'executor removed',
| 'group': 'executors',
| 'start': new Date(${e.time}),
| 'content': '<div class="executor-event-content"' +
| 'data-toggle="tooltip" data-placement="bottom"' +
| 'data-title="Executor ${e.executorId}<br>' +
| 'Removed at ${UIUtils.formatDate(new Date(e.time))}' +
| '${
if (e.reason != null) {
s"""<br>Reason: ${e.reason.replace("\n", " ")}"""
} else {
""
}
}"' +
| 'data-html="true">Executor ${e.executorId} removed</div>'
|}
""".stripMargin
events += removedEvent
}
events.toSeq
}
private def makeTimeline(
jobs: Seq[JobUIData],
executors: Seq[SparkListenerEvent],
startTime: Long): Seq[Node] = {
val jobEventJsonAsStrSeq = makeJobEvent(jobs)
val executorEventJsonAsStrSeq = makeExecutorEvent(executors)
val groupJsonArrayAsStr =
s"""
|[
| {
| 'id': 'executors',
| 'content': '<div>Executors</div>${EXECUTORS_LEGEND}',
| },
| {
| 'id': 'jobs',
| 'content': '<div>Jobs</div>${JOBS_LEGEND}',
| }
|]
""".stripMargin
val eventArrayAsStr =
(jobEventJsonAsStrSeq ++ executorEventJsonAsStrSeq).mkString("[", ",", "]")
<span class="expand-application-timeline">
<span class="expand-application-timeline-arrow arrow-closed"></span>
<a data-toggle="tooltip" title={ToolTips.JOB_TIMELINE} data-placement="right">
Event Timeline
</a>
</span> ++
<div id="application-timeline" class="collapsed">
<div class="control-panel">
<div id="application-timeline-zoom-lock">
<input type="checkbox"></input>
<span>Enable zooming</span>
</div>
</div>
</div> ++
<script type="text/javascript">
{Unparsed(s"drawApplicationTimeline(${groupJsonArrayAsStr}," +
s"${eventArrayAsStr}, ${startTime}, ${UIUtils.getTimeZoneOffset()});")}
</script>
}
private def jobsTable(
request: HttpServletRequest,
tableHeaderId: String,
jobTag: String,
jobs: Seq[JobUIData],
killEnabled: Boolean): Seq[Node] = {
val allParameters = request.getParameterMap.asScala.toMap
val parameterOtherTable = allParameters.filterNot(_._1.startsWith(jobTag))
.map(para => para._1 + "=" + para._2(0))
val someJobHasJobGroup = jobs.exists(_.jobGroup.isDefined)
val jobIdTitle = if (someJobHasJobGroup) "Job Id (Job Group)" else "Job Id"
val parameterJobPage = request.getParameter(jobTag + ".page")
val parameterJobSortColumn = request.getParameter(jobTag + ".sort")
val parameterJobSortDesc = request.getParameter(jobTag + ".desc")
val parameterJobPageSize = request.getParameter(jobTag + ".pageSize")
val parameterJobPrevPageSize = request.getParameter(jobTag + ".prevPageSize")
val jobPage = Option(parameterJobPage).map(_.toInt).getOrElse(1)
val jobSortColumn = Option(parameterJobSortColumn).map { sortColumn =>
UIUtils.decodeURLParameter(sortColumn)
}.getOrElse(jobIdTitle)
val jobSortDesc = Option(parameterJobSortDesc).map(_.toBoolean).getOrElse(
// New jobs should be shown above old jobs by default.
if (jobSortColumn == jobIdTitle) true else false
)
val jobPageSize = Option(parameterJobPageSize).map(_.toInt).getOrElse(100)
val jobPrevPageSize = Option(parameterJobPrevPageSize).map(_.toInt).getOrElse(jobPageSize)
val page: Int = {
// If the user has changed to a larger page size, then go to page 1 in order to avoid
// IndexOutOfBoundsException.
if (jobPageSize <= jobPrevPageSize) {
jobPage
} else {
1
}
}
val currentTime = System.currentTimeMillis()
try {
new JobPagedTable(
jobs,
tableHeaderId,
jobTag,
UIUtils.prependBaseUri(parent.basePath),
"jobs", // subPath
parameterOtherTable,
parent.jobProgresslistener.stageIdToInfo,
parent.jobProgresslistener.stageIdToData,
killEnabled,
currentTime,
jobIdTitle,
pageSize = jobPageSize,
sortColumn = jobSortColumn,
desc = jobSortDesc
).table(page)
} catch {
case e @ (_ : IllegalArgumentException | _ : IndexOutOfBoundsException) =>
<div class="alert alert-error">
<p>Error while rendering job table:</p>
<pre>
{Utils.exceptionString(e)}
</pre>
</div>
}
}
def render(request: HttpServletRequest): Seq[Node] = {
val listener = parent.jobProgresslistener
listener.synchronized {
val startTime = listener.startTime
val endTime = listener.endTime
val activeJobs = listener.activeJobs.values.toSeq
val completedJobs = listener.completedJobs.reverse.toSeq
val failedJobs = listener.failedJobs.reverse.toSeq
val activeJobsTable =
jobsTable(request, "active", "activeJob", activeJobs, killEnabled = parent.killEnabled)
val completedJobsTable =
jobsTable(request, "completed", "completedJob", completedJobs, killEnabled = false)
val failedJobsTable =
jobsTable(request, "failed", "failedJob", failedJobs, killEnabled = false)
val shouldShowActiveJobs = activeJobs.nonEmpty
val shouldShowCompletedJobs = completedJobs.nonEmpty
val shouldShowFailedJobs = failedJobs.nonEmpty
val completedJobNumStr = if (completedJobs.size == listener.numCompletedJobs) {
s"${completedJobs.size}"
} else {
s"${listener.numCompletedJobs}, only showing ${completedJobs.size}"
}
val summary: NodeSeq =
<div>
<ul class="unstyled">
<li>
<strong>User:</strong>
{parent.getSparkUser}
</li>
<li>
<strong>Total Uptime:</strong>
{
if (endTime < 0 && parent.sc.isDefined) {
UIUtils.formatDuration(System.currentTimeMillis() - startTime)
} else if (endTime > 0) {
UIUtils.formatDuration(endTime - startTime)
}
}
</li>
<li>
<strong>Scheduling Mode: </strong>
{listener.schedulingMode.map(_.toString).getOrElse("Unknown")}
</li>
{
if (shouldShowActiveJobs) {
<li>
<a href="#active"><strong>Active Jobs:</strong></a>
{activeJobs.size}
</li>
}
}
{
if (shouldShowCompletedJobs) {
<li id="completed-summary">
<a href="#completed"><strong>Completed Jobs:</strong></a>
{completedJobNumStr}
</li>
}
}
{
if (shouldShowFailedJobs) {
<li>
<a href="#failed"><strong>Failed Jobs:</strong></a>
{listener.numFailedJobs}
</li>
}
}
</ul>
</div>
var content = summary
val executorListener = parent.executorListener
content ++= makeTimeline(activeJobs ++ completedJobs ++ failedJobs,
executorListener.executorEvents, startTime)
if (shouldShowActiveJobs) {
content ++= <h4 id="active">Active Jobs ({activeJobs.size})</h4> ++
activeJobsTable
}
if (shouldShowCompletedJobs) {
content ++= <h4 id="completed">Completed Jobs ({completedJobNumStr})</h4> ++
completedJobsTable
}
if (shouldShowFailedJobs) {
content ++= <h4 id ="failed">Failed Jobs ({failedJobs.size})</h4> ++
failedJobsTable
}
val helpText = """A job is triggered by an action, like count() or saveAsTextFile().""" +
" Click on a job to see information about the stages of tasks inside it."
UIUtils.headerSparkPage("Spark Jobs", content, parent, helpText = Some(helpText))
}
}
}
private[ui] class JobTableRowData(
val jobData: JobUIData,
val lastStageName: String,
val lastStageDescription: String,
val duration: Long,
val formattedDuration: String,
val submissionTime: Long,
val formattedSubmissionTime: String,
val jobDescription: NodeSeq,
val detailUrl: String)
private[ui] class JobDataSource(
jobs: Seq[JobUIData],
stageIdToInfo: HashMap[Int, StageInfo],
stageIdToData: HashMap[(Int, Int), StageUIData],
basePath: String,
currentTime: Long,
pageSize: Int,
sortColumn: String,
desc: Boolean) extends PagedDataSource[JobTableRowData](pageSize) {
// Convert JobUIData to JobTableRowData which contains the final contents to show in the table
// so that we can avoid creating duplicate contents during sorting the data
private val data = jobs.map(jobRow).sorted(ordering(sortColumn, desc))
private var _slicedJobIds: Set[Int] = null
override def dataSize: Int = data.size
override def sliceData(from: Int, to: Int): Seq[JobTableRowData] = {
val r = data.slice(from, to)
_slicedJobIds = r.map(_.jobData.jobId).toSet
r
}
private def getLastStageNameAndDescription(job: JobUIData): (String, String) = {
val lastStageInfo = Option(job.stageIds)
.filter(_.nonEmpty)
.flatMap { ids => stageIdToInfo.get(ids.max)}
val lastStageData = lastStageInfo.flatMap { s =>
stageIdToData.get((s.stageId, s.attemptId))
}
val name = lastStageInfo.map(_.name).getOrElse("(Unknown Stage Name)")
val description = lastStageData.flatMap(_.description).getOrElse("")
(name, description)
}
private def jobRow(jobData: JobUIData): JobTableRowData = {
val (lastStageName, lastStageDescription) = getLastStageNameAndDescription(jobData)
val duration: Option[Long] = {
jobData.submissionTime.map { start =>
val end = jobData.completionTime.getOrElse(System.currentTimeMillis())
end - start
}
}
val formattedDuration = duration.map(d => UIUtils.formatDuration(d)).getOrElse("Unknown")
val submissionTime = jobData.submissionTime
val formattedSubmissionTime = submissionTime.map(UIUtils.formatDate).getOrElse("Unknown")
val jobDescription = UIUtils.makeDescription(lastStageDescription, basePath, plainText = false)
val detailUrl = "%s/jobs/job?id=%s".format(basePath, jobData.jobId)
new JobTableRowData (
jobData,
lastStageName,
lastStageDescription,
duration.getOrElse(-1),
formattedDuration,
submissionTime.getOrElse(-1),
formattedSubmissionTime,
jobDescription,
detailUrl
)
}
/**
* Return Ordering according to sortColumn and desc
*/
private def ordering(sortColumn: String, desc: Boolean): Ordering[JobTableRowData] = {
val ordering: Ordering[JobTableRowData] = sortColumn match {
case "Job Id" | "Job Id (Job Group)" => Ordering.by(_.jobData.jobId)
case "Description" => Ordering.by(x => (x.lastStageDescription, x.lastStageName))
case "Submitted" => Ordering.by(_.submissionTime)
case "Duration" => Ordering.by(_.duration)
case "Stages: Succeeded/Total" | "Tasks (for all stages): Succeeded/Total" =>
throw new IllegalArgumentException(s"Unsortable column: $sortColumn")
case unknownColumn => throw new IllegalArgumentException(s"Unknown column: $unknownColumn")
}
if (desc) {
ordering.reverse
} else {
ordering
}
}
}
private[ui] class JobPagedTable(
data: Seq[JobUIData],
tableHeaderId: String,
jobTag: String,
basePath: String,
subPath: String,
parameterOtherTable: Iterable[String],
stageIdToInfo: HashMap[Int, StageInfo],
stageIdToData: HashMap[(Int, Int), StageUIData],
killEnabled: Boolean,
currentTime: Long,
jobIdTitle: String,
pageSize: Int,
sortColumn: String,
desc: Boolean
) extends PagedTable[JobTableRowData] {
val parameterPath = basePath + s"/$subPath/?" + parameterOtherTable.mkString("&")
override def tableId: String = jobTag + "-table"
override def tableCssClass: String =
"table table-bordered table-condensed table-striped " +
"table-head-clickable table-cell-width-limited"
override def pageSizeFormField: String = jobTag + ".pageSize"
override def prevPageSizeFormField: String = jobTag + ".prevPageSize"
override def pageNumberFormField: String = jobTag + ".page"
override val dataSource = new JobDataSource(
data,
stageIdToInfo,
stageIdToData,
basePath,
currentTime,
pageSize,
sortColumn,
desc)
override def pageLink(page: Int): String = {
val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
parameterPath +
s"&$pageNumberFormField=$page" +
s"&$jobTag.sort=$encodedSortColumn" +
s"&$jobTag.desc=$desc" +
s"&$pageSizeFormField=$pageSize" +
s"#$tableHeaderId"
}
override def goButtonFormPath: String = {
val encodedSortColumn = URLEncoder.encode(sortColumn, "UTF-8")
s"$parameterPath&$jobTag.sort=$encodedSortColumn&$jobTag.desc=$desc#$tableHeaderId"
}
override def headers: Seq[Node] = {
// Information for each header: title, cssClass, and sortable
val jobHeadersAndCssClasses: Seq[(String, String, Boolean)] =
Seq(
(jobIdTitle, "", true),
("Description", "", true), ("Submitted", "", true), ("Duration", "", true),
("Stages: Succeeded/Total", "", false),
("Tasks (for all stages): Succeeded/Total", "", false)
)
if (!jobHeadersAndCssClasses.filter(_._3).map(_._1).contains(sortColumn)) {
throw new IllegalArgumentException(s"Unknown column: $sortColumn")
}
val headerRow: Seq[Node] = {
jobHeadersAndCssClasses.map { case (header, cssClass, sortable) =>
if (header == sortColumn) {
val headerLink = Unparsed(
parameterPath +
s"&$jobTag.sort=${URLEncoder.encode(header, "UTF-8")}" +
s"&$jobTag.desc=${!desc}" +
s"&$jobTag.pageSize=$pageSize" +
s"#$tableHeaderId")
val arrow = if (desc) "▾" else "▴" // UP or DOWN
<th class={cssClass}>
<a href={headerLink}>
{header}<span>
{Unparsed(arrow)}
</span>
</a>
</th>
} else {
if (sortable) {
val headerLink = Unparsed(
parameterPath +
s"&$jobTag.sort=${URLEncoder.encode(header, "UTF-8")}" +
s"&$jobTag.pageSize=$pageSize" +
s"#$tableHeaderId")
<th class={cssClass}>
<a href={headerLink}>
{header}
</a>
</th>
} else {
<th class={cssClass}>
{header}
</th>
}
}
}
}
<thead>{headerRow}</thead>
}
override def row(jobTableRow: JobTableRowData): Seq[Node] = {
val job = jobTableRow.jobData
val killLink = if (killEnabled) {
val confirm =
s"if (window.confirm('Are you sure you want to kill job ${job.jobId} ?')) " +
"{ this.parentNode.submit(); return true; } else { return false; }"
// SPARK-6846 this should be POST-only but YARN AM won't proxy POST
/*
val killLinkUri = s"$basePathUri/jobs/job/kill/"
<form action={killLinkUri} method="POST" style="display:inline">
<input type="hidden" name="id" value={job.jobId.toString}/>
<a href="#" onclick={confirm} class="kill-link">(kill)</a>
</form>
*/
val killLinkUri = s"$basePath/jobs/job/kill/?id=${job.jobId}"
<a href={killLinkUri} onclick={confirm} class="kill-link">(kill)</a>
} else {
Seq.empty
}
<tr id={"job-" + job.jobId}>
<td>
{job.jobId} {job.jobGroup.map(id => s"($id)").getOrElse("")}
</td>
<td>
{jobTableRow.jobDescription} {killLink}
<a href={jobTableRow.detailUrl} class="name-link">{jobTableRow.lastStageName}</a>
</td>
<td>
{jobTableRow.formattedSubmissionTime}
</td>
<td>{jobTableRow.formattedDuration}</td>
<td class="stage-progress-cell">
{job.completedStageIndices.size}/{job.stageIds.size - job.numSkippedStages}
{if (job.numFailedStages > 0) s"(${job.numFailedStages} failed)"}
{if (job.numSkippedStages > 0) s"(${job.numSkippedStages} skipped)"}
</td>
<td class="progress-cell">
{UIUtils.makeProgressBar(started = job.numActiveTasks, completed = job.numCompletedTasks,
failed = job.numFailedTasks, skipped = job.numSkippedTasks, killed = job.numKilledTasks,
total = job.numTasks - job.numSkippedTasks)}
</td>
</tr>
}
}
| u2009cf/spark-radar | core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala | Scala | apache-2.0 | 23,526 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.algorithms.consensus
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.rich.RichAlignmentRecord
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.formats.avro.AlignmentRecord
class ConsensusGeneratorFromReadsSuite extends ADAMFunSuite {
val cg = new ConsensusGeneratorFromReads
def artificial_reads: RDD[AlignmentRecord] = {
val path = ClassLoader.getSystemClassLoader.getResource("artificial.sam").getFile
sc.loadAlignments(path)
}
sparkTest("checking search for consensus list for artificial reads") {
val consensus = cg.findConsensus(artificial_reads.map(new RichAlignmentRecord(_))
.collect()
.toSeq)
assert(consensus.size === 2)
}
}
| FusionWorks/adam | adam-core/src/test/scala/org/bdgenomics/adam/algorithms/consensus/ConsensusGeneratorFromReadsSuite.scala | Scala | apache-2.0 | 1,576 |
package com.twitter.zipkin.storage.hbase.mapping
import org.apache.hadoop.hbase.util.Bytes
case class SpanNameMapping(id: Long, value: Array[Byte], parent: Option[ServiceMapping]) extends Mapping {
val mappingType: Byte = 1
lazy val name = Bytes.toString(value)
}
| pteichman/zipkin | zipkin-hbase/src/main/scala/com/twitter/zipkin/storage/hbase/mapping/SpanNameMapping.scala | Scala | apache-2.0 | 270 |
import sbt._
import Keys._
object TestProjectBuild extends Build {
lazy val root = Project(id = "root", base = file("."))
lazy val footprint = Project(id = "foo", base = file("foo"))
lazy val bar = Project(id = "bar", base = file("bar"))
val deleteFileOnExitHook = Command.single("deleteFileOnExitHook") { (state: State, arg: String) =>
state.addExitHook {
new File(arg).delete()
}
}
override lazy val settings = super.settings ++ Seq(Keys.commands += deleteFileOnExitHook)
}
| luismfonseca/sbt-autojump | src/sbt-test/global/avoid-execute-exit-hooks/project/Build.scala | Scala | mit | 505 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.network.TransportContext
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.server.TransportServer
import org.apache.spark.network.shuffle.{ExternalShuffleBlockHandler, ExternalShuffleClient}
/**
* This suite creates an external shuffle server and routes all shuffle fetches through it.
* Note that failures in this suite may arise due to changes in Spark that invalidate expectations
* set up in [[ExternalShuffleBlockHandler]], such as changing the format of shuffle files or how
* we hash files into folders.
*/
class ExternalShuffleServiceSuite extends ShuffleSuite with BeforeAndAfterAll {
var server: TransportServer = _
var rpcHandler: ExternalShuffleBlockHandler = _
override def beforeAll() {
val transportConf = SparkTransportConf.fromSparkConf(conf, numUsableCores = 2)
rpcHandler = new ExternalShuffleBlockHandler(transportConf)
val transportContext = new TransportContext(transportConf, rpcHandler)
server = transportContext.createServer()
conf.set("spark.shuffle.manager", "sort")
conf.set("spark.shuffle.service.enabled", "true")
conf.set("spark.shuffle.service.port", server.getPort.toString)//端口随机
}
override def afterAll() {
server.close()
}
// This test ensures that the external shuffle service is actually in use for the other tests.
/**
test("using external shuffle service") {
sc = new SparkContext("local-cluster[2,1,1024]", "test", conf)
sc.env.blockManager.externalShuffleServiceEnabled should equal(true)
sc.env.blockManager.shuffleClient.getClass should equal(classOf[ExternalShuffleClient])
// In a slow machine, one slave may register hundreds of milliseconds ahead of the other one.
// If we don't wait for all slaves, it's possible that only one executor runs all jobs. Then
// all shuffle blocks will be in this executor, ShuffleBlockFetcherIterator will directly fetch
// local blocks from the local BlockManager and won't send requests to ExternalShuffleService.
// In this case, we won't receive FetchFailed. And it will make this test fail.
// Therefore, we should wait until all slaves are up
sc.jobProgressListener.waitUntilExecutorsUp(2, 10000)
val rdd = sc.parallelize(0 until 1000, 10).map(i => (i, 1)).reduceByKey(_ + _)
rdd.count()
rdd.count()
// Invalidate the registered executors, disallowing access to their shuffle blocks (without
// deleting the actual shuffle files, so we could access them without the shuffle service).
//无效的注册者,不允许访问他们的shuffle块
rpcHandler.applicationRemoved(sc.conf.getAppId, false /* cleanupLocalDirs */)
// Now Spark will receive FetchFailed, and not retry the stage due to "spark.test.noStageRetry"
// being set.
val e = intercept[SparkException] {
rdd.count()
}
e.getMessage should include ("Fetch failure will not retry stage due to testing config")
}**/
}
| tophua/spark1.52 | core/src/test/scala/org/apache/spark/ExternalShuffleServiceSuite.scala | Scala | apache-2.0 | 4,181 |
package monocle.std
import monocle.function.{At, Empty}
import monocle.{Lens, Prism}
import scalaz.{ISet, Order}
object iset extends ISetOptics
trait ISetOptics {
implicit def emptyISet[A]: Empty[ISet[A]] = new Empty[ISet[A]] {
def empty = Prism[ISet[A], Unit](s => if(s.isEmpty) Some(()) else None)(_ => ISet.empty[A])
}
implicit def atISet[A: Order]: At[ISet[A], A, Unit] = new At[ISet[A], A, Unit] {
def at(a: A) = Lens[ISet[A], Option[Unit]](s => if(s member a) Some(()) else None)(
optA => set => optA.fold(set delete a)(_ => set insert a)
)
}
}
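// A minimal usage sketch of the optics above. The example object below is hypothetical
// (not part of the library) and assumes scalaz's Order[Int] instance from scalaz.std.anyVal._.
private[std] object ISetOpticsExample {
  import scalaz.std.anyVal._

  val s: ISet[Int] = ISet.fromList(List(1, 2, 3))

  val twoIsMember: Option[Unit] = iset.atISet[Int].at(2).get(s)        // Some(())
  val withoutTwo: ISet[Int]     = iset.atISet[Int].at(2).set(None)(s)  // ISet(1, 3)
  val emptyMatch: Option[Unit]  = iset.emptyISet[Int].empty.getOption(ISet.empty[Int]) // Some(())
}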
| NightRa/Monocle | core/src/main/scala/monocle/std/ISet.scala | Scala | mit | 584 |
package drt.shared
object SplitRatiosNs {
// type SplitRatios = List[SplitRatio]
case class SplitRatios(splits: List[SplitRatio]=Nil)
object SplitRatios {
def apply(ratios: SplitRatio*): SplitRatios = SplitRatios(ratios.toList)
}
case class SplitRatio(paxType: PaxTypeAndQueue, ratio: Double)
}
| somanythings/drt-scalajs-spa-exploration | shared/src/main/scala/spatutorial/shared/SplitRatiosNs.scala | Scala | apache-2.0 | 310 |
def combine(x: M): (M => M) | hmemcpy/milewski-ctfp-pdf | src/content/3.6/code/scala/snippet04.scala | Scala | gpl-3.0 | 27 |
package org.littlewings.infinispan.persistence
import org.infinispan.Cache
import org.infinispan.configuration.cache.{CacheMode, ConfigurationBuilder}
import org.infinispan.manager.DefaultCacheManager
import org.scalatest.FunSpec
import org.scalatest.Matchers._
class SimpleMapCacheStoreSpec extends FunSpec {
describe("simple map cache store spec") {
it("persistence") {
val storeSize = 2
val numInstances = 1
SimpleMapCacheStore.instances(storeSize)
withStoreCache[String, String]("storeCache", numInstances) { cache =>
println(s"Cache size => ${cache.size}")
val range = 1 to 10
(1 to 3).foreach(i => cache.remove(s"key$i"))
(4 to 6).foreach(i => cache.put(s"key$i", "valueXXXXX"))
range.foreach(i => println(s"key[${s"key$i"}] => ${cache.get(s"key$i")}"))
withStoreCache[String, String]("storeCache", numInstances) { cache2 =>
range.foreach(i => println(s"key[${s"key$i"}] => ${cache2.get(s"key$i")}"))
(1 to 10) foreach (i => cache2.put(s"key$i", s"value$i"))
}
// (1 to 10) foreach (i => cache.put(s"key$i", s"value$i"))
/*
println("Wait...")
Thread.sleep(5000L)
range.foreach(i => println(s"key[${s"key$i"}] => ${cache.get(s"key$i")}"))
*/
}
}
}
def withStoreCache[K, V](cacheName: String, numInstances: Int = 1)(fun: Cache[K, V] => Unit): Unit = {
val managers =
(1 to numInstances).map { _ =>
val manager = new DefaultCacheManager("infinispan.xml")
val persistenceBuilder = new ConfigurationBuilder()
.clustering
.cacheMode(CacheMode.DIST_SYNC)
.expiration
// .maxIdle(3000L)
// .wakeUpInterval(500L)
.persistence
manager.defineConfiguration(cacheName,
persistenceBuilder
.addStore(new SimpleMapCacheStoreConfigurationBuilder(persistenceBuilder))
.fetchPersistentState(false)
.preload(false)
.shared(false)
.purgeOnStartup(false)
.ignoreModifications(false)
.build)
manager
}
try {
managers.foreach(_.getCache[K, V](cacheName))
fun(managers.head.getCache[K, V](cacheName))
} finally {
managers.foreach(_.stop())
}
}
}
| kazuhira-r/infinispan-examples | infinispan-persistence/src/test/scala/org/littlewings/infinispan/persistence/SimpleMapCacheStoreSpec.scala | Scala | mit | 2,522 |
val cont: Unit => Unit = null
/*start*/cont/*end*/
//(Unit) => Unit | LPTK/intellij-scala | testdata/typeInference/bugs4/SCL2816.scala | Scala | apache-2.0 | 67 |
package com.antiparagon.cvexperimenter.chessscanner
import org.opencv.core.{Core, Rect}
import org.opencv.imgcodecs.Imgcodecs
import org.scalatest._
/**
* This test file is meant to test all the example chessboard images saved in the
* 'CVExperimenter/images/Chess Scanner/Starting Position/Chessboard Failures' folder.
* These images are the ones that the current algorithms do not work on. If these images
* are made to work then all saved images will have the algorithms find the chessboards.
*
* Created by wmckay on 11/5/16.
*/
class ChessboardFinderFailuresTester extends FlatSpec with Matchers {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME)
val IMG_FOLDER = "images/Chess Scanner/Starting Position/Chessboard Failures/"
val OUTPUT_FOLDER = "Debug Images/"
val BOARD_SETUP = "BoardSetup.jpg"
val BOARD_SETUP_RECT = new Rect(13, 13, 223, 223)
val CHESS_BOARD_SET_UP = "chess_board_set_up.jpg"
val CHESS_BOARD_SET_UP_RECT = new Rect(0, 0, 350, 350)
val CHESS_BOARD_SET_UP_MODIFIED = "chess_board_set_up_modified.jpg"
val CHESS_BOARD_SET_UP_MODIFIED_RECT = new Rect(9, 9, 350, 350)
val CHESS = "chess.png"
val CHESS_RECT = new Rect(2, 0, 241, 245)
val CHESS_MODIFIED = "chess_modified.png"
val CHESS_MODIFIED_RECT = new Rect(2, 0, 241, 245)
val CHESS_KID = "chesskid.png"
val CHESS_KID_RECT = new Rect(42, 10, 632, 632)
val CHESS_KID_MODIFIED = "chesskid_modified.png"
val CHESS_KID_MODIFIED_RECT = new Rect(42, 10, 632, 632)
val DIAGRAM_OF_CHESS_BOARD_SETUP = "diagram-of-chess-board-setup.png"
val DIAGRAM_OF_CHESS_BOARD_SETUP_RECT = new Rect(16, 11, 266, 274)
val DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED = "diagram-of-chess-board-setup_modified.png"
val DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED_RECT = new Rect(16, 11, 266, 274)
val STAGRAM = "stagram.png"
val STAGRAM_RECT = new Rect(15, 13, 298, 300)
val STAGRAM_MODIFIED = "stagram_modified.png"
val STAGRAM_MODIFIED_RECT = new Rect(15, 13, 298, 300)
val VP_BLACKARRAY = "VP-Blackarray.png"
val VP_BLACKARRAY_RECT = new Rect(6, 8, 280, 279)
val VP_BLACKARRAY_MODIFIED = "VP-Blackarray_modified.png"
val VP_BLACKARRAY_MODIFIED_RECT = new Rect(6, 8, 280, 279)
/**
* This board only has a few squares found using contours. The fix was to
   * only require at least 3 inner squares to determine a chessboard.
*
   * Note: The debug images suggest detection should work fine. It is worth taking
   * a second look at why there is a problem.
*/
"ChessboardFinder" should "return Rect when given image " + BOARD_SETUP in {
val img = Imgcodecs.imread(IMG_FOLDER + BOARD_SETUP)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(BOARD_SETUP) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (BOARD_SETUP_RECT)
}
/**
* This board is hard to find because the chessboard is the full image with no background.
* The test below this one uses the same board with a black border around it.
*
* Note: How should an image that is only a chessboard be handled? Should a border be
* required?
*/
"ChessboardFinder" should "return Rect when given image " + CHESS_BOARD_SET_UP in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_BOARD_SET_UP)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_BOARD_SET_UP) + "_").findChessboard(img)
rect should be (None)
    // The lines are commented out until a way to handle a chessboard with no border is found.
//assert(rect.isDefined)
//rect.get should be (CHESS_BOARD_SET_UP_RECT)
}
"ChessboardFinder" should "return Rect when given image " + CHESS_BOARD_SET_UP_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_BOARD_SET_UP_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_BOARD_SET_UP_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (CHESS_BOARD_SET_UP_MODIFIED_RECT)
}
/**
   * The chessboard needed an outlined rectangle on the outside of the chess squares.
*/
"ChessboardFinder" should "return Rect when given image " + CHESS in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS) + "_").findChessboard(img)
rect should be (None)
//assert(rect.isDefined)
//rect.get should be (CHESS_RECT)
}
"ChessboardFinder" should "return Rect when given image " + CHESS_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (CHESS_MODIFIED_RECT)
}
/**
* This board was hard to find because the background was close to the dark square color.
*/
"ChessboardFinder" should "return Rect when given image " + CHESS_KID in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_KID)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_KID) + "_").findChessboard(img)
rect should be (None)
//assert(rect.isDefined)
//rect.get should be (CHESS_KID_RECT)
}
"ChessboardFinder" should "return Rect when given image " + CHESS_KID_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_KID_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_KID_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (CHESS_KID_MODIFIED_RECT)
}
/**
* This board was hard to find because the background was close to the dark square color.
*/
"ChessboardFinder" should "return Rect when given image " + DIAGRAM_OF_CHESS_BOARD_SETUP in {
val img = Imgcodecs.imread(IMG_FOLDER + DIAGRAM_OF_CHESS_BOARD_SETUP)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(DIAGRAM_OF_CHESS_BOARD_SETUP) + "_").findChessboard(img)
rect should be (None)
//assert(rect.isDefined)
//rect.get should be (DIAGRAM_OF_CHESS_BOARD_SETUP_RECT)
}
"ChessboardFinder" should "return Rect when given image " + DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED_RECT)
}
/**
* The contrast between light and dark squares was not enough. The adaptive
* thresholding was making all the squares white.
*/
"ChessboardFinder" should "return Rect when given image " + STAGRAM in {
val img = Imgcodecs.imread(IMG_FOLDER + STAGRAM)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(STAGRAM) + "_").findChessboard(img)
rect should be (None)
//assert(rect.isDefined)
//rect.get should be (STAGRAM_RECT)
}
"ChessboardFinder" should "return Rect when given image " + STAGRAM_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + STAGRAM_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(STAGRAM_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (STAGRAM_MODIFIED_RECT)
}
/**
* The chessboard image was too small. Resizing the image made the test pass.
*/
"ChessboardFinder" should "return Rect when given image " + VP_BLACKARRAY in {
val img = Imgcodecs.imread(IMG_FOLDER + VP_BLACKARRAY)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(VP_BLACKARRAY) + "_").findChessboard(img)
rect should be (None)
//assert(rect.isDefined)
//rect.get should be (VP_BLACKARRAY_RECT)
}
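  /**
   * Rough sketch of the resize workaround mentioned above. This helper is hypothetical
   * (it is not called by any test) and the default x2 scale factor is an assumption, not
   * the value used to produce the "_modified" images.
   */
  private def findChessboardAfterResize(imgFile: String, scale: Double = 2.0): Option[Rect] = {
    val img = Imgcodecs.imread(IMG_FOLDER + imgFile)
    val resized = new org.opencv.core.Mat()
    // Upscale the source image before running detection.
    org.opencv.imgproc.Imgproc.resize(img, resized,
      new org.opencv.core.Size(img.width() * scale, img.height() * scale))
    ChessboardFinder(OUTPUT_FOLDER + removeExt(imgFile) + "_resized_").findChessboard(resized)
  }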
"ChessboardFinder" should "return Rect when given image " + VP_BLACKARRAY_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + VP_BLACKARRAY_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(VP_BLACKARRAY_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (VP_BLACKARRAY_MODIFIED_RECT)
}
def removeExt(filename: String): String = {
if(filename.contains(".")) filename.substring(0, filename.lastIndexOf('.'))
else filename
}
}
| antiparagon/CVExperimenter | src/test/scala/com/antiparagon/cvexperimenter/chessscanner/ChessboardFinderFailuresTester.scala | Scala | mit | 8,419 |
/**
* Copyright (c) 2015, Cloudera, Inc. All Rights Reserved.
*
* Cloudera, Inc. licenses this file to you under the Apache License,
* Version 2.0 (the "License"). You may not use this file except in
* compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* This software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for
* the specific language governing permissions and limitations under the
* License.
*/
package com.cloudera.sparkts
import breeze.linalg._
object Lag {
/**
* Makes a lag matrix from the given time series with the given lag, trimming both rows and
* columns so that every element in the matrix is full.
*/
private[sparkts] def lagMatTrimBoth(x: Array[Double], maxLag: Int): Array[Array[Double]] = {
lagMatTrimBoth(x, maxLag, false)
}
/**
* Makes a lag matrix from the given time series with the given lag, trimming both rows and
* columns so that every element in the matrix is full.
*/
private[sparkts] def lagMatTrimBoth(x: Array[Double], maxLag: Int, includeOriginal: Boolean)
: Array[Array[Double]] = {
val numObservations = x.length
val numRows = numObservations - maxLag
val numCols = maxLag + (if (includeOriginal) 1 else 0)
val lagMat = Array.ofDim[Double](numRows, numCols)
val initialLag = if (includeOriginal) 0 else 1
for (r <- 0 until numRows) {
for (c <- initialLag to maxLag) {
lagMat(r)(c - initialLag) = x(r + maxLag - c)
}
}
lagMat
}
/**
* Makes a lag matrix from the given time series with the given lag, trimming both rows and
* columns so that every element in the matrix is full.
*/
private[sparkts] def lagMatTrimBoth(x: Vector[Double], maxLag: Int): Matrix[Double] = {
lagMatTrimBoth(x, maxLag, false)
}
/**
* Makes a lag matrix from the given time series with the given lag, trimming both rows and
* columns so that every element in the matrix is full.
*/
private[sparkts] def lagMatTrimBoth(x: Vector[Double], maxLag: Int, includeOriginal: Boolean)
: Matrix[Double] = {
val numObservations = x.size
val numRows = numObservations - maxLag
val numCols = maxLag + (if (includeOriginal) 1 else 0)
val lagMat = new DenseMatrix[Double](numRows, numCols)
val initialLag = if (includeOriginal) 0 else 1
for (r <- 0 until numRows) {
for (c <- initialLag to maxLag) {
lagMat(r, (c - initialLag)) = x(r + maxLag - c)
}
}
lagMat
}
private[sparkts] def lagMatTrimBoth(
x: Vector[Double],
outputMat: DenseMatrix[Double],
maxLag: Int,
includeOriginal: Boolean): Unit = {
val numObservations = x.size
val numRows = numObservations - maxLag
val initialLag = if (includeOriginal) 0 else 1
for (r <- 0 until numRows) {
for (c <- initialLag to maxLag) {
outputMat(r, (c - initialLag)) = x(r + maxLag - c)
}
}
}
}
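/**
 * Small self-contained sketch (hypothetical object, not part of the library) showing what
 * the trimmed lag matrix looks like for a toy series.
 */
private[sparkts] object LagExample {
  def main(args: Array[String]): Unit = {
    val x = Array(1.0, 2.0, 3.0, 4.0, 5.0)
    // With maxLag = 2 and includeOriginal = false, only the 3 fully-populated rows remain,
    // each row holding the lag-1 and lag-2 values for that time step.
    val lagged = Lag.lagMatTrimBoth(x, 2)
    lagged.foreach(row => println(row.mkString("[", ", ", "]")))
    // Prints:
    // [2.0, 1.0]
    // [3.0, 2.0]
    // [4.0, 3.0]
  }
}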
| superwaiwjia/spark-timeseries | src/main/scala/com/cloudera/sparkts/Lag.scala | Scala | apache-2.0 | 3,062 |
package net.reactivecore.mongofaker
import java.io.{IOException, File}
import org.apache.commons.io.FileUtils
import java.nio.file.Files
private[mongofaker] class ManagedDatabaseDirException (msg : String, cause : Throwable = null) extends MongoFakerException (msg, cause)
/**
* Managed Database Directory.
*/
private[mongofaker] class ManagedDatabaseDir extends DatabaseDir{
val tmpDir = FileUtils.getTempDirectory()
if (!tmpDir.exists()) {
throw new ManagedDatabaseDirException(s"TMP directory ${tmpDir} does not exist")
}
val subDir = try {
Files.createTempDirectory(tmpDir.toPath(), "mongofaker_")
} catch {
case e : Throwable => throw new ManagedDatabaseDirException(s"Could not create database directory ${e.getMessage()}", e)
}
def directory : File = {
subDir.toFile
}
def uninit() : Unit = {
try {
FileUtils.deleteDirectory(directory)
} catch {
case ioError : IOException =>
throw new ManagedDatabaseDirException(s"Could not delete database directory ${directory.getAbsolutePath}", ioError)
}
}
}
| reactivecore/mongofaker | src/main/scala/net/reactivecore/mongofaker/ManagedDatabaseDir.scala | Scala | mit | 1,083 |
package org.http4s
package server
import cats.Monad
import cats.data.{Kleisli, OptionT}
object ContextMiddleware {
def apply[F[_]: Monad, T](
getContext: Kleisli[OptionT[F, ?], Request[F], T]): ContextMiddleware[F, T] =
_.compose(Kleisli((r: Request[F]) => getContext(r).map(ContextRequest(_, r))))
}
| ChristopherDavenport/http4s | server/src/main/scala/org/http4s/server/ContextMiddleware.scala | Scala | apache-2.0 | 315 |
package com.orendainx.trucking.storm.bolts
import java.nio.charset.StandardCharsets
import java.util
import com.typesafe.scalalogging.Logger
import org.apache.nifi.storm.NiFiDataPacket
import org.apache.storm.task.{OutputCollector, TopologyContext}
import org.apache.storm.topology.OutputFieldsDeclarer
import org.apache.storm.topology.base.BaseRichBolt
import org.apache.storm.tuple.{Fields, Tuple, Values}
/**
* Convert Tuples in the form of NiFiDataPackets into Tuples of their respective JVM objects.
*
* @author Edgar Orendain <[email protected]>
*/
class NiFiPacketToSerialized extends BaseRichBolt {
private lazy val log = Logger(this.getClass)
private var outputCollector: OutputCollector = _
override def prepare(stormConf: util.Map[_, _], context: TopologyContext, collector: OutputCollector): Unit = {
outputCollector = collector
}
override def execute(tuple: Tuple): Unit = {
val dp = tuple.getValueByField("nifiDataPacket").asInstanceOf[NiFiDataPacket]
val content = dp.getContent
val str = new String(content, StandardCharsets.UTF_8)
log.info(s"Content: $content")
log.info(s"str: $str")
//import java.util.Base64
//val dec = Base64.getDecoder.decode(content)
outputCollector.emit(new Values(dp.getAttributes.get("dataType"), str))
outputCollector.ack(tuple)
}
override def declareOutputFields(declarer: OutputFieldsDeclarer): Unit = declarer.declare(new Fields("dataType", "data"))
}
| orendain/trucking-iot | storm-topology/src/main/scala/com/orendainx/trucking/storm/bolts/NiFiPacketToSerialized.scala | Scala | apache-2.0 | 1,474 |
package is.hail.utils.richUtils
import is.hail.annotations.Region
import is.hail.asm4s._
import is.hail.expr.ir.EmitCodeBuilder
import is.hail.types.physical._
import is.hail.io.OutputBuffer
import is.hail.types.physical.stypes.{SCode, SValue}
import is.hail.types.virtual._
class RichCodeOutputBuffer(
val ob: Value[OutputBuffer]
) extends AnyVal {
def flush(): Code[Unit] =
ob.invoke[Unit]("flush")
def close(): Code[Unit] =
ob.invoke[Unit]("close")
def indexOffset(): Code[Long] =
ob.invoke[Long]("indexOffset")
def writeByte(b: Code[Byte]): Code[Unit] =
ob.invoke[Byte, Unit]("writeByte", b)
def write(buf: Code[Array[Byte]]): Code[Unit] =
ob.invoke[Array[Byte], Unit]("write", buf)
def write(buf: Code[Array[Byte]], startPos: Code[Int], endPos: Code[Int]): Code[Unit] =
ob.invoke[Array[Byte], Int, Int, Unit]("write", buf, startPos, endPos)
def writeInt(i: Code[Int]): Code[Unit] =
ob.invoke[Int, Unit]("writeInt", i)
def writeLong(l: Code[Long]): Code[Unit] =
ob.invoke[Long, Unit]("writeLong", l)
def writeFloat(f: Code[Float]): Code[Unit] =
ob.invoke[Float, Unit]("writeFloat", f)
def writeDouble(d: Code[Double]): Code[Unit] =
ob.invoke[Double, Unit]("writeDouble", d)
def writeBytes(region: Code[Region], off: Code[Long], n: Code[Int]): Code[Unit] =
ob.invoke[Region, Long, Int, Unit]("writeBytes", region, off, n)
def writeBytes(addr: Code[Long], n: Code[Int]): Code[Unit] =
ob.invoke[Long, Int, Unit]("writeBytes", addr, n)
def writeDoubles(from: Code[Array[Double]], fromOff: Code[Int], n: Code[Int]): Code[Unit] =
ob.invoke[Array[Double], Int, Int, Unit]("writeDoubles", from, fromOff, n)
def writeDoubles(from: Code[Array[Double]]): Code[Unit] =
ob.invoke[Array[Double], Unit]("writeDoubles", from)
def writeBoolean(b: Code[Boolean]): Code[Unit] =
ob.invoke[Boolean, Unit]("writeBoolean", b)
def writeUTF(s: Code[String]): Code[Unit] =
ob.invoke[String, Unit]("writeUTF", s)
def writePrimitive(cb: EmitCodeBuilder, pc: SValue): Unit = pc.st.virtualType match {
case TBoolean => cb += writeBoolean(pc.asBoolean.value)
case TInt32 => cb += writeInt(pc.asInt.value)
case TInt64 => cb += writeLong(pc.asLong.value)
case TFloat32 => cb += writeFloat(pc.asFloat.value)
case TFloat64 => cb += writeDouble(pc.asDouble.value)
}
}
| hail-is/hail | hail/src/main/scala/is/hail/utils/richUtils/RichCodeOutputBuffer.scala | Scala | mit | 2,376 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.examples
import collection.mutable.HashMap
import org.apache.commons.math3.linear._
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext._
import org.apache.spark.HashPartitioner
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.Row;
import org.apache.spark.sql.types.{ StructType, StructField, LongType, DoubleType };
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.SQLContext
import org.apache.spark.mllib.linalg.{ Vector, Vectors, VectorUDT }
import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.{ SparkConf, SparkContext }
import scala.util._
/**
* SparkALR for Spark
*/
object SparkALR {
// Number of users
val U = 5000
// Number of movies
val M = 100
// Number of features
val F = 5
// Number of iterations
val ITERATIONS = 5
// Number of regression iterations
val REGMAXITER = 2
// Regularization parameter
val REGP = 1e-4
// Elastic-net parameter
val ENET = 0.00
// Number of partitions for data (set to number of machines in cluster)
val NUMPARTITIONS = 4
// File name to read data
val fileName = "data/mllib/SparkALR.data.nu5000_nm100_nl10_k2_N10000.csv"
val outputDir = "./"
  // Spark and SQL contexts that are visible to everything in SparkALR
val sparkConf = new SparkConf().setAppName("SparkALR")
val sc = new SparkContext(sparkConf)
val sqlContext = new SQLContext(sc)
private def formSuffStat(data: RDD[((Long, Long), (Double, Int))], link: String) :
RDD[(Long, HashMap[Long,(Double, Double)])] = link match {
// RDD[(u, HashMap[m,(t, n)])]
case "logistic" => val initialMap = HashMap.empty[Long, (Double, Double)]
val addToMap = (s: HashMap[Long, (Double, Double)],
v: (Long, (Double, Double))) => s+= (v._1 -> v._2)
val mergePartitionMap = (p1: HashMap[Long, (Double, Double)],
p2: HashMap[Long, (Double, Double)]) => p1 ++= p2
data.reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2)).mapValues(v => (v._1, v._2-v._1)).
map(v => (v._1._1, (v._1._2, v._2))).aggregateByKey(initialMap)(addToMap, mergePartitionMap)
}
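  // Worked example of the sufficient-statistic aggregation above (numbers are illustrative):
  // three raw observations for user 7 / movie 3 with labels 1.0, 0.0, 1.0, i.e.
  //   ((7L, 3L), (1.0, 1)), ((7L, 3L), (0.0, 1)), ((7L, 3L), (1.0, 1))
  // reduce to ((7L, 3L), (2.0, 3)), become (positives, negatives) = (2.0, 1.0), and are
  // finally grouped per user as (7L, HashMap(3L -> (2.0, 1.0))).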
private def makeTrainDF_u(m_id: Long,
um_us: RDD[(Long, (HashMap[Long,(Double, Double)], Vector))]) :
DataFrame = {
// Generate the schema based on the string of schema
val schema =
StructType(
StructField("label", DoubleType, true) ::
StructField("weight", DoubleType, true) ::
StructField("features", new VectorUDT, true) :: Nil)
val um_us_im = um_us.filter(v => v._2._1.contains(m_id)).
mapValues(v => (v._1(m_id), v._2)).
flatMap(v => Seq((1.0, v._2._1._1, v._2._2), (0.0, v._2._1._2, v._2._2))).
map(v => Row(v._1, v._2, v._3))
sqlContext.createDataFrame(um_us_im, schema)
}
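  // Worked example for the weighted training DataFrame built above (numbers are
  // illustrative): if user 7 has features u7 and sufficient statistics
  // HashMap(3L -> (2.0, 1.0)) for movie m_id = 3, the two rows emitted for that user are
  //   Row(1.0, 2.0, u7)   // positive outcomes, weight = number of positives
  //   Row(0.0, 1.0, u7)   // negative outcomes, weight = number of negatives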
private def update_us(lr: LogisticRegression, data_u: HashMap[Long,(Double, Double)], ms: Array[Vector]) : Vector = {
val mu_features = data_u.keySet.toArray.map(v => v.toInt -1) collect ms
val u_instance = data_u.values.toArray.zipWithIndex.flatMap(v => Seq((1.0, v._1._1, mu_features(v._2)), (0.0, v._1._2, mu_features(v._2))))
lr.localTrain(u_instance).coefficients.toDense
}
def main(args: Array[String]) {
sc.setLogLevel("WARN")
printf("Running with M=%d, U=%d, rank=%d, iters=(%d, %d), reg=(%f, %f)\\n",
M, U, F, ITERATIONS, REGMAXITER, REGP, ENET)
printf("Reading file from %s \\n.", fileName)
val t0 = System.nanoTime()
// Create data in the form of RDD[((Long, Long), (Double, Int))]
val data = sc.textFile(fileName).map(_.split(",")).
map(v => ((v(0).toLong, v(1).toLong), (v(2).toDouble, 1)))
// *** row indexed sufficicnet stat data
// RDD[(Long, HashMap[Long,(Double, Double)])]
val um_data = formSuffStat(data, "logistic").partitionBy(new HashPartitioner(NUMPARTITIONS))
// *** row index user features
// RDD[(Long, Vector)]
var us = um_data.mapValues(v => Vectors.dense(Array.fill(F)(math.random-0.5)))
// *** column index movie features
// Array[Vector]
var ms = Array.fill(M)(Vectors.dense(Array.fill(F)(math.random-0.5)))
var msb = sc.broadcast(ms)
var um_us = um_data.join(us)
// *** LogisticRegression models for both distributed and local calculation
val lr_u = new LogisticRegression()
.setMaxIter(REGMAXITER)
.setRegParam(REGP)
.setElasticNetParam(ENET)
.setFitIntercept(false)
.setStandardization(false)
val lr_m = new LogisticRegression()
.setMaxIter(REGMAXITER)
.setRegParam(REGP)
.setElasticNetParam(ENET)
.setFitIntercept(false)
.setStandardization(false)
.setWeightCol("weight")
val t1 = System.nanoTime()
for (iter <- 1 to ITERATIONS) {
println("Iteration " + iter + ":")
// *** Update ms *** //
println("Update ms")
// join data with us
um_us = um_data.join(us)
// loop over entries of ms
for( m_id <- 1 to M ){
ms(m_id-1) = lr_m.fit(makeTrainDF_u(m_id, um_us)).coefficients.toDense
}
// *** Update us *** //
println("Update us")
// broadcast ms
msb = sc.broadcast(ms)
// map the local trainer with data
us = um_data.mapValues(v => update_us(lr_u, v, msb.value))
}
val t2 = System.nanoTime()
println("t1 - t0: " + (t1 - t0)/1.0e9 + "sec", ", t2 - t1:" + (t2 - t1)/ITERATIONS/1.0e9 + "sec")
// write ouput
us.coalesce(1,true).saveAsTextFile(outputDir + "us.csv")
sc.parallelize(ms).coalesce(1,true).saveAsTextFile(outputDir + "ms.csv")
sc.stop()
}
}
| haowu80s/spark | examples/src/main/scala/org/apache/spark/examples/SparkALR/SparkALR.scala | Scala | apache-2.0 | 6,725 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util.SchemaUtils
import org.apache.spark.mllib.linalg.{DenseVector, Vector, VectorUDT, Vectors}
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{DataType, StructType}
/**
* (private[classification]) Params for probabilistic classification.
*/
private[classification] trait ProbabilisticClassifierParams
extends ClassifierParams with HasProbabilityCol with HasThresholds {
override protected def validateAndTransformSchema(
schema: StructType,
fitting: Boolean,
featuresDataType: DataType): StructType = {
val parentSchema = super.validateAndTransformSchema(schema, fitting, featuresDataType)
SchemaUtils.appendColumn(parentSchema, $(probabilityCol), new VectorUDT)
}
}
/**
* :: DeveloperApi ::
*
* Single-label binary or multiclass classifier which can output class conditional probabilities.
*
* @tparam FeaturesType Type of input features. E.g., [[Vector]]
* @tparam E Concrete Estimator type
* @tparam M Concrete Model type
*/
@DeveloperApi
abstract class ProbabilisticClassifier[
FeaturesType,
E <: ProbabilisticClassifier[FeaturesType, E, M],
M <: ProbabilisticClassificationModel[FeaturesType, M]]
extends Classifier[FeaturesType, E, M] with ProbabilisticClassifierParams {
/** @group setParam */
def setProbabilityCol(value: String): E = set(probabilityCol, value).asInstanceOf[E]
/** @group setParam */
def setThresholds(value: Array[Double]): E = set(thresholds, value).asInstanceOf[E]
}
/**
* :: DeveloperApi ::
*
* Model produced by a [[ProbabilisticClassifier]].
* Classes are indexed {0, 1, ..., numClasses - 1}.
*
* @tparam FeaturesType Type of input features. E.g., [[Vector]]
* @tparam M Concrete Model type
*/
@DeveloperApi
abstract class ProbabilisticClassificationModel[
FeaturesType,
M <: ProbabilisticClassificationModel[FeaturesType, M]]
extends ClassificationModel[FeaturesType, M] with ProbabilisticClassifierParams {
/** @group setParam */
def setProbabilityCol(value: String): M = set(probabilityCol, value).asInstanceOf[M]
/** @group setParam */
def setThresholds(value: Array[Double]): M = set(thresholds, value).asInstanceOf[M]
/**
* Transforms dataset by reading from [[featuresCol]], and appending new columns as specified by
* parameters:
* - predicted labels as [[predictionCol]] of type [[Double]]
* - raw predictions (confidences) as [[rawPredictionCol]] of type [[Vector]]
* - probability of each class as [[probabilityCol]] of type [[Vector]].
*
* @param dataset input dataset
* @return transformed dataset
*/
override def transform(dataset: DataFrame): DataFrame = {
transformSchema(dataset.schema, logging = true)
if (isDefined(thresholds)) {
require($(thresholds).length == numClasses, this.getClass.getSimpleName +
".transform() called with non-matching numClasses and thresholds.length." +
s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}")
}
// Output selected columns only.
// This is a bit complicated since it tries to avoid repeated computation.
var outputData = dataset
var numColsOutput = 0
if ($(rawPredictionCol).nonEmpty) {
val predictRawUDF = udf { (features: Any) =>
predictRaw(features.asInstanceOf[FeaturesType])
}
outputData = outputData.withColumn(getRawPredictionCol, predictRawUDF(col(getFeaturesCol)))
numColsOutput += 1
}
if ($(probabilityCol).nonEmpty) {
val probUDF = if ($(rawPredictionCol).nonEmpty) {
udf(raw2probability _).apply(col($(rawPredictionCol)))
} else {
val probabilityUDF = udf { (features: Any) =>
predictProbability(features.asInstanceOf[FeaturesType])
}
probabilityUDF(col($(featuresCol)))
}
outputData = outputData.withColumn($(probabilityCol), probUDF)
numColsOutput += 1
}
if ($(predictionCol).nonEmpty) {
val predUDF = if ($(rawPredictionCol).nonEmpty) {
udf(raw2prediction _).apply(col($(rawPredictionCol)))
} else if ($(probabilityCol).nonEmpty) {
udf(probability2prediction _).apply(col($(probabilityCol)))
} else {
val predictUDF = udf { (features: Any) =>
predict(features.asInstanceOf[FeaturesType])
}
predictUDF(col($(featuresCol)))
}
outputData = outputData.withColumn($(predictionCol), predUDF)
numColsOutput += 1
}
if (numColsOutput == 0) {
this.logWarning(s"$uid: ProbabilisticClassificationModel.transform() was called as NOOP" +
" since no output columns were set.")
}
outputData
}
/**
* Estimate the probability of each class given the raw prediction,
* doing the computation in-place.
* These predictions are also called class conditional probabilities.
*
* This internal method is used to implement [[transform()]] and output [[probabilityCol]].
*
* @return Estimated class conditional probabilities (modified input vector)
*/
protected def raw2probabilityInPlace(rawPrediction: Vector): Vector
/** Non-in-place version of [[raw2probabilityInPlace()]] */
protected def raw2probability(rawPrediction: Vector): Vector = {
val probs = rawPrediction.copy
raw2probabilityInPlace(probs)
}
override protected def raw2prediction(rawPrediction: Vector): Double = {
if (!isDefined(thresholds)) {
rawPrediction.argmax
} else {
probability2prediction(raw2probability(rawPrediction))
}
}
/**
* Predict the probability of each class given the features.
* These predictions are also called class conditional probabilities.
*
* This internal method is used to implement [[transform()]] and output [[probabilityCol]].
*
* @return Estimated class conditional probabilities
*/
protected def predictProbability(features: FeaturesType): Vector = {
val rawPreds = predictRaw(features)
raw2probabilityInPlace(rawPreds)
}
/**
* Given a vector of class conditional probabilities, select the predicted label.
* This supports thresholds which favor particular labels.
* @return predicted label
*/
protected def probability2prediction(probability: Vector): Double = {
if (!isDefined(thresholds)) {
probability.argmax
} else {
val thresholds: Array[Double] = getThresholds
val scaledProbability: Array[Double] =
probability.toArray.zip(thresholds).map { case (p, t) =>
if (t == 0.0) Double.PositiveInfinity else p / t
}
Vectors.dense(scaledProbability).argmax
}
}
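  // Worked example of the thresholding above (numbers are illustrative): with
  // probability = [0.4, 0.6] and thresholds = [0.3, 0.7], the scaled values are
  // [0.4 / 0.3, 0.6 / 0.7] = [1.33, 0.86], so class 0 is predicted even though class 1
  // has the larger raw probability.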
}
private[ml] object ProbabilisticClassificationModel {
/**
* Normalize a vector of raw predictions to be a multinomial probability vector, in place.
*
* The input raw predictions should be >= 0.
* The output vector sums to 1, unless the input vector is all-0 (in which case the output is
* all-0 too).
*
* NOTE: This is NOT applicable to all models, only ones which effectively use class
* instance counts for raw predictions.
*/
def normalizeToProbabilitiesInPlace(v: DenseVector): Unit = {
val sum = v.values.sum
if (sum != 0) {
var i = 0
val size = v.size
while (i < size) {
v.values(i) /= sum
i += 1
}
}
}
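  // Example: normalizing the raw class counts [2.0, 0.0, 6.0] in place yields
  // [0.25, 0.0, 0.75]; an all-zero vector such as [0.0, 0.0] is left unchanged.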
}
| practice-vishnoi/dev-spark-1 | mllib/src/main/scala/org/apache/spark/ml/classification/ProbabilisticClassifier.scala | Scala | apache-2.0 | 8,325 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.visor.commands.cache
import java.util.{Collections, UUID, Collection => JavaCollection, List => JavaList}
import org.apache.ignite._
import org.apache.ignite.cluster.ClusterNode
import org.apache.ignite.internal.util.lang.{GridFunc => F}
import org.apache.ignite.internal.util.scala.impl
import org.apache.ignite.internal.util.typedef.X
import org.apache.ignite.internal.visor.cache._
import org.apache.ignite.internal.visor.util.VisorTaskUtils._
import org.apache.ignite.visor.VisorTag
import org.apache.ignite.visor.commands.cache.VisorCacheCommand._
import org.apache.ignite.visor.commands.common.{VisorConsoleCommand, VisorTextTable}
import org.apache.ignite.visor.visor._
import org.jetbrains.annotations._
import scala.collection.JavaConversions._
import scala.language.{implicitConversions, reflectiveCalls}
/**
* ==Overview==
* Visor 'cache' command implementation.
*
* ==Help==
* {{{
* +-------------------------------------------------------------------------------------------+
* | cache | Prints statistics about caches from specified node on the entire grid. |
* | | Output sorting can be specified in arguments. |
* | | |
* | | Output abbreviations: |
* | | # Number of nodes. |
* | | H/h Number of cache hits. |
* | | M/m Number of cache misses. |
* | | R/r Number of cache reads. |
* | | W/w Number of cache writes. |
* +-------------------------------------------------------------------------------------------+
* | cache -clear | Clears all entries from cache on all nodes. |
* +-------------------------------------------------------------------------------------------+
* | cache -scan | List all entries in cache with specified name. |
* +-------------------------------------------------------------------------------------------+
* | cache -stop | Stop cache with specified name. |
* +-------------------------------------------------------------------------------------------+
* | cache -reset | Reset metrics for cache with specified name. |
* +-------------------------------------------------------------------------------------------+
* | cache -rebalance | Re-balance partitions for cache with specified name. |
* +-------------------------------------------------------------------------------------------+
*
* }}}
*
* ====Specification====
* {{{
* cache {-system}
* cache -i {-system}
* cache {-c=<cache-name>} {-id=<node-id>|id8=<node-id8>} {-s=hi|mi|rd|wr|cn} {-a} {-r} {-system}
* cache -clear {-c=<cache-name>}
* cache -scan -c=<cache-name> {-id=<node-id>|id8=<node-id8>} {-p=<page size>} {-system}
* cache -stop -c=<cache-name>
* cache -reset -c=<cache-name>
* cache -rebalance -c=<cache-name>
* }}}
*
* ====Arguments====
* {{{
* -id8=<node-id>
* ID8 of the node to get cache statistics from.
* Note that either '-id8' or '-id' should be specified.
* You can also use '@n0' ... '@nn' variables as a shortcut for <node-id8>.
* To specify oldest node on the same host as visor use variable '@nl'.
* To specify oldest node on other hosts that are not running visor use variable '@nr'.
* If neither is specified statistics will be gathered from all nodes.
* -id=<node-id>
* Full ID of the node to get cache statistics from.
* Either '-id8' or '-id' can be specified.
* If neither is specified statistics will be gathered from all nodes.
* -c=<cache-name>
* Name of the cache.
* -s=hi|mi|rd|wr|cn
* Defines sorting type. Sorted by:
* hi Hits.
* mi Misses.
* rd Reads.
* wr Writes.
* cn Cache name.
* If not specified - default sorting is 'cn'.
* -i
* Interactive mode.
* User can interactively select node for cache statistics.
* -r
* Defines if sorting should be reversed.
* Can be specified only with '-s' argument.
* -a
 *        Prints detailed statistics about each cache.
* By default only aggregated summary is printed.
* -system
* Enable showing of information about system caches.
* -clear
* Clears cache.
* -scan
* Prints list of all entries from cache.
* -stop
* Stop cache with specified name.
* -reset
* Reset metrics for cache with specified name.
* -rebalance
* Re-balance partitions for cache with specified name.
* -p=<page size>
 *        Number of objects to fetch from the cache at once.
 *        Valid range is from 1 to 100.
* By default page size is 25.
* }}}
*
* ====Examples====
* {{{
* cache
 *     Prints summary statistics about all non-system caches.
* cache -system
* Prints summary statistics about all caches.
* cache -id8=12345678 -s=hi -r
* Prints summary statistics about caches from node with specified id8
* sorted by number of hits in reverse order.
* cache -i
* Prints cache statistics for interactively selected node.
* cache -s=hi -r -a
* Prints detailed statistics about all caches sorted by number of hits in reverse order.
* cache -clear
* Clears interactively selected cache.
* cache -clear -c=cache
* Clears cache with name 'cache'.
* cache -scan
* Prints list entries from interactively selected cache.
* cache -scan -c=cache
* Prints list entries from cache with name 'cache' from all nodes with this cache.
* cache -scan -c=@c0 -p=50
* Prints list entries from cache with name taken from 'c0' memory variable
* with page of 50 items from all nodes with this cache.
* cache -scan -c=cache -id8=12345678
* Prints list entries from cache with name 'cache' and node '12345678' ID8.
* cache -stop -c=cache
* Stops cache with name 'cache'.
* cache -reset -c=cache
* Reset metrics for cache with name 'cache'.
* cache -rebalance -c=cache
* Re-balance partitions for cache with name 'cache'.
*
* }}}
*/
class VisorCacheCommand extends VisorConsoleCommand {
@impl protected val name: String = "cache"
/**
* ===Command===
* Prints statistics about caches from nodes that pass mnemonic predicate.
* Sorting can be specified in arguments.
*
* ===Examples===
     * <ex>cache -id8=12345678 -s=hi -r</ex>
     *     Prints statistics about caches from node with specified id8 sorted by number of hits in reverse order.
     * <br>
     * <ex>cache -s=hi -r</ex>
     *     Prints statistics about all caches sorted by number of hits in reverse order.
* <br>
* <ex>cache -clear</ex>
* Clears interactively selected cache.
* <br>
* <ex>cache -clear -c=cache</ex>
* Clears cache with name 'cache'.
* <br>
* <ex>cache -scan</ex>
* Prints list entries from interactively selected cache.
* <br>
* <ex>cache -scan -c=cache</ex>
* Prints list entries from cache with name 'cache' from all nodes with this cache.
* <br>
* <ex>cache -scan -c=@c0 -p=50</ex>
* Prints list entries from cache with name taken from 'c0' memory variable with page of 50 items
* from all nodes with this cache.
* <br>
* <ex>cache -scan -c=cache -id8=12345678</ex>
* Prints list entries from cache with name 'cache' and node '12345678' ID8.
* <br>
* <ex>cache -stop -c=@c0</ex>
* Stop cache with name taken from 'c0' memory variable.
* <br>
* <ex>cache -reset -c=@c0</ex>
* Reset metrics for cache with name taken from 'c0' memory variable.
*
* @param args Command arguments.
*/
def cache(args: String) {
if (checkConnected() && checkActiveState()) {
var argLst = parseArgs(args)
if (hasArgFlag("i", argLst)) {
askForNode("Select node from:") match {
case Some(nid) => ask("Detailed statistics (y/n) [n]: ", "n") match {
case "n" | "N" => nl(); cache("-id=" + nid); return;
case "y" | "Y" => nl(); cache("-a -id=" + nid); return;
case x => nl(); warn("Invalid answer: " + x); return;
}
case None => return
}
return
}
val node = parseNode(argLst) match {
case Left(msg) =>
scold(msg)
return
case Right(n) => n
}
val showSystem = hasArgFlag("system", argLst)
var cacheName = argValue("c", argLst) match {
case Some(dfltName) if dfltName == DFLT_CACHE_KEY || dfltName == DFLT_CACHE_NAME =>
argLst = argLst.filter(_._1 != "c") ++ Seq("c" -> null)
Some(null)
case cn => cn
}
/** Check that argument list has flag from list. */
def hasArgFlagIn(flags: String *) = {
flags.exists(hasArgFlag(_, argLst))
}
// Get cache stats data from all nodes.
val aggrData = cacheData(node, cacheName, showSystem)
if (hasArgFlagIn("clear", "scan", "stop", "reset", "rebalance")) {
if (cacheName.isEmpty)
askForCache("Select cache from:", node, showSystem
&& !hasArgFlagIn("clear", "stop", "reset", "rebalance"), aggrData) match {
case Some(name) =>
argLst = argLst ++ Seq("c" -> name)
cacheName = Some(name)
case None => return
}
cacheName.foreach(name => {
aggrData.find(cache => F.eq(cache.getName, name)) match {
case Some(cache) =>
if (!cache.isSystem) {
if (hasArgFlag("scan", argLst))
VisorCacheScanCommand().scan(argLst, node)
else if (hasArgFlag("clear", argLst))
VisorCacheClearCommand().clear(argLst, node)
else if (hasArgFlag("stop", argLst))
VisorCacheStopCommand().stop(argLst, node)
else if (hasArgFlag("reset", argLst))
VisorCacheResetCommand().reset(argLst, node)
else if (hasArgFlag("rebalance", argLst))
VisorCacheRebalanceCommand().rebalance(argLst, node)
}
else {
if (hasArgFlag("scan", argLst))
warn("Scan of system cache is not allowed: " + name)
else if (hasArgFlag("clear", argLst))
warn("Clearing of system cache is not allowed: " + name)
else if (hasArgFlag("stop", argLst))
warn("Stopping of system cache is not allowed: " + name)
else if (hasArgFlag("reset", argLst))
warn("Reset metrics of system cache is not allowed: " + name)
else if (hasArgFlag("rebalance", argLst))
warn("Re-balance partitions of system cache is not allowed: " + name)
}
case None =>
warn("Cache with specified name not found: " + name)
}
})
return
}
val all = hasArgFlag("a", argLst)
val sortType = argValue("s", argLst)
val reversed = hasArgName("r", argLst)
if (sortType.isDefined && !isValidSortType(sortType.get)) {
scold("Invalid '-s' argument in: " + args)
return
}
if (aggrData.isEmpty) {
scold("No caches found.")
return
}
node match {
case Some(n) =>
println("ID8=" + nid8(n) + ", time of the snapshot: " + formatDateTime(System.currentTimeMillis))
case None =>
println("Time of the snapshot: " + formatDateTime(System.currentTimeMillis))
}
val sumT = VisorTextTable()
sumT #= ("Name(@)", "Mode", "Nodes", "Entries (Heap / Off-heap)", "Hits", "Misses", "Reads", "Writes")
sortAggregatedData(aggrData, sortType.getOrElse("cn"), reversed).foreach(
ad => {
// Add cache host as visor variable.
registerCacheName(ad.getName)
sumT += (
mkCacheName(ad.getName),
ad.getMode,
ad.getNodes.size(),
(
"min: " + (ad.getMinimumHeapSize + ad.getMinimumOffHeapPrimarySize) +
" (" + ad.getMinimumHeapSize + " / " + ad.getMinimumOffHeapPrimarySize + ")",
"avg: " + formatDouble(ad.getAverageHeapSize + ad.getAverageOffHeapPrimarySize) +
" (" + formatDouble(ad.getAverageHeapSize) + " / " + formatDouble(ad.getAverageOffHeapPrimarySize) + ")",
"max: " + (ad.getMaximumHeapSize + ad.getMaximumOffHeapPrimarySize) +
" (" + ad.getMaximumHeapSize + " / " + ad.getMaximumOffHeapPrimarySize + ")"
),
(
"min: " + ad.getMinimumHits,
"avg: " + formatDouble(ad.getAverageHits),
"max: " + ad.getMaximumHits
),
(
"min: " + ad.getMinimumMisses,
"avg: " + formatDouble(ad.getAverageMisses),
"max: " + ad.getMaximumMisses
),
(
"min: " + ad.getMinimumReads,
"avg: " + formatDouble(ad.getAverageReads),
"max: " + ad.getMaximumReads
),
(
"min: " + ad.getMinimumWrites,
"avg: " + formatDouble(ad.getAverageWrites),
"max: " + ad.getMaximumWrites
)
)
}
)
sumT.render()
if (all) {
val sorted = aggrData.sortWith((k1, k2) => {
if (k1.getName == null)
true
else if (k2.getName == null)
false
else k1.getName.compareTo(k2.getName) < 0
})
val gCfg = node.map(config).collect {
case cfg if cfg != null => cfg
}
sorted.foreach(ad => {
val cacheNameVar = mkCacheName(ad.getName)
            println("\nCache '" + cacheNameVar + "':")
val m = ad.getMetrics
val csT = VisorTextTable()
csT += ("Name(@)", cacheNameVar)
csT += ("Nodes", m.size())
csT += ("Total size Min/Avg/Max", (ad.getMinimumHeapSize + ad.getMinimumOffHeapPrimarySize) + " / " +
formatDouble(ad.getAverageHeapSize + ad.getAverageOffHeapPrimarySize) + " / " +
(ad.getMaximumHeapSize + ad.getMaximumOffHeapPrimarySize))
csT += (" Heap size Min/Avg/Max", ad.getMinimumHeapSize + " / " +
formatDouble(ad.getAverageHeapSize) + " / " + ad.getMaximumHeapSize)
csT += (" Off-heap size Min/Avg/Max", ad.getMinimumOffHeapPrimarySize + " / " +
formatDouble(ad.getAverageOffHeapPrimarySize) + " / " + ad.getMaximumOffHeapPrimarySize)
val ciT = VisorTextTable()
ciT #= ("Node ID8(@), IP", "CPUs", "Heap Used", "CPU Load", "Up Time", "Size", "Hi/Mi/Rd/Wr")
sortData(m.toMap, sortType.getOrElse("hi"), reversed).foreach { case (nid, cm) =>
val nm = ignite.cluster.node(nid).metrics()
ciT += (
nodeId8Addr(nid),
nm.getTotalCpus,
formatDouble(100d * nm.getHeapMemoryUsed / nm.getHeapMemoryMaximum) + " %",
formatDouble(nm.getCurrentCpuLoad * 100d) + " %",
X.timeSpan2HMSM(nm.getUpTime),
(
"Total: " + (cm.getHeapEntriesCount + cm.getOffHeapPrimaryEntriesCount),
" Heap: " + cm.getHeapEntriesCount,
" Off-Heap: " + cm.getOffHeapPrimaryEntriesCount,
" Off-Heap Memory: " + formatMemory(cm.getOffHeapAllocatedSize)
),
(
"Hi: " + cm.getHits,
"Mi: " + cm.getMisses,
"Rd: " + cm.getReads,
"Wr: " + cm.getWrites
)
)
}
csT.render()
nl()
println("Nodes for: " + cacheNameVar)
ciT.render()
// Print footnote.
println("'Hi' - Number of cache hits.")
println("'Mi' - Number of cache misses.")
println("'Rd' - number of cache reads.")
println("'Wr' - Number of cache writes.")
// Print metrics.
nl()
println("Aggregated queries metrics:")
println(" Minimum execution time: " + X.timeSpan2HMSM(ad.getMinimumQueryTime))
println(" Maximum execution time: " + X.timeSpan2HMSM(ad.getMaximumQueryTime))
println(" Average execution time: " + X.timeSpan2HMSM(ad.getAverageQueryTime.toLong))
println(" Total number of executions: " + ad.getQueryExecutions)
println(" Total number of failures: " + ad.getQueryFailures)
gCfg.foreach(ccfgs => ccfgs.find(ccfg => F.eq(ccfg.getName, ad.getName))
.foreach(ccfg => {
nl()
printCacheConfiguration("Cache configuration:", ccfg)
}))
})
}
else
        println("\nUse \"-a\" flag to see detailed statistics.")
}
}
/**
     * Makes an extended cache name by attaching the optional visor variable
     * associated with it.
     *
     * @param s Cache name.
*/
private def mkCacheName(@Nullable s: String): String = {
if (s == null) {
val v = mfindHead(DFLT_CACHE_KEY)
DFLT_CACHE_NAME + (if (v.isDefined) "(@" + v.get._1 + ')' else "")
}
else {
val v = mfindHead(s)
s + (if (v.isDefined) "(@" + v.get._1 + ')' else "")
}
}
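    /* Illustrative sketch (assumption): if a cache named "partitioned" was registered earlier via
     * registerCacheName(...) and bound to the '@c0' visor variable, mkCacheName("partitioned")
     * renders as "partitioned(@c0)"; mkCacheName(null) falls back to the default cache name plus
     * its variable, if any.
     */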
/**
     * Registers cache name as a visor variable if one wasn't already registered.
     *
     * @param s Cache name.
*/
private def registerCacheName(@Nullable s: String) = setVarIfAbsent(if (s != null) s else DFLT_CACHE_KEY, "c")
/**
* ===Command===
* Prints unsorted statistics about all caches.
*
* ===Examples===
* <ex>cache</ex>
* Prints unsorted statistics about all caches.
*/
def cache() {
this.cache("")
}
/**
     * Gets metrics data either for all caches from all nodes or from the specified node.
     *
     * @param node Optional node to collect cache metrics from. All nodes if `None`.
     * @param name Optional cache name to collect metrics for. All caches if `None`.
     * @param systemCaches Allow selection of system caches.
     * @return Cache metrics data.
*/
private def cacheData(node: Option[ClusterNode], name: Option[String], systemCaches: Boolean = false):
List[VisorCacheAggregatedMetrics] = {
assert(node != null)
try {
val caches: JavaList[String] = name.fold(Collections.emptyList[String]())(Collections.singletonList)
val arg = new VisorCacheMetricsCollectorTaskArg(systemCaches, caches)
node match {
case Some(n) => executeOne(n.id(), classOf[VisorCacheMetricsCollectorTask], arg).toList
case None => executeMulti(classOf[VisorCacheMetricsCollectorTask], arg).toList
}
}
catch {
case e: IgniteException => Nil
}
}
/**
     * Gets grid configuration from the specified node in order to collect that node's cache configurations.
*
* @param node Specified node.
* @return Cache configurations for specified node.
*/
private def config(node: ClusterNode): JavaCollection[VisorCacheConfiguration] = {
try {
cacheConfigurations(node.id())
}
catch {
case e: IgniteException =>
scold(e)
null
}
}
/**
     * Tests whether the passed-in parameter is a valid sorting type.
*
* @param arg Sorting type to test.
*/
private def isValidSortType(arg: String): Boolean = {
assert(arg != null)
Set("hi", "mi", "rd", "wr", "cn").contains(arg.trim)
}
/**
* Sort metrics data.
*
* @param data Unsorted list.
* @param arg Sorting command argument.
* @param reverse Whether to reverse sorting or not.
* @return Sorted data.
*/
private def sortData(data: Map[UUID, VisorCacheMetrics], arg: String, reverse: Boolean) = {
assert(data != null)
assert(arg != null)
val sorted = arg.trim match {
case "hi" => data.toSeq.sortBy(_._2.getHits)
case "mi" => data.toSeq.sortBy(_._2.getMisses)
case "rd" => data.toSeq.sortBy(_._2.getReads)
case "wr" => data.toSeq.sortBy(_._2.getWrites)
case "cn" => data.toSeq.sortBy(_._1)
case _ =>
assert(false, "Unknown sorting type: " + arg)
Nil
}
if (reverse) sorted.reverse else sorted
}
/**
* Sort aggregated metrics data.
*
* @param data Unsorted list.
* @param arg Command argument.
* @param reverse Whether to reverse sorting or not.
* @return Sorted data.
*/
private def sortAggregatedData(data: Iterable[VisorCacheAggregatedMetrics], arg: String, reverse: Boolean):
List[VisorCacheAggregatedMetrics] = {
val sorted = arg.trim match {
case "hi" => data.toList.sortBy(_.getAverageHits)
case "mi" => data.toList.sortBy(_.getAverageMisses)
case "rd" => data.toList.sortBy(_.getAverageReads)
case "wr" => data.toList.sortBy(_.getAverageWrites)
case "cn" => data.toList.sortWith((x, y) =>
x.getName == null || (y.getName != null && x.getName.toLowerCase < y.getName.toLowerCase))
case _ =>
assert(false, "Unknown sorting type: " + arg)
Nil
}
if (reverse) sorted.reverse else sorted
}
/**
* Asks user to select a cache from the list.
*
* @param title Title displayed before the list of caches.
     * @param node Optional node to collect cache names from. All nodes if `None`.
     * @param showSystem Allow selection of system caches.
     * @param aggrData Aggregated cache metrics to choose from.
     * @return `Option` with the name of the selected cache.
*/
def askForCache(title: String, node: Option[ClusterNode], showSystem: Boolean = false,
aggrData: Seq[VisorCacheAggregatedMetrics]): Option[String] = {
assert(title != null)
if (aggrData.isEmpty) {
scold("No caches found.")
return None
}
val sortedAggrData = sortAggregatedData(aggrData, "cn", false)
println("Time of the snapshot: " + formatDateTime(System.currentTimeMillis))
val sumT = VisorTextTable()
sumT #= ("#", "Name(@)", "Mode", "Size (Heap / Off-heap)")
sortedAggrData.indices.foreach(i => {
val ad = sortedAggrData(i)
// Add cache host as visor variable.
registerCacheName(ad.getName)
sumT += (
i,
mkCacheName(ad.getName),
ad.getMode,
(
"min: " + (ad.getMinimumHeapSize + ad.getMinimumOffHeapPrimarySize) +
" (" + ad.getMinimumHeapSize + " / " + ad.getMinimumOffHeapPrimarySize + ")",
"avg: " + formatDouble(ad.getAverageHeapSize + ad.getAverageOffHeapPrimarySize) +
" (" + formatDouble(ad.getAverageHeapSize) + " / " + formatDouble(ad.getAverageOffHeapPrimarySize) + ")",
"max: " + (ad.getMaximumHeapSize + ad.getMaximumOffHeapPrimarySize) +
" (" + ad.getMaximumHeapSize + " / " + ad.getMaximumOffHeapPrimarySize + ")"
))
})
sumT.render()
    val a = ask("\nChoose cache number ('c' to cancel) [c]: ", "0")
if (a.toLowerCase == "c")
None
else {
try
Some(sortedAggrData(a.toInt).getName)
catch {
case e: Throwable =>
warn("Invalid selection: " + a)
None
}
}
}
}
/**
* Companion object that does initialization of the command.
*/
object VisorCacheCommand {
/** Singleton command */
private val cmd = new VisorCacheCommand
addHelp(
name = "cache",
shortInfo = "Prints cache statistics, clears cache, prints list of all entries from cache.",
longInfo = Seq(
"Prints statistics about caches from specified node on the entire grid.",
"Output sorting can be specified in arguments.",
" ",
"Output abbreviations:",
" # Number of nodes.",
" H/h Number of cache hits.",
" M/m Number of cache misses.",
" R/r Number of cache reads.",
" W/w Number of cache writes.",
" ",
"Clears cache.",
" ",
"Prints list of all entries from cache."
),
spec = Seq(
"cache",
"cache -i",
"cache {-c=<cache-name>} {-id=<node-id>|id8=<node-id8>} {-s=hi|mi|rd|wr} {-a} {-r}",
"cache -clear {-c=<cache-name>} {-id=<node-id>|id8=<node-id8>}",
"cache -scan -c=<cache-name> {-id=<node-id>|id8=<node-id8>} {-p=<page size>}",
"cache -stop -c=<cache-name>",
"cache -reset -c=<cache-name>",
"cache -rebalance -c=<cache-name>"
),
args = Seq(
"-id8=<node-id>" -> Seq(
"ID8 of the node to get cache statistics from.",
"Note that either '-id8' or '-id' should be specified.",
"You can also use '@n0' ... '@nn' variables as a shortcut for <node-id8>.",
"To specify oldest node on the same host as visor use variable '@nl'.",
"To specify oldest node on other hosts that are not running visor use variable '@nr'.",
"If neither is specified statistics will be gathered from all nodes."
),
"-id=<node-id>" -> Seq(
"Full ID of the node to get cache statistics from.",
"Either '-id8' or '-id' can be specified.",
"If neither is specified statistics will be gathered from all nodes."
),
"-c=<cache-name>" -> Seq(
"Name of the cache.",
"Note you can also use '@c0' ... '@cn' variables as shortcut to <cache-name>."
),
"-clear" -> "Clears cache.",
"-system" -> "Enable showing of information about system caches.",
"-scan" -> "Prints list of all entries from cache.",
"-stop" -> "Stop cache with specified name.",
"-reset" -> "Reset metrics of cache with specified name.",
"-rebalance" -> "Re-balance partitions for cache with specified name.",
"-s=hi|mi|rd|wr|cn" -> Seq(
"Defines sorting type. Sorted by:",
" hi Hits.",
" mi Misses.",
" rd Reads.",
" wr Writes.",
" cn Cache name.",
"If not specified - default sorting is 'cn'."
),
"-i" -> Seq(
"Interactive mode.",
"User can interactively select node for cache statistics."
),
"-r" -> Seq(
"Defines if sorting should be reversed.",
"Can be specified only with '-s' argument."
      ),
      "-a" -> Seq(
        "Prints detailed statistics about each cache.",
        "By default only aggregated summary is printed."
      ),
      "-p=<page size>" -> Seq(
        "Number of objects to fetch from the cache at once.",
        "Valid range is from 1 to 100.",
"By default page size is 25."
)
),
    examples = Seq(
      "cache" ->
        "Prints summary statistics about all non-system caches.",
      "cache -system" ->
        "Prints summary statistics about all caches, including system caches.",
"cache -i" ->
"Prints cache statistics for interactively selected node.",
"cache -id8=12345678 -s=hi -r" -> Seq(
"Prints summary statistics about caches from node with specified id8",
"sorted by number of hits in reverse order."
      ),
      "cache -id8=@n0 -s=hi -r" -> Seq(
        "Prints summary statistics about caches from node with id8 taken from 'n0' memory variable",
"sorted by number of hits in reverse order."
),
"cache -c=@c0 -a" -> Seq(
"Prints detailed statistics about cache with name taken from 'c0' memory variable."
),
"cache -s=hi -r -a" ->
"Prints detailed statistics about all caches sorted by number of hits in reverse order.",
"cache -clear" -> "Clears interactively selected cache.",
"cache -clear -c=cache" -> "Clears cache with name 'cache'.",
"cache -clear -c=@c0" -> "Clears cache with name taken from 'c0' memory variable.",
"cache -scan" -> "Prints list entries from interactively selected cache.",
"cache -scan -c=cache" -> "List entries from cache with name 'cache' from all nodes with this cache.",
"cache -scan -c=@c0 -p=50" -> ("Prints list entries from cache with name taken from 'c0' memory variable" +
" with page of 50 items from all nodes with this cache."),
"cache -scan -c=cache -id8=12345678" -> "Prints list entries from cache with name 'cache' and node '12345678' ID8.",
"cache -stop -c=@c0" -> "Stop cache with name taken from 'c0' memory variable.",
"cache -reset -c=@c0" -> "Reset metrics for cache with name taken from 'c0' memory variable.",
"cache -rebalance -c=cache" -> "Re-balance partitions for cache with name 'cache'."
),
emptyArgs = cmd.cache,
withArgs = cmd.cache
)
/** Default cache name to show on screen. */
private final val DFLT_CACHE_NAME = escapeName(null)
/** Default cache key. */
protected val DFLT_CACHE_KEY = DFLT_CACHE_NAME + "-" + UUID.randomUUID().toString
/**
* Singleton.
*/
def apply() = cmd
/**
* Implicit converter from visor to commands "pimp".
*
* @param vs Visor tagging trait.
*/
implicit def fromCinfo2Visor(vs: VisorTag): VisorCacheCommand = cmd
/**
* Show table of cache configuration information.
*
* @param title Specified title for table.
* @param cfg Config to show information.
*/
private[commands] def printCacheConfiguration(title: String, cfg: VisorCacheConfiguration) {
val affinityCfg = cfg.getAffinityConfiguration
val nearCfg = cfg.getNearConfiguration
val rebalanceCfg = cfg.getRebalanceConfiguration
val evictCfg = cfg.getEvictionConfiguration
val storeCfg = cfg.getStoreConfiguration
val queryCfg = cfg.getQueryConfiguration
val cacheT = VisorTextTable()
cacheT #= ("Name", "Value")
cacheT += ("Group", cfg.getGroupName)
cacheT += ("Dynamic Deployment ID", cfg.getDynamicDeploymentId)
cacheT += ("System", bool2Str(cfg.isSystem))
cacheT += ("Mode", cfg.getMode)
cacheT += ("Atomicity Mode", safe(cfg.getAtomicityMode))
cacheT += ("Statistic Enabled", bool2Str(cfg.isStatisticsEnabled))
cacheT += ("Management Enabled", bool2Str(cfg.isManagementEnabled))
cacheT += ("On-heap cache enabled", bool2Str(cfg.isOnheapCacheEnabled))
cacheT += ("Partition Loss Policy", cfg.getPartitionLossPolicy)
cacheT += ("Query Parallelism", cfg.getQueryParallelism)
cacheT += ("Copy On Read", bool2Str(cfg.isCopyOnRead))
cacheT += ("Listener Configurations", cfg.getListenerConfigurations)
cacheT += ("Load Previous Value", bool2Str(cfg.isLoadPreviousValue))
cacheT += ("Memory Policy Name", cfg.getMemoryPolicyName)
cacheT += ("Node Filter", cfg.getNodeFilter)
cacheT += ("Read From Backup", bool2Str(cfg.isReadFromBackup))
cacheT += ("Topology Validator", cfg.getTopologyValidator)
cacheT += ("Time To Live Eager Flag", cfg.isEagerTtl)
cacheT += ("Write Synchronization Mode", safe(cfg.getWriteSynchronizationMode))
cacheT += ("Invalidate", bool2Str(cfg.isInvalidate))
cacheT += ("Affinity Function", safe(affinityCfg.getFunction))
cacheT += ("Affinity Backups", affinityCfg.getPartitionedBackups)
cacheT += ("Affinity Partitions", safe(affinityCfg.getPartitions))
cacheT += ("Affinity Exclude Neighbors", safe(affinityCfg.isExcludeNeighbors))
cacheT += ("Affinity Mapper", safe(affinityCfg.getMapper))
cacheT += ("Rebalance Mode", rebalanceCfg.getMode)
cacheT += ("Rebalance Batch Size", rebalanceCfg.getBatchSize)
cacheT += ("Rebalance Timeout", rebalanceCfg.getTimeout)
cacheT += ("Rebalance Delay", rebalanceCfg.getPartitionedDelay)
cacheT += ("Time Between Rebalance Messages", rebalanceCfg.getThrottle)
cacheT += ("Rebalance Batches Count", rebalanceCfg.getBatchesPrefetchCnt)
cacheT += ("Rebalance Cache Order", rebalanceCfg.getRebalanceOrder)
cacheT += ("Eviction Policy Enabled", bool2Str(evictCfg.getPolicy != null))
cacheT += ("Eviction Policy", safe(evictCfg.getPolicy))
cacheT += ("Eviction Policy Max Size", safe(evictCfg.getPolicyMaxSize))
cacheT += ("Eviction Filter", safe(evictCfg.getFilter))
cacheT += ("Near Cache Enabled", bool2Str(nearCfg.isNearEnabled))
cacheT += ("Near Start Size", nearCfg.getNearStartSize)
cacheT += ("Near Eviction Policy", safe(nearCfg.getNearEvictPolicy))
cacheT += ("Near Eviction Policy Max Size", safe(nearCfg.getNearEvictMaxSize))
cacheT += ("Default Lock Timeout", cfg.getDefaultLockTimeout)
cacheT += ("Metadata type count", cfg.getJdbcTypes.size())
cacheT += ("Cache Interceptor", safe(cfg.getInterceptor))
cacheT += ("Store Enabled", bool2Str(storeCfg.isEnabled))
cacheT += ("Store Class", safe(storeCfg.getStore))
cacheT += ("Store Factory Class", storeCfg.getStoreFactory)
cacheT += ("Store Keep Binary", storeCfg.isStoreKeepBinary)
cacheT += ("Store Read Through", bool2Str(storeCfg.isReadThrough))
cacheT += ("Store Write Through", bool2Str(storeCfg.isWriteThrough))
cacheT += ("Store Write Coalescing", bool2Str(storeCfg.getWriteBehindCoalescing))
cacheT += ("Write-Behind Enabled", bool2Str(storeCfg.isWriteBehindEnabled))
cacheT += ("Write-Behind Flush Size", storeCfg.getFlushSize)
cacheT += ("Write-Behind Frequency", storeCfg.getFlushFrequency)
cacheT += ("Write-Behind Flush Threads Count", storeCfg.getFlushThreadCount)
cacheT += ("Write-Behind Batch Size", storeCfg.getBatchSize)
cacheT += ("Concurrent Asynchronous Operations Number", cfg.getMaxConcurrentAsyncOperations)
cacheT += ("Loader Factory Class Name", safe(cfg.getLoaderFactory))
cacheT += ("Writer Factory Class Name", safe(cfg.getWriterFactory))
cacheT += ("Expiry Policy Factory Class Name", safe(cfg.getExpiryPolicyFactory))
cacheT +=("Query Execution Time Threshold", queryCfg.getLongQueryWarningTimeout)
cacheT +=("Query Escaped Names", bool2Str(queryCfg.isSqlEscapeAll))
cacheT +=("Query Schema Name", queryCfg.getSqlSchema)
cacheT +=("Query Indexed Types", queryCfg.getIndexedTypes)
cacheT +=("Maximum payload size for offheap indexes", cfg.getSqlIndexMaxInlineSize)
cacheT +=("Query Metrics History Size", cfg.getQueryDetailMetricsSize)
val sqlFxs = queryCfg.getSqlFunctionClasses
val hasSqlFxs = sqlFxs != null && sqlFxs.nonEmpty
if (!hasSqlFxs)
cacheT +=("Query SQL functions", NA)
val indexedTypes = queryCfg.getIndexedTypes
val hasIndexedTypes = indexedTypes != null && indexedTypes.nonEmpty
if (!hasIndexedTypes)
cacheT +=("Query Indexed Types", NA)
println(title)
cacheT.render()
if (hasSqlFxs) {
      println("\nQuery SQL functions:")
val sqlFxsT = VisorTextTable()
sqlFxsT #= "Function Class Name"
sqlFxs.foreach(s => sqlFxsT += s)
sqlFxsT.render()
}
if (hasIndexedTypes) {
      println("\nQuery Indexed Types:")
val indexedTypesT = VisorTextTable()
indexedTypesT #= ("Key Class Name", "Value Class Name")
indexedTypes.grouped(2).foreach(types => indexedTypesT += (types(0), types(1)))
indexedTypesT.render()
}
}
}
| wmz7year/ignite | modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala | Scala | apache-2.0 | 40,645 |
import org.scalatest.{FunSuite, Matchers}
/**
* Created by inieto on 27/04/15.
*/
class _03_Options extends FunSuite with Matchers {
test("Option[A] is a container for an optional value of type A." +
" If the value of type A is present, the Option[A] is an instance of Some[A], containing the present value of type A." +
" If the value is absent, the Option[A] is the object None.") {
val someValue: Option[String] = Some("I am wrapped in something")
someValue.get should be("I am wrapped in something")
val nullValue: Option[String] = None
nullValue should be(None)
}
def maybeItWillReturnSomething(flag: Boolean): Option[String] = {
if (flag) Some("Found value") else None
}
   test("Have 'maybeItWillReturnSomething' represent the absence of a value with None, because null is a bad idea") {
val value1 = maybeItWillReturnSomething(true)
val value2 = maybeItWillReturnSomething(false)
value1.get should be("Found value")
intercept[java.util.NoSuchElementException] {
value2.get
}
}
test("Provide a default value for None") {
val value1 = maybeItWillReturnSomething(true)
val value2 = maybeItWillReturnSomething(false)
value1 getOrElse "No value" should be("Found value")
value2 getOrElse "No value" should be("No value")
value2 getOrElse {
"default function"
} should be("default function")
}
test("Checking whether option has value") {
val value1 = maybeItWillReturnSomething(true)
val value2 = maybeItWillReturnSomething(false)
value1.isEmpty should be(false)
value2.isEmpty should be(true)
}
test("Option can also be used with pattern matching") {
val someValue: Option[Double] = Some(20.0)
val value = someValue match {
case Some(v) => v
case None => 0.0
}
value should be(20.0)
val noValue: Option[Double] = None
val value1 = noValue match {
case Some(v) => v
case None => 0.0
}
value1 should be(0.0)
}
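   // Additional sketch (not part of the original exercise set): Option values are also commonly
   // transformed with 'map' and collapsed with 'fold', which avoids calling 'get' directly.
   test("Option can be transformed with map and folded with a default") {
     val someValue: Option[Int] = Some(3)
     val noValue: Option[Int] = None
     someValue.map(_ * 2) should be(Some(6))
     noValue.map(_ * 2) should be(None)
     someValue.fold(0)(_ * 2) should be(6)
     noValue.fold(0)(_ * 2) should be(0)
   }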
}
| inieto/scala-47deg | ScalaExercises/src/test/scala-2.11/_03_Options.scala | Scala | mit | 2,038 |
package ml.combust.bundle.test
import java.io.File
import java.net.URI
import java.nio.file.{Files, Paths}
import ml.combust.bundle.fs.BundleFileSystem
import scala.util.Try
class TestBundleFileSystem extends BundleFileSystem {
override def schemes: Seq[String] = Seq("test")
override def load(uri: URI): Try[File] = {
Try(new File(uri.getPath))
}
override def save(uri: URI, localFile: File): Unit = {
Files.copy(localFile.toPath, Paths.get(uri.getPath))
}
}
| combust-ml/mleap | bundle-ml/src/test/scala/ml/combust/bundle/test/TestBundleFileSystem.scala | Scala | apache-2.0 | 484 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import java.util.Date
import org.geotools.data.{Query, Transaction}
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithDataStore
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.conf.QueryHints.{BIN_BATCH_SIZE, BIN_LABEL, BIN_SORT, BIN_TRACK, SAMPLE_BY, SAMPLING}
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.BIN_ATTRIBUTE_INDEX
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.opengis.feature.simple.SimpleFeature
import org.opengis.filter.Filter
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class S2IndexTest extends TestWithDataStore {
override val spec = "name:String,track:String,dtg:Date,*geom:Point:srid=4326;geomesa.indices.enabled=s2:geom"
val features =
(0 until 10).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", "track1", s"2010-05-07T0$i:00:00.000Z", s"POINT(40 6$i)")
} ++ (10 until 20).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", "track2", s"2010-05-${i}T$i:00:00.000Z", s"POINT(40 6${i - 10})")
} ++ (20 until 30).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", "track3", s"2010-05-${i}T${i-10}:00:00.000Z", s"POINT(40 8${i - 20})")
}
step {
addFeatures(features)
}
def execute(query: Query): Seq[SimpleFeature] =
SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
def execute(ecql: String, transforms: Option[Array[String]] = None): Seq[SimpleFeature] = {
val query = transforms match {
case None => new Query(sft.getTypeName, ECQL.toFilter(ecql))
case Some(t) => new Query(sft.getTypeName, ECQL.toFilter(ecql), t)
}
execute(query)
}
"S2Index" should {
"return all features for inclusive filter" >> {
val filter = "bbox(geom, 35, 55, 45, 75)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = execute(filter)
features must haveSize(10)
features.map(_.getID.toInt) must containTheSameElementsAs(0 to 9)
}
"return some features for exclusive geom filter" >> {
val filter = "bbox(geom, 35, 55, 45, 65.001)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = execute(filter)
features must haveSize(6)
features.map(_.getID.toInt) must containTheSameElementsAs(0 to 5)
}
"return some features for exclusive date filter" >> {
val filter = "bbox(geom, 35, 55, 45, 75)" +
" AND dtg between '2010-05-07T06:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = execute(filter)
features must haveSize(4)
features.map(_.getID.toInt) must containTheSameElementsAs(6 to 9)
}
"work with whole world filter" >> {
val filter = "bbox(geom, -180, -90, 180, 90)" +
" AND dtg between '2010-05-07T05:00:00.000Z' and '2010-05-07T08:00:00.000Z'"
val features = execute(filter)
features must haveSize(4)
features.map(_.getID.toInt) must containTheSameElementsAs(5 to 8)
}
"work with small bboxes" >> {
val filter = "bbox(geom, 39.999, 60.999, 40.001, 61.001)"
val features = execute(filter)
features must haveSize(2)
features.map(_.getID.toInt) must containTheSameElementsAs(Seq(1, 11))
}
"apply secondary filters" >> {
val filter = "bbox(geom, 35, 55, 45, 75)" +
" AND dtg between '2010-05-07T06:00:00.000Z' and '2010-05-08T00:00:00.000Z'" +
" AND name = 'name8'"
val features = execute(filter)
features must haveSize(1)
features.map(_.getID.toInt) must containTheSameElementsAs(Seq(8))
}
"apply transforms" >> {
val filter = "bbox(geom, 35, 55, 45, 75)" +
" AND dtg between '2010-05-07T06:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = execute(filter, Some(Array("name")))
features must haveSize(4)
features.map(_.getID.toInt) must containTheSameElementsAs(6 to 9)
forall(features)((f: SimpleFeature) => f.getAttributeCount mustEqual 1)
forall(features)((f: SimpleFeature) => f.getAttribute("name") must not(beNull))
}
"apply functional transforms" >> {
val filter = "bbox(geom, 35, 55, 45, 75)" +
" AND dtg between '2010-05-07T06:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = execute(filter, Some(Array("derived=strConcat('my', name)")))
features must haveSize(4)
features.map(_.getID.toInt) must containTheSameElementsAs(6 to 9)
forall(features)((f: SimpleFeature) => f.getAttributeCount mustEqual 1)
forall(features)((f: SimpleFeature) => f.getAttribute("derived").asInstanceOf[String] must beMatching("myname\\d"))
}
"optimize for bin format" >> {
val filter = "bbox(geom, -180, -90, 180, 90)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-07T12:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter))
query.getHints.put(BIN_TRACK, "name")
query.getHints.put(BIN_BATCH_SIZE, 100)
val returnedFeatures = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT))
// the same simple feature gets reused - so make sure you access in serial order
val aggregates = returnedFeatures.map(f => f.getAttribute(BIN_ATTRIBUTE_INDEX).asInstanceOf[Array[Byte]]).toList
aggregates.size must beLessThan(10) // ensure some aggregation was done
val bin = aggregates.flatMap(a => a.grouped(16).map(BinaryOutputEncoder.decode))
bin must haveSize(10)
(0 until 10).map(i => s"name$i".hashCode) must contain(atLeast(bin.map(_.trackId).tail: _*))
bin.map(_.dtg) must
containAllOf((0 until 10).map(i => features(i).getAttribute("dtg").asInstanceOf[Date].getTime))
bin.map(_.lat) must containAllOf((0 until 10).map(_ + 60.0f))
forall(bin.map(_.lon))(_ mustEqual 40.0)
}
"optimize for bin format with sorting" >> {
val filter = "bbox(geom, -180, -90, 180, 90)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-07T12:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter))
query.getHints.put(BIN_TRACK, "name")
query.getHints.put(BIN_BATCH_SIZE, 100)
query.getHints.put(BIN_SORT, true)
val returnedFeatures = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT))
// the same simple feature gets reused - so make sure you access in serial order
val aggregates = returnedFeatures.map(f => f.getAttribute(BIN_ATTRIBUTE_INDEX).asInstanceOf[Array[Byte]]).toSeq
aggregates.size must beLessThan(10) // ensure some aggregation was done
forall(aggregates) { a =>
val window = a.grouped(16).map(BinaryOutputEncoder.decode(_).dtg).sliding(2).filter(_.length > 1)
forall(window.toSeq)(w => w.head must beLessThanOrEqualTo(w(1)))
}
val bin = aggregates.flatMap(a => a.grouped(16).map(BinaryOutputEncoder.decode))
bin must haveSize(10)
(0 until 10).map(i => s"name$i".hashCode) must contain(atLeast(bin.map(_.trackId).tail: _*))
bin.map(_.dtg) must
containAllOf((0 until 10).map(i => features(i).getAttribute("dtg").asInstanceOf[Date].getTime))
bin.map(_.lat) must containAllOf((0 until 10).map(_ + 60.0f))
forall(bin.map(_.lon))(_ mustEqual 40.0)
}
"optimize for bin format with label" >> {
val filter = "bbox(geom, -180, -90, 180, 90)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-07T12:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter))
query.getHints.put(BIN_TRACK, "name")
query.getHints.put(BIN_LABEL, "name")
query.getHints.put(BIN_BATCH_SIZE, 100)
val returnedFeatures = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT))
// the same simple feature gets reused - so make sure you access in serial order
val aggregates = returnedFeatures.map(f => f.getAttribute(BIN_ATTRIBUTE_INDEX).asInstanceOf[Array[Byte]]).toSeq
aggregates.size must beLessThan(10) // ensure some aggregation was done
val bin = aggregates.flatMap(a => a.grouped(24).map(BinaryOutputEncoder.decode))
bin must haveSize(10)
(0 until 10).map(i => s"name$i".hashCode) must contain(atLeast(bin.map(_.trackId).tail: _*))
bin.map(_.dtg) must
containAllOf((0 until 10).map(i => features(i).getAttribute("dtg").asInstanceOf[Date].getTime))
bin.map(_.lat) must containAllOf((0 until 10).map(_ + 60.0f))
forall(bin.map(_.lon))(_ mustEqual 40.0)
bin.map(_.label) must containAllOf((0 until 10).map(i => BinaryOutputEncoder.convertToLabel(s"name$i")))
}
"support sampling" in {
val query = new Query(sft.getTypeName, Filter.INCLUDE)
query.getHints.put(SAMPLING, new java.lang.Float(.5f))
val results = execute(query)
results must haveLength(15)
}
"support sampling with cql" in {
val query = new Query(sft.getTypeName, ECQL.toFilter("track = 'track1'"))
query.getHints.put(SAMPLING, new java.lang.Float(.5f))
val results = execute(query)
results must haveLength(5)
forall(results)(_.getAttribute("track") mustEqual "track1")
}
"support sampling with transformations" in {
val query = new Query(sft.getTypeName, Filter.INCLUDE, Array("name", "geom"))
query.getHints.put(SAMPLING, new java.lang.Float(.5f))
val results = execute(query)
results must haveLength(15)
forall(results)(_.getAttributeCount mustEqual 2)
}
"support sampling with cql and transformations" in {
val query = new Query(sft.getTypeName, ECQL.toFilter("track = 'track2'"), Array("name", "geom"))
query.getHints.put(SAMPLING, new java.lang.Float(.2f))
val results = execute(query)
results must haveLength(2)
forall(results)(_.getAttributeCount mustEqual 2)
}
"support sampling by thread" in {
val query = new Query(sft.getTypeName, Filter.INCLUDE)
query.getHints.put(SAMPLING, new java.lang.Float(.5f))
query.getHints.put(SAMPLE_BY, "track")
val results = execute(query)
results.length must beLessThan(17)
results.count(_.getAttribute("track") == "track1") must beLessThan(6)
results.count(_.getAttribute("track") == "track2") must beLessThan(6)
results.count(_.getAttribute("track") == "track3") must beLessThan(6)
}
}
}
| elahrvivaz/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/S2IndexTest.scala | Scala | apache-2.0 | 11,176 |
package com.transgee.ebook.pdf
import java.io.{File, FileOutputStream}
import org.scalatest._
class FigureIteratorSpec extends FlatSpec with Matchers {
"FigureIterator" should "extract images" in {
extractFigures("pdf/OReilly.Mobile.and.Web.Messaging.pdf")
}
private def extractFigures(filePath: String) = {
val file = new File(filePath)
val filename = file.getName
val outPrefix = filename.substring(0, filename.lastIndexOf('.'))
withPdf(file) { doc =>
val stream = new FigureIterator(doc).toStream
stream.foreach { f =>
f.writeImageTo(new FileOutputStream(new File(s"target/$outPrefix-P${f.pageNumber}-F${f.id}.${f.suffix}")))
}
stream.length
}
}
it should "extract all drawings in a PDF" ignore {
extractFigures("pdf/RemoteNotificationsPG.pdf")
}
it should "extract all drawings in a PDF - Example 2" ignore {
extractFigures("pdf/Beautiful_Architecture.pdf")
}
}
| zenkiezhu/scala-ebook-clipper | src/test/scala/com/transgee/ebook/pdf/FigureIteratorSpec.scala | Scala | apache-2.0 | 953 |
package scala.slick.lifted
import scala.annotation.implicitNotFound
import scala.collection.generic.CanBuildFrom
import scala.slick.ast.{Join => AJoin, _}
import scala.slick.util.CollectionLinearizer
/**
* A query monad which contains the AST for a query's projection and the accumulated
* restrictions and other modifiers.
*/
abstract class Query[+E, U] extends Rep[Seq[U]] with CollectionLinearizer[Seq, U] { self =>
def unpackable: ShapedValue[_ <: E, U]
final lazy val packed = unpackable.packedNode
final lazy val elementLinearizer = unpackable.linearizer
final val canBuildFrom: CanBuildFrom[Nothing, U, Seq[U]] = implicitly
def flatMap[F, T](f: E => Query[F, T]): Query[F, T] = {
val generator = new AnonSymbol
val aliased = {
val uv = unpackable.value
WithOp.encodeRef(uv, generator)
}
val fv = f(aliased)
new WrappingQuery[F, T](new Bind(generator, Node(unpackable.value), Node(fv)), fv.unpackable)
}
def map[F, G, T](f: E => F)(implicit shape: Shape[F, T, G]): Query[G, T] =
flatMap(v => Query.pure[F, T, G](f(v)))
def >>[F, T](q: Query[F, T]): Query[F, T] = flatMap(_ => q)
def filter[T](f: E => T)(implicit wt: CanBeQueryCondition[T]): Query[E, U] = {
val generator = new AnonSymbol
val aliased = unpackable.encodeRef(generator)
val fv = f(aliased.value)
new WrappingQuery[E, U](Filter(generator, Node(this), Node(wt(fv))), unpackable)
}
def withFilter[T : CanBeQueryCondition](f: E => T) = filter(f)
def where[T <: Column[_] : CanBeQueryCondition](f: E => T) = filter(f)
def join[E2, U2](q2: Query[E2, U2], jt: JoinType = JoinType.Inner) = {
val leftGen, rightGen = new AnonSymbol
val aliased1 = unpackable.encodeRef(leftGen)
val aliased2 = q2.unpackable.encodeRef(rightGen)
new BaseJoinQuery[E, E2, U, U2](leftGen, rightGen, Node(unpackable.value), Node(q2.unpackable.value), jt, aliased1.zip(aliased2))
}
def innerJoin[E2, U2](q2: Query[E2, U2]) = join(q2, JoinType.Inner)
def leftJoin[E2, U2](q2: Query[E2, U2]) = join(q2, JoinType.Left)
def rightJoin[E2, U2](q2: Query[E2, U2]) = join(q2, JoinType.Right)
def outerJoin[E2, U2](q2: Query[E2, U2]) = join(q2, JoinType.Outer)
def zip[E2, U2](q2: Query[E2, U2]): Query[(E, E2), (U, U2)] = join(q2, JoinType.Zip)
def zipWith[E2, U2, F, G, T](q2: Query[E2, U2], f: (E, E2) => F)(implicit shape: Shape[F, T, G]): Query[G, T] =
join(q2, JoinType.Zip).map[F, G, T](x => f(x._1, x._2))
def zipWithIndex = zip(Query(RangeFrom(0L)))
def sortBy[T <% Ordered](f: E => T): Query[E, U] = {
val generator = new AnonSymbol
val aliased = unpackable.encodeRef(generator)
new WrappingQuery[E, U](SortBy(generator, Node(this), f(aliased.value).columns), unpackable)
}
def groupBy[K, T, G, P](f: E => K)(implicit kshape: Shape[K, T, G], vshape: Shape[E, _, P]): Query[(G, Query[P, U]), (T, Query[P, U])] = {
val sym = new AnonSymbol
val key = ShapedValue(f(unpackable.encodeRef(sym).value), kshape).packedValue
val value = ShapedValue(pack, Shape.selfLinearizingShape.asInstanceOf[Shape[Query[P, U], Query[P, U], Query[P, U]]])
val group = GroupBy(sym, new AnonSymbol, Node(unpackable.value), Node(key.value))
new WrappingQuery(group, key.zip(value))
}
def encodeRef(sym: Symbol, positions: List[Int] = Nil): Query[E, U] = new Query[E, U] {
val unpackable = self.unpackable.encodeRef(sym, positions)
lazy val nodeDelegate =
positions.foldRight[Node](Ref(sym))((idx, node) => Select(node, ElementSymbol(idx)))
}
def union[O >: E, R](other: Query[O, U]) =
new WrappingQuery[O, U](Union(Node(unpackable.value), Node(other.unpackable.value), false), unpackable)
def unionAll[O >: E, R](other: Query[O, U]) =
new WrappingQuery[O, U](Union(Node(unpackable.value), Node(other.unpackable.value), true), unpackable)
def length: Column[Int] = Library.CountAll.column(Node(unpackable.value))
@deprecated("Use .length instead of .count", "0.10.0-M2")
def count = length
def countDistinct: Column[Int] = Library.CountDistinct.column(Node(unpackable.value))
def exists = Library.Exists.column[Boolean](Node(unpackable.value))
@deprecated("Query.sub is not needed anymore", "0.10.0-M2")
def sub = this
def pack[R](implicit packing: Shape[E, _, R]): Query[R, U] =
new Query[R, U] {
val unpackable: ShapedValue[_ <: R, U] = self.unpackable.packedValue(packing)
def nodeDelegate = self.nodeDelegate
}
def take(num: Int): Query[E, U] = new WrappingQuery[E, U](Take(Node(this), num), unpackable)
def drop(num: Int): Query[E, U] = new WrappingQuery[E, U](Drop(Node(this), num), unpackable)
}
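/* Usage sketch (assumption, illustrative only; `Coffees` and its columns are hypothetical):
 * {{{
 *   val q = for {
 *     c <- Query(Coffees) if c.price > 100
 *   } yield c.name
 *   // equivalent to: Query(Coffees).filter(_.price > 100).map(_.name)
 * }}}
 * The monadic operations above (flatMap, map, withFilter) only build up the query AST; nothing
 * runs against the database until the query is executed through a session.
 */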
object Query extends Query[Column[Unit], Unit] {
def nodeDelegate = packed
def unpackable = ShapedValue(ConstColumn(()).mapOp((n, _) => Pure(n)), Shape.unpackColumnBase[Unit, Column[Unit]])
@deprecated("Use .sortBy on a query instead of mixing in Query.orderBy", "0.10.0-M2")
def orderBy[T <% Ordered](by: T) =
new WrappingQuery[Column[Unit], Unit](OrderBy(new AnonSymbol, Node(this), by.columns), unpackable)
def apply[E, U, R](value: E)(implicit unpack: Shape[E, U, R]): Query[R, U] = {
val unpackable = ShapedValue(value, unpack).packedValue
if(unpackable.packedNode.isInstanceOf[AbstractTable[_]])
new NonWrappingQuery[R, U](unpackable.packedNode, unpackable)
else new WrappingQuery[R, U](Pure(unpackable.packedNode), unpackable)
}
def pure[E, U, R](value: E)(implicit unpack: Shape[E, U, R]): Query[R, U] = {
val unpackable = ShapedValue(value, unpack).packedValue
new WrappingQuery[R, U](Pure(unpackable.packedNode), unpackable)
}
}
@implicitNotFound("Type ${T} cannot be a query condition (only Boolean, Column[Boolean] and Column[Option[Boolean]] are allowed")
trait CanBeQueryCondition[-T] extends (T => Column[_])
object CanBeQueryCondition {
implicit object BooleanColumnCanBeQueryCondition extends CanBeQueryCondition[Column[Boolean]] {
def apply(value: Column[Boolean]) = value
}
implicit object BooleanOptionColumnCanBeQueryCondition extends CanBeQueryCondition[Column[Option[Boolean]]] {
def apply(value: Column[Option[Boolean]]) = value
}
implicit object BooleanCanBeQueryCondition extends CanBeQueryCondition[Boolean] {
def apply(value: Boolean) = new ConstColumn(value)
}
}
class WrappingQuery[+E, U](val nodeDelegate: Node, val base: ShapedValue[_ <: E, U]) extends Query[E, U] {
lazy val unpackable = base.encodeRef(nodeDelegate.nodeIntrinsicSymbol)
}
class NonWrappingQuery[+E, U](val nodeDelegate: Node, val unpackable: ShapedValue[_ <: E, U]) extends Query[E, U]
final class BaseJoinQuery[+E1, +E2, U1, U2](leftGen: Symbol, rightGen: Symbol, left: Node, right: Node, jt: JoinType, base: ShapedValue[_ <: (E1, E2), (U1, U2)])
extends WrappingQuery[(E1, E2), (U1, U2)](AJoin(leftGen, rightGen, left, right, jt, ConstColumn.TRUE), base) {
def on[T <: Column[_]](pred: (E1, E2) => T)(implicit wt: CanBeQueryCondition[T]) =
new WrappingQuery[(E1, E2), (U1, U2)](AJoin(leftGen, rightGen, left, right, jt, Node(wt(pred(base.value._1, base.value._2)))), base)
}
| zefonseca/slick-1.0.0-scala.2.11.1 | src/main/scala/scala/slick/lifted/Query.scala | Scala | bsd-2-clause | 7,130 |
package scife.enumeration
package member
package testcase
import scife.{ enumeration => e }
import dependent._
import scife.util.logging._
import scife.util._
import org.scalatest._
import org.scalatest.prop._
import org.scalacheck.Gen
class BinarySearchTreeTest extends FunSuite with Matchers with GeneratorDrivenPropertyChecks with HasLogger with ProfileLogger {
import Checks._
import structures._
import BSTrees._
test("figure out what is going on") {
val depend = constructEnumerator
for (size <- 1 to 7) {
{
val enum = depend(size, 1 to size)
for (ind <- 0 until enum.size) enum(ind)
}
// {
// val enum = depend(size, 1 to size)
//
// for (ind <- 0 to 0) {
// val missing = (1 to size).toList.find(!enum(ind).contains(_))
//
// val newTree = enum(ind) insert missing.get
//
// var invariant = false
// for (bH <- blackHeight to (blackHeight + 1); if !invariant) {
// val enumBigger = dependEnumMember(size, 1 to size, Set(true, false), bH)
//
// invariant = (enumBigger.member(newTree))
// }
// assert(invariant)
// }
// }
}
}
test("regular enumeration") {
common.BinarySearchTreeTestHelper.testCorrectness( constructEnumerator )
}
test("member enumeration") {
val trees = constructEnumerator
{
val en = trees.getEnum(1, 1 to 2): Member[Tree]
for ((revEl, ind) <- List(Node(1), Node(2)).zipWithIndex) {
en.member(revEl) should be(true)
}
}
val normalTrees = constructEnumeratorNormal
forAll(Gen.choose(1, 5), Gen.choose(1, 5), minSuccessful(20)) {
(size: Int, m: Int) =>
{
val normalList = normalTrees.getEnum(size, 1 to m)
for (ind <- 0 until normalList.size) {
trees(size, 1 to m).member(normalList(ind)) should be(true)
}
}
}
}
private def constructEnumerator = {
val rootProducer = new WrapFunctionFin(
(range: Range) => { new WrapRange(range) })
val sizeProducer = new WrapFunctionFin(
(size: Int) => { new WrapRange(0 until size) })
new WrapFunctionFin(
(self: MemberDependFinite[(Int, Range), Tree], pair: (Int, Range)) => {
val (size, range) = pair
if (size <= 0) new WrapArray(Array(Leaf)): MemberFinite[Tree]
else if (size == 1) new WrapArray(Array(range map { v => Node(Leaf, v, Leaf) }: _*)): MemberFinite[Tree]
else {
val roots = rootProducer.getEnum(range)
val leftSizes = sizeProducer.getEnum(size)
val rootLeftSizePairs = new member.ProductFinite(leftSizes, roots)
val leftTrees = new InMapFin(self, { (par: (Int, Int)) =>
val (leftSize, median) = par
(leftSize, range.start to (median - 1))
})
val rightTrees =
new InMapFin(self, { (par: (Int, Int)) =>
val (leftSize, median) = par
(size - leftSize - 1, (median + 1) to range.end)
})
val leftRightPairs =
new ProductFinite(leftTrees, rightTrees)
val allNodes =
new ChainFinite(rootLeftSizePairs, leftRightPairs): MemberFinite[((Int, Int), (Tree, Tree))]
val makeTree =
(p: ((Int, Int), (Tree, Tree))) => {
val ((leftSize, currRoot), (leftTree, rightTree)) = p
Node(leftTree, currRoot, rightTree)
}
val memberTree =
(t: Tree) => {
val n = t.asInstanceOf[Node]
val leftSize = BSTrees.size(n.l)
val currRoot = n.v
val leftTree = n.l
val rightTree = n.r
((leftSize, currRoot), (leftTree, rightTree))
}
new Map[((Int, Int), (Tree, Tree)), Tree](allNodes, makeTree, memberTree) with MemberFinite[Tree]: MemberFinite[Tree]
}
})
}
private def constructEnumeratorNormal = {
import e.dependent._
val rootProducer = new WrapFunctionFin(
(range: Range) => { new WrapRange(range) })
val sizeProducer = new WrapFunctionFin(
(size: Int) => {
Enum(0 until size)
})
Depend.fin(
(self: e.dependent.DependFinite[(Int, Range), Tree], pair: (Int, Range)) => {
val (size, range) = pair
if (size <= 0) Enum(Leaf): Finite[Tree]
else if (size == 1) Enum(range.toArray map { v => Node(Leaf, v, Leaf) }): Finite[Tree]
else {
val roots = rootProducer.getEnum(range)
val leftSizes = sizeProducer.getEnum(size)
val rootLeftSizePairs = e.Product(leftSizes, roots)
val leftTrees = InMap(self,
{ (par: (Int, Int)) =>
val (leftSize, median) = par
(leftSize, range.start to (median - 1))
})
val rightTrees =
InMap(self,
{ (par: (Int, Int)) =>
val (leftSize, median) = par
(size - leftSize - 1, (median + 1) to range.end)
})
val leftRightPairs =
Product(leftTrees, rightTrees)
val allNodes =
new ChainFinite(rootLeftSizePairs, leftRightPairs)
val makeTree =
(p: ((Int, Int), (Tree, Tree))) => {
val ((leftSize, currRoot), (leftTree, rightTree)) = p
Node(leftTree, currRoot, rightTree)
}
allNodes map makeTree: Finite[Tree]
}
})
}
}
| kaptoxic/SciFe | src/test/scala/scife/enumeration/member/testcase/BinarySearchTreeTest.scala | Scala | gpl-2.0 | 5,573 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.loadBalancer
import java.nio.charset.StandardCharsets
import scala.annotation.tailrec
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success
import org.apache.kafka.clients.producer.RecordMetadata
import akka.actor.ActorRefFactory
import akka.actor.ActorSystem
import akka.actor.Props
import akka.cluster.Cluster
import akka.util.Timeout
import akka.pattern.ask
import whisk.common.Logging
import whisk.common.LoggingMarkers
import whisk.common.TransactionId
import whisk.core.WhiskConfig
import whisk.core.WhiskConfig._
import whisk.core.connector.{ActivationMessage, CompletionMessage}
import whisk.core.connector.MessageFeed
import whisk.core.connector.MessageProducer
import whisk.core.connector.MessagingProvider
import whisk.core.database.NoDocumentException
import whisk.core.entity.{ActivationId, WhiskActivation}
import whisk.core.entity.EntityName
import whisk.core.entity.ExecutableWhiskActionMetaData
import whisk.core.entity.Identity
import whisk.core.entity.InstanceId
import whisk.core.entity.UUID
import whisk.core.entity.WhiskAction
import whisk.core.entity.size._
import whisk.core.entity.types.EntityStore
import whisk.spi.SpiLoader
trait LoadBalancer {
val activeAckTimeoutGrace = 1.minute
/** Gets the number of in-flight activations for a specific user. */
def activeActivationsFor(namespace: UUID): Future[Int]
/** Gets the number of in-flight activations in the system. */
def totalActiveActivations: Future[Int]
/**
* Publishes activation message on internal bus for an invoker to pick up.
*
* @param action the action to invoke
* @param msg the activation message to publish on an invoker topic
* @param transid the transaction id for the request
   * @return a nested Future: the outer future indicates completion of publishing, and
   *         the inner future completes with the action result (Right) if it is ready
   *         before the timeout, otherwise with the activation id (Left). The inner future
   *         is guaranteed to complete within the declared action time limit
   *         plus a grace period (see activeAckTimeoutGrace).
*/
def publish(action: ExecutableWhiskActionMetaData, msg: ActivationMessage)(
implicit transid: TransactionId): Future[Future[Either[ActivationId, WhiskActivation]]]
}
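// Illustrative usage sketch, not part of the original source: the outer Future returned by
// `publish` completes once the activation message has been posted to the invoker topic, and the
// inner Future completes with either the full activation result (Right) or only the activation id
// (Left) if the result is not ready before the timeout. A caller would typically chain them like
// this (`respondWith` and `respondAccepted` are hypothetical names used only for illustration):
//
//   loadBalancer.publish(action, msg).flatMap { whenActive =>
//     whenActive.map {
//       case Right(activation)  => respondWith(activation)
//       case Left(activationId) => respondAccepted(activationId)
//     }
//   }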
class LoadBalancerService(config: WhiskConfig, instance: InstanceId, entityStore: EntityStore)(
implicit val actorSystem: ActorSystem,
logging: Logging)
extends LoadBalancer {
/** The execution context for futures */
implicit val executionContext: ExecutionContext = actorSystem.dispatcher
  /** How many invokers are dedicated to blackbox images. We range-bound it to something sensible regardless of configuration. */
private val blackboxFraction: Double = Math.max(0.0, Math.min(1.0, config.controllerBlackboxFraction))
logging.info(this, s"blackboxFraction = $blackboxFraction")(TransactionId.loadbalancer)
/** Feature switch for shared load balancer data **/
private val loadBalancerData = {
if (config.controllerLocalBookkeeping) {
new LocalLoadBalancerData()
} else {
/** Specify how seed nodes are generated */
val seedNodesProvider = new StaticSeedNodesProvider(config.controllerSeedNodes, actorSystem.name)
Cluster(actorSystem).joinSeedNodes(seedNodesProvider.getSeedNodes())
new DistributedLoadBalancerData()
}
}
override def activeActivationsFor(namespace: UUID) = loadBalancerData.activationCountOn(namespace)
override def totalActiveActivations = loadBalancerData.totalActivationCount
override def publish(action: ExecutableWhiskActionMetaData, msg: ActivationMessage)(
implicit transid: TransactionId): Future[Future[Either[ActivationId, WhiskActivation]]] = {
chooseInvoker(msg.user, action).flatMap { invokerName =>
val entry = setupActivation(action, msg.activationId, msg.user.uuid, invokerName, transid)
sendActivationToInvoker(messageProducer, msg, invokerName).map { _ =>
entry.promise.future
}
}
}
/** An indexed sequence of all invokers in the current system */
def allInvokers: Future[IndexedSeq[(InstanceId, InvokerState)]] =
invokerPool
.ask(GetStatus)(Timeout(5.seconds))
.mapTo[IndexedSeq[(InstanceId, InvokerState)]]
/**
* Tries to fill in the result slot (i.e., complete the promise) when a completion message arrives.
   * The promise is removed from the map when the result arrives or upon timeout.
   *
   * @param response the activation result, or just the activation id if the completion was forced
   * @param tid the transaction id of the original request
   * @param forced true if this completion was triggered by the timeout rather than a received active ack
   * @param invoker the invoker the activation was posted to
*/
private def processCompletion(response: Either[ActivationId, WhiskActivation],
tid: TransactionId,
forced: Boolean,
invoker: InstanceId): Unit = {
val aid = response.fold(l => l, r => r.activationId)
// treat left as success (as it is the result of a message exceeding the bus limit)
val isSuccess = response.fold(l => true, r => !r.response.isWhiskError)
loadBalancerData.removeActivation(aid) match {
case Some(entry) =>
logging.info(this, s"${if (!forced) "received" else "forced"} active ack for '$aid'")(tid)
// Active acks that are received here are strictly from user actions - health actions are not part of
// the load balancer's activation map. Inform the invoker pool supervisor of the user action completion.
// If the active ack was forced, because the waiting period expired, treat it as a failed activation.
// A cluster of such failures will eventually turn the invoker unhealthy and suspend queuing activations
// to that invoker topic.
invokerPool ! InvocationFinishedMessage(invoker, isSuccess && !forced)
if (!forced) {
entry.promise.trySuccess(response)
} else {
entry.promise.tryFailure(new Throwable("no active ack received"))
}
case None if !forced =>
// the entry has already been removed but we receive an active ack for this activation Id.
        // This happens for health actions, because they don't have an entry in the load balancer's activation map, or
// for activations that already timed out.
// For both cases, it looks like the invoker works again and we should send the status of
// the activation to the invokerPool.
invokerPool ! InvocationFinishedMessage(invoker, isSuccess)
logging.debug(this, s"received active ack for '$aid' which has no entry")(tid)
case None =>
// the entry has already been removed by an active ack. This part of the code is reached by the timeout.
// As the active ack is already processed we don't have to do anything here.
logging.debug(this, s"forced active ack for '$aid' which has no entry")(tid)
}
}
/**
* Creates an activation entry and insert into various maps.
*/
private def setupActivation(action: ExecutableWhiskActionMetaData,
activationId: ActivationId,
namespaceId: UUID,
invokerName: InstanceId,
transid: TransactionId): ActivationEntry = {
val timeout = action.limits.timeout.duration + activeAckTimeoutGrace
// Install a timeout handler for the catastrophic case where an active ack is not received at all
// (because say an invoker is down completely, or the connection to the message bus is disrupted) or when
    // the active ack is significantly delayed (possibly due to long queues, but the subject should not be penalized);
// in this case, if the activation handler is still registered, remove it and update the books.
loadBalancerData.putActivation(activationId, {
actorSystem.scheduler.scheduleOnce(timeout) {
processCompletion(Left(activationId), transid, forced = true, invoker = invokerName)
}
ActivationEntry(activationId, namespaceId, invokerName, Promise[Either[ActivationId, WhiskActivation]]())
})
}
/**
* Creates or updates a health test action by updating the entity store.
* This method is intended for use on startup.
* @return Future that completes successfully iff the action is added to the database
*/
private def createTestActionForInvokerHealth(db: EntityStore, action: WhiskAction): Future[Unit] = {
implicit val tid = TransactionId.loadbalancer
WhiskAction
.get(db, action.docid)
.flatMap { oldAction =>
WhiskAction.put(db, action.revision(oldAction.rev))(tid, notifier = None)
}
.recover {
case _: NoDocumentException => WhiskAction.put(db, action)(tid, notifier = None)
}
.map(_ => {})
.andThen {
case Success(_) => logging.info(this, "test action for invoker health now exists")
case Failure(e) => logging.error(this, s"error creating test action for invoker health: $e")
}
}
/** Gets a producer which can publish messages to the kafka bus. */
private val messagingProvider = SpiLoader.get[MessagingProvider]
private val messageProducer = messagingProvider.getProducer(config, executionContext)
private def sendActivationToInvoker(producer: MessageProducer,
msg: ActivationMessage,
invoker: InstanceId): Future[RecordMetadata] = {
implicit val transid = msg.transid
val topic = s"invoker${invoker.toInt}"
val start = transid.started(
this,
LoggingMarkers.CONTROLLER_KAFKA,
s"posting topic '$topic' with activation id '${msg.activationId}'")
producer.send(topic, msg).andThen {
case Success(status) =>
transid.finished(this, start, s"posted to ${status.topic()}[${status.partition()}][${status.offset()}]")
case Failure(e) => transid.failed(this, start, s"error on posting to topic $topic")
}
}
private val invokerPool = {
// Do not create the invokerPool if it is not possible to create the health test action to recover the invokers.
InvokerPool
.healthAction(instance)
.map {
// Await the creation of the test action; on failure, this will abort the constructor which should
// in turn abort the startup of the controller.
a =>
Await.result(createTestActionForInvokerHealth(entityStore, a), 1.minute)
}
.orElse {
throw new IllegalStateException(
"cannot create test action for invoker health because runtime manifest is not valid")
}
val maxPingsPerPoll = 128
val pingConsumer =
messagingProvider.getConsumer(config, s"health${instance.toInt}", "health", maxPeek = maxPingsPerPoll)
val invokerFactory = (f: ActorRefFactory, invokerInstance: InstanceId) =>
f.actorOf(InvokerActor.props(invokerInstance, instance))
actorSystem.actorOf(
InvokerPool.props(invokerFactory, (m, i) => sendActivationToInvoker(messageProducer, m, i), pingConsumer))
}
/**
* Subscribes to active acks (completion messages from the invokers), and
* registers a handler for received active acks from invokers.
*/
val maxActiveAcksPerPoll = 128
val activeAckPollDuration = 1.second
private val activeAckConsumer =
messagingProvider.getConsumer(config, "completions", s"completed${instance.toInt}", maxPeek = maxActiveAcksPerPoll)
val activationFeed = actorSystem.actorOf(Props {
new MessageFeed(
"activeack",
logging,
activeAckConsumer,
maxActiveAcksPerPoll,
activeAckPollDuration,
processActiveAck)
})
def processActiveAck(bytes: Array[Byte]): Future[Unit] = Future {
val raw = new String(bytes, StandardCharsets.UTF_8)
CompletionMessage.parse(raw) match {
case Success(m: CompletionMessage) =>
processCompletion(m.response, m.transid, forced = false, invoker = m.invoker)
activationFeed ! MessageFeed.Processed
case Failure(t) =>
activationFeed ! MessageFeed.Processed
logging.error(this, s"failed processing message: $raw with $t")
}
}
/** Compute the number of blackbox-dedicated invokers by applying a rounded down fraction of all invokers (but at least 1). */
private def numBlackbox(totalInvokers: Int) = Math.max(1, (totalInvokers.toDouble * blackboxFraction).toInt)
/** Return invokers (almost) dedicated to running blackbox actions. */
private def blackboxInvokers(
invokers: IndexedSeq[(InstanceId, InvokerState)]): IndexedSeq[(InstanceId, InvokerState)] = {
val blackboxes = numBlackbox(invokers.size)
invokers.takeRight(blackboxes)
}
/**
   * Return the invokers (at least one) dedicated to running non-blackbox actions.
* This set can overlap with the blackbox set if there is only one invoker.
*/
private def managedInvokers(
invokers: IndexedSeq[(InstanceId, InvokerState)]): IndexedSeq[(InstanceId, InvokerState)] = {
val managed = Math.max(1, invokers.length - numBlackbox(invokers.length))
invokers.take(managed)
}
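  // Illustrative worked example, not part of the original source: with 10 invokers and
  // blackboxFraction = 0.3, numBlackbox(10) == 3, so blackboxInvokers returns the last 3
  // invokers (indices 7-9) and managedInvokers the first 7 (indices 0-6). With a single
  // invoker both sets collapse to that one invoker, which is why the sets may overlap.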
/** Determine which invoker this activation should go to. Due to dynamic conditions, it may return no invoker. */
private def chooseInvoker(user: Identity, action: ExecutableWhiskActionMetaData): Future[InstanceId] = {
val hash = generateHash(user.namespace, action)
loadBalancerData.activationCountPerInvoker.flatMap { currentActivations =>
allInvokers.flatMap { invokers =>
val invokersToUse = if (action.exec.pull) blackboxInvokers(invokers) else managedInvokers(invokers)
val invokersWithUsage = invokersToUse.view.map {
// Using a view defers the comparably expensive lookup to actual access of the element
case (instance, state) => (instance, state, currentActivations.getOrElse(instance.toString, 0))
}
LoadBalancerService.schedule(invokersWithUsage, config.loadbalancerInvokerBusyThreshold, hash) match {
case Some(invoker) => Future.successful(invoker)
case None =>
logging.error(this, s"all invokers down")(TransactionId.invokerHealth)
Future.failed(new LoadBalancerException("no invokers available"))
}
}
}
}
/** Generates a hash based on the string representation of namespace and action */
private def generateHash(namespace: EntityName, action: ExecutableWhiskActionMetaData): Int = {
(namespace.asString.hashCode() ^ action.fullyQualifiedName(false).asString.hashCode()).abs
}
}
object LoadBalancerService {
def requiredProperties =
kafkaHost ++
Map(
kafkaTopicsCompletedRetentionBytes -> 1024.MB.toBytes.toString,
kafkaTopicsCompletedRetentionMS -> 1.hour.toMillis.toString,
kafkaTopicsCompletedSegmentBytes -> 512.MB.toBytes.toString) ++
Map(
loadbalancerInvokerBusyThreshold -> null,
controllerBlackboxFraction -> null,
controllerLocalBookkeeping -> null,
controllerSeedNodes -> null)
/** Memoizes the result of `f` for later use. */
def memoize[I, O](f: I => O): I => O = new scala.collection.mutable.HashMap[I, O]() {
override def apply(key: I) = getOrElseUpdate(key, f(key))
}
/** Euclidean algorithm to determine the greatest-common-divisor */
@tailrec
def gcd(a: Int, b: Int): Int = if (b == 0) a else gcd(b, a % b)
/** Returns pairwise coprime numbers until x. Result is memoized. */
val pairwiseCoprimeNumbersUntil: Int => IndexedSeq[Int] = LoadBalancerService.memoize {
case x =>
(1 to x).foldLeft(IndexedSeq.empty[Int])((primes, cur) => {
if (gcd(cur, x) == 1 && primes.forall(i => gcd(i, cur) == 1)) {
primes :+ cur
} else primes
})
}
/**
   * Scans through all invokers and searches for an invoker that has a queue length
   * below the defined threshold. The threshold is subject to a three-step back-off
   * (1x, 2x, 3x the threshold). If no "underloaded" invoker is found, it defaults to
   * the first healthy invoker in the step-defined progression.
*
* @param invokers a list of available invokers to search in, including their state and usage
* @param invokerBusyThreshold defines when an invoker is considered overloaded
* @param hash stable identifier of the entity to be scheduled
   * @return an invoker to schedule to, or None if no invoker is available
*/
def schedule(invokers: Seq[(InstanceId, InvokerState, Int)],
invokerBusyThreshold: Int,
hash: Int): Option[InstanceId] = {
val numInvokers = invokers.size
if (numInvokers > 0) {
val homeInvoker = hash % numInvokers
val stepSizes = LoadBalancerService.pairwiseCoprimeNumbersUntil(numInvokers)
val step = stepSizes(hash % stepSizes.size)
val invokerProgression = Stream
.from(0)
.take(numInvokers)
.map(i => (homeInvoker + i * step) % numInvokers)
.map(invokers)
.filter(_._2 == Healthy)
invokerProgression
.find(_._3 < invokerBusyThreshold)
.orElse(invokerProgression.find(_._3 < invokerBusyThreshold * 2))
.orElse(invokerProgression.find(_._3 < invokerBusyThreshold * 3))
.orElse(invokerProgression.headOption)
.map(_._1)
} else None
}
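  /**
   * Illustrative sketch, not part of the original source: reproduces the probe order that
   * `schedule` walks for a hypothetical ring of invokers. For numInvokers = 5 and hash = 13,
   * pairwiseCoprimeNumbersUntil(5) == IndexedSeq(1, 2, 3), so step = stepSizes(13 % 3) == 2,
   * the home invoker is 13 % 5 == 3, and the probed indices are 3, 0, 2, 4, 1.
   */
  def exampleProbeOrder(numInvokers: Int = 5, hash: Int = 13): Seq[Int] = {
    val stepSizes = pairwiseCoprimeNumbersUntil(numInvokers)
    val step = stepSizes(hash % stepSizes.size)
    val homeInvoker = hash % numInvokers
    (0 until numInvokers).map(i => (homeInvoker + i * step) % numInvokers)
  }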
}
private case class LoadBalancerException(msg: String) extends Throwable(msg)
| duynguyen/incubator-openwhisk | core/controller/src/main/scala/whisk/core/loadBalancer/LoadBalancerService.scala | Scala | apache-2.0 | 18,224 |
// NOTE: copy/pasted from https://github.com/lampepfl/dotty/tree/1962ada58fcd2333a2e40179ab0ac6efb6167ed2
package scala.meta
package internal.hosts.scalac
package tasty
import collection.mutable
import TastyBuffer._
import scala.io.Codec
import TastyName._
import TastyFormat._
class NameBuffer extends TastyBuffer(10000) {
private val nameRefs = new mutable.LinkedHashMap[TastyName, NameRef]
def nameIndex(name: TastyName): NameRef = nameRefs.get(name) match {
case Some(ref) =>
ref
case None =>
val ref = NameRef(nameRefs.size)
nameRefs(name) = ref
ref
}
def nameIndex(str: String): NameRef = nameIndex(Simple(str))
private def withLength(op: => Unit): Unit = {
val lengthAddr = currentAddr
writeByte(0)
op
val length = currentAddr.index - lengthAddr.index - 1
assert(length < 128)
putNat(lengthAddr, length, 1)
}
def writeNameRef(ref: NameRef) = writeNat(ref.index)
def pickleName(name: TastyName): Unit = name match {
case Simple(name) =>
val bytes =
if (name.length == 0) new Array[Byte](0)
else Codec.toUTF8(name)
writeByte(UTF8)
writeNat(bytes.length)
writeBytes(bytes, bytes.length)
case Qualified(qualified, selector) =>
writeByte(QUALIFIED)
withLength { writeNameRef(qualified); writeNameRef(selector) }
case Signed(original, params, result) =>
writeByte(SIGNED)
withLength { writeNameRef(original); writeNameRef(result); params.foreach(writeNameRef) }
case Expanded(prefix, original) =>
writeByte(EXPANDED)
withLength { writeNameRef(prefix); writeNameRef(original) }
case ModuleClass(module) =>
writeByte(OBJECTCLASS)
withLength { writeNameRef(module) }
case SuperAccessor(accessed) =>
writeByte(SUPERACCESSOR)
withLength { writeNameRef(accessed) }
case DefaultGetter(method, paramNumber) =>
writeByte(DEFAULTGETTER)
withLength { writeNameRef(method); writeNat(paramNumber) }
case Shadowed(original) =>
writeByte(SHADOWED)
withLength { writeNameRef(original) }
}
override def assemble(): Unit = {
var i = 0
for ((name, ref) <- nameRefs) {
assert(ref.index == i)
i += 1
pickleName(name)
}
}
}
| mdemarne/scalahost | interface/src/main/scala/scala/meta/internal/hosts/scalac/tasty/NameBuffer.scala | Scala | bsd-3-clause | 2,277 |
package com.lvxingpai.model.misc
import javax.validation.constraints.Min
import com.lvxingpai.model.mixin.ObjectIdEnabled
import org.hibernate.validator.constraints.NotBlank
import java.util.Date
import scala.beans.BeanProperty
/**
* Created by pengyt on 2015/10/21.
*/
class Feedback extends ObjectIdEnabled {
/**
   * User id
*/
@NotBlank
@Min(value = 1)
@BeanProperty
var userId: Long = 0
/**
   * Feedback content
*/
@BeanProperty
var body: String = null
/**
   * Time the feedback was submitted
*/
@BeanProperty
var time: Date = null
/**
   * Which app the feedback came from, e.g. 旅行派
*/
@BeanProperty
var origin: String = null
}
| Lvxingpai/core-model | src/main/scala/com/lvxingpai/model/misc/Feedback.scala | Scala | apache-2.0 | 668 |
package alternatives.breeze
import breeze.util.BloomFilter
import org.openjdk.jmh.annotations.{Benchmark, Param, Scope, Setup, State}
import scala.util.Random
@State(Scope.Benchmark)
class ArrayByteItemBenchmark {
private val itemsExpected = 1000000L
private val falsePositiveRate = 0.01
private val random = new Random()
private val bf = BloomFilter.optimallySized[Array[Byte]](itemsExpected.toDouble, falsePositiveRate)
@Param(Array("1024"))
var length: Int = _
  // JMH injects @Param values after the state object is constructed, so a field initializer
  // would see length == 0; build the benchmarked item in @Setup instead.
  private var item: Array[Byte] = _
  @Setup
  def setup(): Unit = {
    item = new Array[Byte](length)
    random.nextBytes(item)
    bf.+=(item)
  }
@Benchmark
def breezePut(): Unit = {
bf.+=(item)
}
@Benchmark
def breezeGet(): Unit = {
bf.contains(item)
}
}
| alexandrnikitin/bloom-filter-scala | benchmarks/src/main/scala/alternatives/breeze/ArrayByteItemBenchmark.scala | Scala | mit | 693 |
package edu.gemini.phase2.template.factory.impl.flamingos2
import edu.gemini.pot.sp._
import edu.gemini.phase2.template.factory.impl._
import edu.gemini.spModel.gemini.flamingos2.Flamingos2
import edu.gemini.spModel.gemini.flamingos2.blueprint.SpFlamingos2BlueprintImaging
import scala.collection.JavaConverters._
case class Flamingos2Imaging(blueprint:SpFlamingos2BlueprintImaging) extends Flamingos2Base[SpFlamingos2BlueprintImaging] {
// **** IF INSTRUMENT MODE == IMAGING ****
//
// INCLUDE {1,2,3}
// FOR {1, 2, 3}:
// Put FILTERS from PI into F2 ITERATOR
// SET EXPOSURE TIME in Iterator/Static component:
// Y = 60s
// F1056 = 60s
// F1063 = 60s
// J-lo = 60s
// J = 60s
// H = 10s
// Ks = 30s
//
val targetGroup = Seq(1, 2, 3)
val baselineFolder = Seq.empty
val notes = Seq("F2 Imaging Notes")
val science = Seq(1, 2)
val cal = Seq(3)
def exposureTimes(filter: Flamingos2.Filter): Double = {
import Flamingos2.Filter._
filter match {
case H => 10.0
case K_LONG | K_SHORT => 30.0
case _ => 60.0
}
}
def initialize(grp:ISPGroup, db:TemplateDb): Maybe[Unit] = for {
_ <- forObservations(grp, science, forScience).right
_ <- forObservations(grp, cal, forCalibrations(db)).right
} yield ()
def forScience(obs: ISPObservation): Maybe[Unit] = for {
_ <- obs.setFilters(blueprint.filters.asScala).right
_ <- obs.setExposureTimes(blueprint.filters.asScala.map(exposureTimes)).right
} yield ()
def forCalibrations(db: TemplateDb)(obs: ISPObservation): Maybe[Unit] =
setFlatFilters(obs, blueprint.filters.asScala)
// Update the static component and the first iterator to set the filters to use.
private def setFlatFilters(obs: ISPObservation, lst: Iterable[Flamingos2.Filter]): Maybe[Unit] = for {
_ <- lst.headOption.toRight("One or more filters must be specified.").right
_ <- obs.setFilter(lst.head).right
_ <- obs.ed.iterateFirst(Flamingos2.FILTER_PROP.getName, lst.toList).right
} yield ()
}
| spakzad/ocs | bundle/edu.gemini.phase2.skeleton.servlet/src/main/scala/edu/gemini/phase2/template/factory/impl/flamingos2/Flamingos2Imaging.scala | Scala | bsd-3-clause | 2,181 |
package org.example1.usage
import org.example1.declaration.{Y => Y_Renamed, _}
trait Usage_NoOther_Imports_Wildcard_WithRename_OtherClass {
val x: X = ???
val y: Y_Renamed = ???
val z: Z = ???
} | JetBrains/intellij-scala | scala/scala-impl/testdata/move/allInOne/before/org/example1/usage/Usage_NoOther_Imports_Wildcard_WithRename_OtherClass.scala | Scala | apache-2.0 | 202 |
package rolodato.genetics.impl
import rolodato.genetics.{Crossover, Gene}
import scala.language.postfixOps
import scala.math.abs
import scala.util.Random
trait OnePointCrossover extends Crossover {
/**
* Given two parents, creates two children by applying a one-point
* crossover algorithm.
*
* Unless overridden, the crossover point is chosen randomly.
*
* @return a list containing the resulting two children
* @see http://en.wikipedia.org/wiki/Crossover_%28genetic_algorithm%29#One
* -point_crossover
*/
def cross(parent1: Gene, parent2: Gene): List[Gene] = {
require(parent1.length == parent2.length, "gene lengths must be equal")
val xop: Int = abs(crossoverPoint % parent1.length)
val child1 = ((parent1 string) take xop) ++ ((parent2 string) drop xop)
val child2 = ((parent2 string) take xop) ++ ((parent1 string) drop xop)
List(parent1.copy(child1), parent2.copy(child2))
}
def crossoverPoint: Int = Random.nextInt()
}
object OnePointCrossover {
/**
* Creates a crossover algorithm that always uses the same crossover point.
* @param xop The crossover point to use. Should be between 0 and the gene
* string length, exclusive. Useful for testing.
*/
def apply(xop: Int) = new OnePointCrossover {
override def crossoverPoint: Int = xop
}
def apply() = new OnePointCrossover {}
}
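// Illustrative worked example, not part of the original source (the concrete Gene fields are
// assumed): with OnePointCrossover(3) and 6-element gene strings, say
// parent1.string == Seq(a, b, c, d, e, f) and parent2.string == Seq(1, 2, 3, 4, 5, 6),
// `cross` keeps the first 3 elements of each parent and swaps the tails, producing children
// whose strings are Seq(a, b, c, 4, 5, 6) and Seq(1, 2, 3, d, e, f).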
| rolodato/genetics | shared/src/main/scala/rolodato/genetics/impl/OnePointCrossover.scala | Scala | mit | 1,387 |
// Starter Code for Exercise 1
// From "Constructors" atom
import com.atomicscala.AtomicTest._
val doubleHalfCaf = new Coffee(shots=2, decaf=1)
val tripleHalfCaf = new Coffee(shots=3, decaf=2)
doubleHalfCaf.decaf is 1
doubleHalfCaf.caf is 1
doubleHalfCaf.shots is 2
tripleHalfCaf.decaf is 2
tripleHalfCaf.caf is 1
tripleHalfCaf.shots is 3
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-solutions/25_Constructors/Starter-1.scala | Scala | apache-2.0 | 341 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.dstream
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.{JobConf, OutputFormat}
import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat}
import org.apache.spark.{HashPartitioner, Partitioner, SerializableWritable}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.{Duration, Time}
import org.apache.spark.streaming.StreamingContext.rddToFileName
/**
* Extra functions available on DStream of (key, value) pairs through an implicit conversion.
*/
class PairDStreamFunctions[K, V](self: DStream[(K,V)])
(implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K])
extends Serializable
{
private[streaming] def ssc = self.ssc
private[streaming] def defaultPartitioner(numPartitions: Int = self.ssc.sc.defaultParallelism) = {
new HashPartitioner(numPartitions)
}
/**
* Return a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to
* generate the RDDs with Spark's default number of partitions.
*/
def groupByKey(): DStream[(K, Iterable[V])] = {
groupByKey(defaultPartitioner())
}
/**
* Return a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to
* generate the RDDs with `numPartitions` partitions.
*/
def groupByKey(numPartitions: Int): DStream[(K, Iterable[V])] = {
groupByKey(defaultPartitioner(numPartitions))
}
/**
* Return a new DStream by applying `groupByKey` on each RDD. The supplied
* org.apache.spark.Partitioner is used to control the partitioning of each RDD.
*/
def groupByKey(partitioner: Partitioner): DStream[(K, Iterable[V])] = {
val createCombiner = (v: V) => ArrayBuffer[V](v)
val mergeValue = (c: ArrayBuffer[V], v: V) => (c += v)
val mergeCombiner = (c1: ArrayBuffer[V], c2: ArrayBuffer[V]) => (c1 ++ c2)
combineByKey(createCombiner, mergeValue, mergeCombiner, partitioner)
.asInstanceOf[DStream[(K, Iterable[V])]]
}
/**
* Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
* merged using the associative reduce function. Hash partitioning is used to generate the RDDs
* with Spark's default number of partitions.
*/
def reduceByKey(reduceFunc: (V, V) => V): DStream[(K, V)] = {
reduceByKey(reduceFunc, defaultPartitioner())
}
/**
* Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
* merged using the supplied reduce function. Hash partitioning is used to generate the RDDs
* with `numPartitions` partitions.
*/
def reduceByKey(reduceFunc: (V, V) => V, numPartitions: Int): DStream[(K, V)] = {
reduceByKey(reduceFunc, defaultPartitioner(numPartitions))
}
/**
* Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
* merged using the supplied reduce function. org.apache.spark.Partitioner is used to control
* the partitioning of each RDD.
*/
def reduceByKey(reduceFunc: (V, V) => V, partitioner: Partitioner): DStream[(K, V)] = {
val cleanedReduceFunc = ssc.sc.clean(reduceFunc)
combineByKey((v: V) => v, cleanedReduceFunc, cleanedReduceFunc, partitioner)
}
/**
* Combine elements of each key in DStream's RDDs using custom functions. This is similar to the
* combineByKey for RDDs. Please refer to combineByKey in
* org.apache.spark.rdd.PairRDDFunctions in the Spark core documentation for more information.
*/
def combineByKey[C: ClassTag](
createCombiner: V => C,
mergeValue: (C, V) => C,
mergeCombiner: (C, C) => C,
partitioner: Partitioner,
mapSideCombine: Boolean = true): DStream[(K, C)] = {
new ShuffledDStream[K, V, C](self, createCombiner, mergeValue, mergeCombiner, partitioner,
mapSideCombine)
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window. This is similar to
* `DStream.groupByKey()` but applies it over a sliding window. The new DStream generates RDDs
* with the same interval as this DStream. Hash partitioning is used to generate the RDDs with
* Spark's default number of partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
*/
def groupByKeyAndWindow(windowDuration: Duration): DStream[(K, Iterable[V])] = {
groupByKeyAndWindow(windowDuration, self.slideDuration, defaultPartitioner())
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window. Similar to
* `DStream.groupByKey()`, but applies it over a sliding window. Hash partitioning is used to
* generate the RDDs with Spark's default number of partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration)
: DStream[(K, Iterable[V])] =
{
groupByKeyAndWindow(windowDuration, slideDuration, defaultPartitioner())
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window on `this` DStream.
* Similar to `DStream.groupByKey()`, but applies it over a sliding window.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions number of partitions of each RDD in the new DStream; if not specified
* then Spark's default number of partitions will be used
*/
def groupByKeyAndWindow(
windowDuration: Duration,
slideDuration: Duration,
numPartitions: Int
): DStream[(K, Iterable[V])] = {
groupByKeyAndWindow(windowDuration, slideDuration, defaultPartitioner(numPartitions))
}
/**
* Create a new DStream by applying `groupByKey` over a sliding window on `this` DStream.
* Similar to `DStream.groupByKey()`, but applies it over a sliding window.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param partitioner partitioner for controlling the partitioning of each RDD in the new
* DStream.
*/
def groupByKeyAndWindow(
windowDuration: Duration,
slideDuration: Duration,
partitioner: Partitioner
): DStream[(K, Iterable[V])] = {
val createCombiner = (v: Iterable[V]) => new ArrayBuffer[V] ++= v
val mergeValue = (buf: ArrayBuffer[V], v: Iterable[V]) => buf ++= v
val mergeCombiner = (buf1: ArrayBuffer[V], buf2: ArrayBuffer[V]) => buf1 ++= buf2
self.groupByKey(partitioner)
.window(windowDuration, slideDuration)
.combineByKey[ArrayBuffer[V]](createCombiner, mergeValue, mergeCombiner, partitioner)
.asInstanceOf[DStream[(K, Iterable[V])]]
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window on `this` DStream.
* Similar to `DStream.reduceByKey()`, but applies it over a sliding window. The new DStream
* generates RDDs with the same interval as this DStream. Hash partitioning is used to generate
* the RDDs with Spark's default number of partitions.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
*/
def reduceByKeyAndWindow(
reduceFunc: (V, V) => V,
windowDuration: Duration
): DStream[(K, V)] = {
reduceByKeyAndWindow(reduceFunc, windowDuration, self.slideDuration, defaultPartitioner())
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window. This is similar to
* `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to
* generate the RDDs with Spark's default number of partitions.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def reduceByKeyAndWindow(
reduceFunc: (V, V) => V,
windowDuration: Duration,
slideDuration: Duration
): DStream[(K, V)] = {
reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, defaultPartitioner())
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window. This is similar to
* `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to
* generate the RDDs with `numPartitions` partitions.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions number of partitions of each RDD in the new DStream.
*/
def reduceByKeyAndWindow(
reduceFunc: (V, V) => V,
windowDuration: Duration,
slideDuration: Duration,
numPartitions: Int
): DStream[(K, V)] = {
reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration,
defaultPartitioner(numPartitions))
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window. Similar to
* `DStream.reduceByKey()`, but applies it over a sliding window.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param partitioner partitioner for controlling the partitioning of each RDD
* in the new DStream.
*/
def reduceByKeyAndWindow(
reduceFunc: (V, V) => V,
windowDuration: Duration,
slideDuration: Duration,
partitioner: Partitioner
): DStream[(K, V)] = {
val cleanedReduceFunc = ssc.sc.clean(reduceFunc)
self.reduceByKey(cleanedReduceFunc, partitioner)
.window(windowDuration, slideDuration)
.reduceByKey(cleanedReduceFunc, partitioner)
}
/**
* Return a new DStream by applying incremental `reduceByKey` over a sliding window.
   * The reduced value over a new window is calculated using the old window's reduced value:
* 1. reduce the new values that entered the window (e.g., adding new counts)
*
* 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
*
* This is more efficient than reduceByKeyAndWindow without "inverse reduce" function.
* However, it is applicable to only "invertible reduce functions".
* Hash partitioning is used to generate the RDDs with Spark's default number of partitions.
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param filterFunc Optional function to filter expired key-value pairs;
* only pairs that satisfy the function are retained
*/
def reduceByKeyAndWindow(
reduceFunc: (V, V) => V,
invReduceFunc: (V, V) => V,
windowDuration: Duration,
slideDuration: Duration = self.slideDuration,
numPartitions: Int = ssc.sc.defaultParallelism,
filterFunc: ((K, V)) => Boolean = null
): DStream[(K, V)] = {
reduceByKeyAndWindow(
reduceFunc, invReduceFunc, windowDuration,
slideDuration, defaultPartitioner(numPartitions), filterFunc
)
}
/**
* Return a new DStream by applying incremental `reduceByKey` over a sliding window.
   * The reduced value over a new window is calculated using the old window's reduced value:
* 1. reduce the new values that entered the window (e.g., adding new counts)
* 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
* This is more efficient than reduceByKeyAndWindow without "inverse reduce" function.
* However, it is applicable to only "invertible reduce functions".
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param partitioner partitioner for controlling the partitioning of each RDD in the new
* DStream.
* @param filterFunc Optional function to filter expired key-value pairs;
* only pairs that satisfy the function are retained
*/
def reduceByKeyAndWindow(
reduceFunc: (V, V) => V,
invReduceFunc: (V, V) => V,
windowDuration: Duration,
slideDuration: Duration,
partitioner: Partitioner,
filterFunc: ((K, V)) => Boolean
): DStream[(K, V)] = {
val cleanedReduceFunc = ssc.sc.clean(reduceFunc)
val cleanedInvReduceFunc = ssc.sc.clean(invReduceFunc)
val cleanedFilterFunc = if (filterFunc != null) Some(ssc.sc.clean(filterFunc)) else None
new ReducedWindowedDStream[K, V](
self, cleanedReduceFunc, cleanedInvReduceFunc, cleanedFilterFunc,
windowDuration, slideDuration, partitioner
)
}
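  /**
   * Illustrative sketch, not part of the original source: expresses an incremental windowed sum
   * via the inverse-reduce overload above, assuming an implicit Numeric for the value type.
   * Values entering the window are added and values leaving it are subtracted, so each window's
   * result is derived from the previous one instead of being recomputed from scratch.
   */
  private[streaming] def exampleIncrementalWindowSum(
      windowDuration: Duration,
      slideDuration: Duration)(implicit num: Numeric[V]): DStream[(K, V)] = {
    reduceByKeyAndWindow(num.plus _, num.minus _, windowDuration, slideDuration)
  }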
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of each key.
* Hash partitioning is used to generate the RDDs with Spark's default number of partitions.
   * @param updateFunc State update function. If this function returns None, then the
   *                   corresponding state key-value pair will be eliminated.
* @tparam S State type
*/
def updateStateByKey[S: ClassTag](
updateFunc: (Seq[V], Option[S]) => Option[S]
): DStream[(K, S)] = {
updateStateByKey(updateFunc, defaultPartitioner())
}
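  /**
   * Illustrative sketch, not part of the original source: a per-key running count kept as
   * updateStateByKey state. The state is the count seen so far and each batch adds the number
   * of new values for the key; returning None instead would drop the key from the state.
   */
  private[streaming] def exampleRunningCount(): DStream[(K, Int)] = {
    updateStateByKey[Int]((newValues: Seq[V], count: Option[Int]) =>
      Some(count.getOrElse(0) + newValues.size))
  }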
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of each key.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
   * @param updateFunc State update function. If this function returns None, then the
   *                   corresponding state key-value pair will be eliminated.
* @param numPartitions Number of partitions of each RDD in the new DStream.
* @tparam S State type
*/
def updateStateByKey[S: ClassTag](
updateFunc: (Seq[V], Option[S]) => Option[S],
numPartitions: Int
): DStream[(K, S)] = {
updateStateByKey(updateFunc, defaultPartitioner(numPartitions))
}
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of the key.
* org.apache.spark.Partitioner is used to control the partitioning of each RDD.
   * @param updateFunc State update function. If this function returns None, then the
   *                   corresponding state key-value pair will be eliminated.
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new
* DStream.
* @tparam S State type
*/
def updateStateByKey[S: ClassTag](
updateFunc: (Seq[V], Option[S]) => Option[S],
partitioner: Partitioner
): DStream[(K, S)] = {
val newUpdateFunc = (iterator: Iterator[(K, Seq[V], Option[S])]) => {
iterator.flatMap(t => updateFunc(t._2, t._3).map(s => (t._1, s)))
}
updateStateByKey(newUpdateFunc, partitioner, true)
}
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of each key.
* org.apache.spark.Partitioner is used to control the partitioning of each RDD.
   * @param updateFunc State update function. Note that this function may generate a
   *                   tuple with a different key than the input key. Therefore keys may be removed
* or added in this way. It is up to the developer to decide whether to
* remember the partitioner despite the key being changed.
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new
* DStream
   * @param rememberPartitioner Whether to remember the partitioner object in the generated RDDs.
* @tparam S State type
*/
def updateStateByKey[S: ClassTag](
updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)],
partitioner: Partitioner,
rememberPartitioner: Boolean
): DStream[(K, S)] = {
new StateDStream(self, ssc.sc.clean(updateFunc), partitioner, rememberPartitioner, None)
}
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of the key.
* org.apache.spark.Partitioner is used to control the partitioning of each RDD.
   * @param updateFunc State update function. If this function returns None, then the
   *                   corresponding state key-value pair will be eliminated.
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new
* DStream.
* @param initialRDD initial state value of each key.
* @tparam S State type
*/
def updateStateByKey[S: ClassTag](
updateFunc: (Seq[V], Option[S]) => Option[S],
partitioner: Partitioner,
initialRDD: RDD[(K, S)]
): DStream[(K, S)] = {
val newUpdateFunc = (iterator: Iterator[(K, Seq[V], Option[S])]) => {
iterator.flatMap(t => updateFunc(t._2, t._3).map(s => (t._1, s)))
}
updateStateByKey(newUpdateFunc, partitioner, true, initialRDD)
}
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of each key.
* org.apache.spark.Partitioner is used to control the partitioning of each RDD.
   * @param updateFunc State update function. Note that this function may generate a
   *                   tuple with a different key than the input key. Therefore keys may be removed
* or added in this way. It is up to the developer to decide whether to
* remember the partitioner despite the key being changed.
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new
* DStream
   * @param rememberPartitioner Whether to remember the partitioner object in the generated RDDs.
* @param initialRDD initial state value of each key.
* @tparam S State type
*/
def updateStateByKey[S: ClassTag](
updateFunc: (Iterator[(K, Seq[V], Option[S])]) => Iterator[(K, S)],
partitioner: Partitioner,
rememberPartitioner: Boolean,
initialRDD: RDD[(K, S)]
): DStream[(K, S)] = {
new StateDStream(self, ssc.sc.clean(updateFunc), partitioner,
rememberPartitioner, Some(initialRDD))
}
/**
* Return a new DStream by applying a map function to the value of each key-value pairs in
* 'this' DStream without changing the key.
*/
def mapValues[U: ClassTag](mapValuesFunc: V => U): DStream[(K, U)] = {
new MapValuedDStream[K, V, U](self, mapValuesFunc)
}
/**
* Return a new DStream by applying a flatmap function to the value of each key-value pairs in
* 'this' DStream without changing the key.
*/
def flatMapValues[U: ClassTag](
flatMapValuesFunc: V => TraversableOnce[U]
): DStream[(K, U)] = {
new FlatMapValuedDStream[K, V, U](self, flatMapValuesFunc)
}
/**
* Return a new DStream by applying 'cogroup' between RDDs of `this` DStream and `other` DStream.
* Hash partitioning is used to generate the RDDs with Spark's default number
* of partitions.
*/
def cogroup[W: ClassTag](other: DStream[(K, W)]): DStream[(K, (Iterable[V], Iterable[W]))] = {
cogroup(other, defaultPartitioner())
}
/**
* Return a new DStream by applying 'cogroup' between RDDs of `this` DStream and `other` DStream.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
*/
def cogroup[W: ClassTag](other: DStream[(K, W)], numPartitions: Int)
: DStream[(K, (Iterable[V], Iterable[W]))] = {
cogroup(other, defaultPartitioner(numPartitions))
}
/**
* Return a new DStream by applying 'cogroup' between RDDs of `this` DStream and `other` DStream.
* The supplied org.apache.spark.Partitioner is used to partition the generated RDDs.
*/
def cogroup[W: ClassTag](
other: DStream[(K, W)],
partitioner: Partitioner
): DStream[(K, (Iterable[V], Iterable[W]))] = {
self.transformWith(
other,
(rdd1: RDD[(K, V)], rdd2: RDD[(K, W)]) => rdd1.cogroup(rdd2, partitioner)
)
}
/**
* Return a new DStream by applying 'join' between RDDs of `this` DStream and `other` DStream.
* Hash partitioning is used to generate the RDDs with Spark's default number of partitions.
*/
def join[W: ClassTag](other: DStream[(K, W)]): DStream[(K, (V, W))] = {
join[W](other, defaultPartitioner())
}
/**
* Return a new DStream by applying 'join' between RDDs of `this` DStream and `other` DStream.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
*/
def join[W: ClassTag](other: DStream[(K, W)], numPartitions: Int): DStream[(K, (V, W))] = {
join[W](other, defaultPartitioner(numPartitions))
}
/**
* Return a new DStream by applying 'join' between RDDs of `this` DStream and `other` DStream.
* The supplied org.apache.spark.Partitioner is used to control the partitioning of each RDD.
*/
def join[W: ClassTag](
other: DStream[(K, W)],
partitioner: Partitioner
): DStream[(K, (V, W))] = {
self.transformWith(
other,
(rdd1: RDD[(K, V)], rdd2: RDD[(K, W)]) => rdd1.join(rdd2, partitioner)
)
}
/**
* Return a new DStream by applying 'left outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with Spark's default
* number of partitions.
*/
def leftOuterJoin[W: ClassTag](other: DStream[(K, W)]): DStream[(K, (V, Option[W]))] = {
leftOuterJoin[W](other, defaultPartitioner())
}
/**
* Return a new DStream by applying 'left outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions`
* partitions.
*/
def leftOuterJoin[W: ClassTag](
other: DStream[(K, W)],
numPartitions: Int
): DStream[(K, (V, Option[W]))] = {
leftOuterJoin[W](other, defaultPartitioner(numPartitions))
}
/**
* Return a new DStream by applying 'left outer join' between RDDs of `this` DStream and
* `other` DStream. The supplied org.apache.spark.Partitioner is used to control
* the partitioning of each RDD.
*/
def leftOuterJoin[W: ClassTag](
other: DStream[(K, W)],
partitioner: Partitioner
): DStream[(K, (V, Option[W]))] = {
self.transformWith(
other,
(rdd1: RDD[(K, V)], rdd2: RDD[(K, W)]) => rdd1.leftOuterJoin(rdd2, partitioner)
)
}
/**
* Return a new DStream by applying 'right outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with Spark's default
* number of partitions.
*/
def rightOuterJoin[W: ClassTag](other: DStream[(K, W)]): DStream[(K, (Option[V], W))] = {
rightOuterJoin[W](other, defaultPartitioner())
}
/**
* Return a new DStream by applying 'right outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions`
* partitions.
*/
def rightOuterJoin[W: ClassTag](
other: DStream[(K, W)],
numPartitions: Int
): DStream[(K, (Option[V], W))] = {
rightOuterJoin[W](other, defaultPartitioner(numPartitions))
}
/**
* Return a new DStream by applying 'right outer join' between RDDs of `this` DStream and
* `other` DStream. The supplied org.apache.spark.Partitioner is used to control
* the partitioning of each RDD.
*/
def rightOuterJoin[W: ClassTag](
other: DStream[(K, W)],
partitioner: Partitioner
): DStream[(K, (Option[V], W))] = {
self.transformWith(
other,
(rdd1: RDD[(K, V)], rdd2: RDD[(K, W)]) => rdd1.rightOuterJoin(rdd2, partitioner)
)
}
/**
* Return a new DStream by applying 'full outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with Spark's default
* number of partitions.
*/
def fullOuterJoin[W: ClassTag](other: DStream[(K, W)]): DStream[(K, (Option[V], Option[W]))] = {
fullOuterJoin[W](other, defaultPartitioner())
}
/**
* Return a new DStream by applying 'full outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions`
* partitions.
*/
def fullOuterJoin[W: ClassTag](
other: DStream[(K, W)],
numPartitions: Int
): DStream[(K, (Option[V], Option[W]))] = {
fullOuterJoin[W](other, defaultPartitioner(numPartitions))
}
/**
* Return a new DStream by applying 'full outer join' between RDDs of `this` DStream and
* `other` DStream. The supplied org.apache.spark.Partitioner is used to control
* the partitioning of each RDD.
*/
def fullOuterJoin[W: ClassTag](
other: DStream[(K, W)],
partitioner: Partitioner
): DStream[(K, (Option[V], Option[W]))] = {
self.transformWith(
other,
(rdd1: RDD[(K, V)], rdd2: RDD[(K, W)]) => rdd1.fullOuterJoin(rdd2, partitioner)
)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval
* is generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix"
*/
def saveAsHadoopFiles[F <: OutputFormat[K, V]](
prefix: String,
suffix: String
)(implicit fm: ClassTag[F]) {
saveAsHadoopFiles(prefix, suffix, keyClass, valueClass,
fm.runtimeClass.asInstanceOf[Class[F]])
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval
* is generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix"
*/
def saveAsHadoopFiles(
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[_ <: OutputFormat[_, _]],
conf: JobConf = new JobConf(ssc.sparkContext.hadoopConfiguration)
) {
// Wrap conf in SerializableWritable so that ForeachDStream can be serialized for checkpoints
val serializableConf = new SerializableWritable(conf)
val saveFunc = (rdd: RDD[(K, V)], time: Time) => {
val file = rddToFileName(prefix, suffix, time)
rdd.saveAsHadoopFile(file, keyClass, valueClass, outputFormatClass, serializableConf.value)
}
self.foreachRDD(saveFunc)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[K, V]](
prefix: String,
suffix: String
)(implicit fm: ClassTag[F]) {
saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass,
fm.runtimeClass.asInstanceOf[Class[F]])
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsNewAPIHadoopFiles(
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[_ <: NewOutputFormat[_, _]],
conf: Configuration = ssc.sparkContext.hadoopConfiguration
) {
// Wrap conf in SerializableWritable so that ForeachDStream can be serialized for checkpoints
val serializableConf = new SerializableWritable(conf)
val saveFunc = (rdd: RDD[(K, V)], time: Time) => {
val file = rddToFileName(prefix, suffix, time)
rdd.saveAsNewAPIHadoopFile(
file, keyClass, valueClass, outputFormatClass, serializableConf.value)
}
self.foreachRDD(saveFunc)
}
private def keyClass: Class[_] = kt.runtimeClass
private def valueClass: Class[_] = vt.runtimeClass
}
| trueyao/spark-lever | streaming/src/main/scala/org/apache/spark/streaming/dstream/PairDStreamFunctions.scala | Scala | apache-2.0 | 31,252 |
package quotes
import models._
object QuoteBuilderSingleton extends QuoteBuilder
class QuoteBuilder {
def buildQuote(quoteProfile: QuoteProfile, distanceResult: DistanceResult, fuelSurchargeProfile: FuelSurchargeProfile): Quote = {
val fares = quoteProfile.fareProfiles.map(x => {
val calculatedFare: BigDecimal = (x.baseFare + (x.perMile * distanceResult.miles)).setScale(2, BigDecimal.RoundingMode.HALF_UP)
val fare:BigDecimal = calculatedFare.max(x.minimum).setScale(2, BigDecimal.RoundingMode.HALF_UP)
val fuelSurcharge: BigDecimal = (calculatedFare * fuelSurchargeProfile.percentage).setScale(2, BigDecimal.RoundingMode.HALF_UP)
val serviceCharge: BigDecimal = (fare * 0.2).setScale(2, BigDecimal.RoundingMode.HALF_UP)
val total: BigDecimal = (fare + serviceCharge + fuelSurcharge).setScale(2, BigDecimal.RoundingMode.HALF_UP)
val fareLineItems: Seq[FareLineItem] =
Seq(
FareLineItem("fare", "Base Fare", fare),
FareLineItem("fuelCharge", "Fuel Surcharge", fuelSurcharge),
FareLineItem("serviceCharge", "Service Charge", serviceCharge),
FareLineItem("total", "Total", total)
)
new Fare(
fareProfile = x.name,
totalFare = total,
fareDetail = fareDetailFrom(fareLineItems),
fareLineItems = fareLineItems
)
}
)
Quote(
vehicleName = quoteProfile.name,
vehicleDescription = quoteProfile.description,
vehCode = quoteProfile.vehicleCode,
maxPassengers = quoteProfile.maxPassengers,
fares = fares
)
}
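  // Illustrative worked example, not part of the original source (the profile values are assumed):
  // for a fare profile with baseFare = 10.00, perMile = 2.50, minimum = 20.00, a 6.0-mile trip and
  // a 5% fuel surcharge, calculatedFare = 10.00 + 2.50 * 6.0 = 25.00, which is above the minimum,
  // so fare = 25.00; fuelSurcharge = 25.00 * 0.05 = 1.25 (charged on calculatedFare),
  // serviceCharge = 25.00 * 0.20 = 5.00 (charged on fare), and total = 25.00 + 5.00 + 1.25 = 31.25.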
def fareDetailFrom(fareLineItems: Seq[FareLineItem]): String = {
val lines = for {
f <- fareLineItems
line = s"${f.lineItemDescription}: $$${"%1.2f".format(f.lineItemCost)}"
} yield line
lines.foldLeft("") { (a, l) =>
if (a == "") {
a + l;
} else {
a + "\\n" + l
}
}
}
}
| greghxc/shakespeare | src/main/scala/quotes/QuoteBuilder.scala | Scala | mit | 1,970 |
/*
* Copyright (c) 2013 Aviat Networks.
* This file is part of DocReg+Web. Please refer to the NOTICE.txt file for license details.
*/
package vvv.docreg.backend
import net.liftweb.common.Loggable
import vvv.docreg.model._
import vvv.docreg.agent.SubscriberInfo
import org.squeryl.PrimitiveTypeMode._
trait SubscriptionReconcile extends Loggable {
val userLookup: UserLookupProvider
def reconcileSubscriptions(document: Document, subscriptions: List[SubscriberInfo]) {
val subscribers: List[(Long,String)] = for {
s <- subscriptions
u <- userLookup.lookup(Some(s.userName), Some(s.email), None, "subscription on " + document + " for " + s)
} yield (u.id -> s.options)
inTransaction {
var userSubscriptions = Subscription.forDocument(document).map(s => s.userId -> s).toMap
// converting to a map makes the user distinct, and takes the last user option as the valid option.
subscribers.toMap.foreach { i =>
val uid = i._1
val options = i._2.toLowerCase
val notification = options contains "always"
val bookmark = options contains "bookmark"
userSubscriptions.get(uid) match {
case Some(s) if (s.notification != notification || s.bookmark != bookmark) => {
s.notification = notification
s.bookmark = bookmark
Subscription.dbTable.update(s)
}
case None => {
val s = new Subscription
s.documentId = document.id
s.userId = uid
s.notification = notification
s.bookmark = bookmark
Subscription.dbTable.insert(s)
}
case _ => {} // No change
}
userSubscriptions -= uid
}
if (userSubscriptions.size > 0) {
Subscription.dbTable.deleteWhere(s => (s.documentId === document.id) and (s.userId in userSubscriptions.keySet))
}
}
}
}
| scott-abernethy/docreg-web | src/main/scala/vvv/docreg/backend/SubscriptionReconcile.scala | Scala | gpl-3.0 | 1,922 |